Diffstat (limited to 'drivers/message')
-rw-r--r--  drivers/message/fusion/mptbase.c    |   14
-rw-r--r--  drivers/message/fusion/mptbase.h    |   34
-rw-r--r--  drivers/message/fusion/mptctl.c     |    4
-rw-r--r--  drivers/message/fusion/mptfc.c      |   24
-rw-r--r--  drivers/message/fusion/mptsas.c     |   55
-rw-r--r--  drivers/message/fusion/mptscsih.c   |  968
-rw-r--r--  drivers/message/fusion/mptscsih.h   |    2
-rw-r--r--  drivers/message/fusion/mptspi.c     |   24
-rw-r--r--  drivers/message/i2o/Kconfig         |   12
-rw-r--r--  drivers/message/i2o/bus-osm.c       |   23
-rw-r--r--  drivers/message/i2o/config-osm.c    |    2
-rw-r--r--  drivers/message/i2o/core.h          |   20
-rw-r--r--  drivers/message/i2o/device.c        |  339
-rw-r--r--  drivers/message/i2o/driver.c        |   12
-rw-r--r--  drivers/message/i2o/exec-osm.c      |  114
-rw-r--r--  drivers/message/i2o/i2o_block.c     |  190
-rw-r--r--  drivers/message/i2o/i2o_config.c    |  196
-rw-r--r--  drivers/message/i2o/i2o_lan.h       |   38
-rw-r--r--  drivers/message/i2o/i2o_proc.c      |    2
-rw-r--r--  drivers/message/i2o/i2o_scsi.c      |   89
-rw-r--r--  drivers/message/i2o/iop.c           |  356
-rw-r--r--  drivers/message/i2o/pci.c           |    7
22 files changed, 1229 insertions, 1296 deletions
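
Annotation (editorial gloss, not part of the commit): three themes recur in the fusion hunks below. The BUS_TYPE enumerator for parallel SCSI is renamed from SCSI to SPI; the old per-target-id VirtDevice is split into a per-target VirtTarget (allocated by the new ->target_alloc hook and hung off scsi_target->hostdata) plus a slim per-LUN VirtDevice (hung off scsi_device->hostdata); and the probe routines are converted to a single goto-based error exit. A compile-ready toy of the enum rename follows; only the enumerators come from the patch, bus_name() and its strings are illustrative:

#include <stdio.h>

/* Mirrors the mptbase.h hunk: the enumerator formerly named SCSI is now
 * SPI, so the three bus types read FC / SPI / SAS consistently. */
typedef enum { FC, SPI, SAS } BUS_TYPE;

static const char *bus_name(BUS_TYPE t)   /* illustrative helper */
{
    switch (t) {
    case FC:  return "Fibre Channel";
    case SPI: return "parallel SCSI (SPI)";
    case SAS: return "Serial Attached SCSI";
    }
    return "unknown";
}

int main(void)
{
    printf("%s\n", bus_name(SPI));
    return 0;
}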
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 4262a22adc22..537836068c49 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -313,13 +313,13 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
         u32  log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
         if (ioc->bus_type == FC)
             mpt_fc_log_info(ioc, log_info);
-        else if (ioc->bus_type == SCSI)
+        else if (ioc->bus_type == SPI)
             mpt_sp_log_info(ioc, log_info);
         else if (ioc->bus_type == SAS)
             mpt_sas_log_info(ioc, log_info);
     }
     if (ioc_stat & MPI_IOCSTATUS_MASK) {
-        if (ioc->bus_type == SCSI &&
+        if (ioc->bus_type == SPI &&
             cb_idx != mpt_stm_index &&
             cb_idx != mpt_lan_index)
             mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
@@ -1376,7 +1376,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
     }
     else if (pdev->device == MPI_MANUFACTPAGE_DEVID_53C1030) {
         ioc->prod_name = "LSI53C1030";
-        ioc->bus_type = SCSI;
+        ioc->bus_type = SPI;
         /* 1030 Chip Fix. Disable Split transactions
          * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
          */
@@ -1389,7 +1389,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
     }
     else if (pdev->device == MPI_MANUFACTPAGE_DEVID_1030_53C1035) {
         ioc->prod_name = "LSI53C1035";
-        ioc->bus_type = SCSI;
+        ioc->bus_type = SPI;
     }
     else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064) {
         ioc->prod_name = "LSISAS1064";
@@ -3042,7 +3042,7 @@ mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
     /* Clear the internal flash bad bit - autoincrementing register,
      * so must do two writes.
      */
-    if (ioc->bus_type == SCSI) {
+    if (ioc->bus_type == SPI) {
         /*
          * 1030 and 1035 H/W errata, workaround to access
          * the ClearFlashBadSignatureBit
@@ -3152,7 +3152,7 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
     int cnt,cntdn;

     dinitprintk((KERN_WARNING MYNAM ": KickStarting %s!\n", ioc->name));
-    if (ioc->bus_type == SCSI) {
+    if (ioc->bus_type == SPI) {
         /* Always issue a Msg Unit Reset first. This will clear some
          * SCSI bus hang conditions.
          */
@@ -3580,7 +3580,7 @@ initChainBuffers(MPT_ADAPTER *ioc)
     dinitprintk((KERN_INFO MYNAM ": %s Now numSGE=%d num_sge=%d num_chain=%d\n",
         ioc->name, numSGE, num_sge, num_chain));

-    if (ioc->bus_type == SCSI)
+    if (ioc->bus_type == SPI)
         num_chain *= MPT_SCSI_CAN_QUEUE;
     else
         num_chain *= MPT_FC_CAN_QUEUE;
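
Editorial note on the initChainBuffers hunk just above: only the bus test changes (SCSI becomes SPI), but the surrounding logic is worth a gloss — the chain-buffer count is scaled by the per-bus queue depth. A stand-alone sketch of that scaling; the numeric values of MPT_SCSI_CAN_QUEUE and MPT_FC_CAN_QUEUE here are placeholders, not the driver's:

#include <stdio.h>

typedef enum { FC, SPI, SAS } BUS_TYPE;

/* Placeholder depths; the real constants live in mptbase.h. */
enum { MPT_SCSI_CAN_QUEUE = 127, MPT_FC_CAN_QUEUE = 127 };

/* Chain buffers are needed per outstanding command, so the per-command
 * chain count is multiplied by the bus type's queue depth. */
static int scale_chains(BUS_TYPE bus, int num_chain)
{
    if (bus == SPI)
        return num_chain * MPT_SCSI_CAN_QUEUE;
    return num_chain * MPT_FC_CAN_QUEUE;   /* FC and SAS share this path */
}

int main(void)
{
    printf("chains needed: %d\n", scale_chains(SPI, 2));
    return 0;
}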
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index bac8eb4186d2..6c48d1f54ac9 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT   "Copyright (c) 1999-2005 " MODULEAUTHOR
 #endif

-#define MPT_LINUX_VERSION_COMMON    "3.03.04"
-#define MPT_LINUX_PACKAGE_NAME      "@(#)mptlinux-3.03.04"
+#define MPT_LINUX_VERSION_COMMON    "3.03.05"
+#define MPT_LINUX_PACKAGE_NAME      "@(#)mptlinux-3.03.05"
 #define WHAT_MAGIC_STRING           "@" "(" "#" ")"

 #define show_mptmod_ver(s,ver)  \
@@ -321,7 +321,7 @@ typedef struct _SYSIF_REGS
  *   Dynamic Multi-Pathing specific stuff...
  */

-/* VirtDevice negoFlags field */
+/* VirtTarget negoFlags field */
 #define MPT_TARGET_NO_NEGO_WIDE     0x01
 #define MPT_TARGET_NO_NEGO_SYNC     0x02
 #define MPT_TARGET_NO_NEGO_QAS      0x04
@@ -330,8 +330,7 @@ typedef struct _SYSIF_REGS
 /*
  *  VirtDevice - FC LUN device or SCSI target device
  */
-typedef struct _VirtDevice {
-    struct scsi_device  *device;
+typedef struct _VirtTarget {
     u8           tflags;
     u8           ioc_id;
     u8           target_id;
@@ -342,21 +341,18 @@ typedef struct _VirtDevice {
     u8           negoFlags;     /* bit field, see above */
     u8           raidVolume;    /* set, if RAID Volume */
     u8           type;          /* byte 0 of Inquiry data */
-    u8           cflags;        /* controller flags */
-    u8           rsvd1raid;
-    u16          fc_phys_lun;
-    u16          fc_xlat_lun;
     u32          num_luns;
     u32          luns[8];       /* Max LUNs is 256 */
-    u8           pad[4];
     u8           inq_data[8];
-    /* IEEE Registered Extended Identifier
-       obtained via INQUIRY VPD page 0x83 */
-    /* NOTE: Do not separate uniq_prepad and uniq_data
-       as they are treateed as a single entity in the code */
-    u8           uniq_prepad[8];
-    u8           uniq_data[20];
-    u8           pad2[4];
+} VirtTarget;
+
+typedef struct _VirtDevice {
+    VirtTarget  *vtarget;
+    u8           ioc_id;
+    u8           bus_id;
+    u8           target_id;
+    u8           configured_lun;
+    u32          lun;
 } VirtDevice;

 /*
@@ -903,7 +899,7 @@ typedef struct _MPT_LOCAL_REPLY {
 
 typedef enum {
     FC,
-    SCSI,
+    SPI,
     SAS
 } BUS_TYPE;
 
@@ -912,7 +908,7 @@ typedef struct _MPT_SCSI_HOST {
     int                port;
     u32                pad0;
     struct scsi_cmnd **ScsiLookup;
-    VirtDevice       **Targets;
+    VirtTarget       **Targets;
     MPT_LOCAL_REPLY   *pLocal;    /* used for internal commands */
     struct timer_list  timer;
     /* Pool of memory for holding SCpnts before doing
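
Editorial note on the mptbase.h hunks above: the one-struct-per-id model becomes two levels — per-target state shared by every LUN behind one id, and per-LUN state holding a back-pointer. A compile-ready user-space miniature of that ownership scheme (fields are trimmed to the essentials; allocation order mimics target_alloc running before slave_alloc):

#include <stdlib.h>

/* Per-target state, shared by all LUNs behind one target id. */
typedef struct virt_target {
    unsigned char target_id;
    unsigned int  num_luns;
} VirtTarget;

/* Per-LUN state with a back-pointer to the shared target state. */
typedef struct virt_device {
    VirtTarget  *vtarget;
    unsigned int lun;
} VirtDevice;

static VirtTarget *target_alloc(unsigned char id)
{
    VirtTarget *vt = calloc(1, sizeof(*vt));
    if (vt)
        vt->target_id = id;
    return vt;
}

static VirtDevice *lun_alloc(VirtTarget *vt, unsigned int lun)
{
    VirtDevice *vd = calloc(1, sizeof(*vd));
    if (!vd)
        return NULL;
    vd->vtarget = vt;
    vd->lun = lun;
    vt->num_luns++;   /* mirrors vtarget->num_luns++ in slave_alloc */
    return vd;
}

int main(void)
{
    VirtTarget *vt = target_alloc(3);
    VirtDevice *vd = vt ? lun_alloc(vt, 0) : NULL;
    free(vd);
    free(vt);
    return 0;
}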
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 602138f8544d..959d2c5951b8 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1245,7 +1245,7 @@ mptctl_gettargetinfo (unsigned long arg)
     MPT_ADAPTER      *ioc;
     struct Scsi_Host *sh;
     MPT_SCSI_HOST    *hd;
-    VirtDevice       *vdev;
+    VirtTarget       *vdev;
     char             *pmem;
     int              *pdata;
     IOCPage2_t       *pIoc2;
@@ -1822,7 +1822,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
     case MPI_FUNCTION_SCSI_IO_REQUEST:
         if (ioc->sh) {
             SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
-            VirtDevice      *pTarget = NULL;
+            VirtTarget      *pTarget = NULL;
             MPT_SCSI_HOST   *hd = NULL;
             int qtag = MPI_SCSIIO_CONTROL_UNTAGGED;
             int scsidir = 0;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index a628be9bbbad..ba61e1828858 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -84,13 +84,16 @@ static int mptfcTaskCtx = -1;
 static int mptfcInternalCtx = -1; /* Used only for internal commands */

 static struct scsi_host_template mptfc_driver_template = {
+    .module             = THIS_MODULE,
     .proc_name          = "mptfc",
     .proc_info          = mptscsih_proc_info,
     .name               = "MPT FC Host",
     .info               = mptscsih_info,
     .queuecommand       = mptscsih_qcmd,
+    .target_alloc       = mptscsih_target_alloc,
     .slave_alloc        = mptscsih_slave_alloc,
     .slave_configure    = mptscsih_slave_configure,
+    .target_destroy     = mptscsih_target_destroy,
     .slave_destroy      = mptscsih_slave_destroy,
     .change_queue_depth = mptscsih_change_queue_depth,
     .eh_abort_handler   = mptscsih_abort,
@@ -167,13 +170,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         printk(MYIOC_s_WARN_FMT
           "Skipping because it's not operational!\n",
           ioc->name);
-        return -ENODEV;
+        error = -ENODEV;
+        goto out_mptfc_probe;
     }

     if (!ioc->active) {
         printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
            ioc->name);
-        return -ENODEV;
+        error = -ENODEV;
+        goto out_mptfc_probe;
     }

     /* Sanity check - ensure at least 1 port is INITIATOR capable
@@ -198,7 +203,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         printk(MYIOC_s_WARN_FMT
           "Unable to register controller with SCSI subsystem\n",
           ioc->name);
-        return -1;
+        error = -1;
+        goto out_mptfc_probe;
     }

     spin_lock_irqsave(&ioc->FreeQlock, flags);
@@ -266,7 +272,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     mem = kmalloc(sz, GFP_ATOMIC);
     if (mem == NULL) {
         error = -ENOMEM;
-        goto mptfc_probe_failed;
+        goto out_mptfc_probe;
     }

     memset(mem, 0, sz);
@@ -284,14 +290,14 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     mem = kmalloc(sz, GFP_ATOMIC);
     if (mem == NULL) {
         error = -ENOMEM;
-        goto mptfc_probe_failed;
+        goto out_mptfc_probe;
     }

     memset(mem, 0, sz);
-    hd->Targets = (VirtDevice **) mem;
+    hd->Targets = (VirtTarget **) mem;

     dprintk((KERN_INFO
-      "  Targets @ %p, sz=%d\n", hd->Targets, sz));
+      "  vdev @ %p, sz=%d\n", hd->Targets, sz));

     /* Clear the TM flags
      */
@@ -330,13 +336,13 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     if(error) {
         dprintk((KERN_ERR MYNAM
           "scsi_add_host failed\n"));
-        goto mptfc_probe_failed;
+        goto out_mptfc_probe;
     }

     scsi_scan_host(sh);
     return 0;

-mptfc_probe_failed:
+out_mptfc_probe:

     mptscsih_remove(pdev);
     return error;
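
Editorial note on the mptfc_probe changes above: every early "return -E..." becomes "error = ...; goto out_mptfc_probe;", so partially acquired resources are released in exactly one place. A minimal stand-alone rendition of the pattern (names and error values here are illustrative):

#include <stdlib.h>

/* Single-exit error handling: all failures funnel through one label. */
static int fake_probe(void)
{
    int error = 0;
    void *a = NULL, *b = NULL;

    a = malloc(64);
    if (a == NULL) {
        error = -12;   /* stand-in for -ENOMEM */
        goto out_probe;
    }
    b = malloc(64);
    if (b == NULL) {
        error = -12;
        goto out_probe;
    }
    /* success; for this sketch, release everything again */
    free(b);
    free(a);
    return 0;

 out_probe:
    free(b);   /* free(NULL) is a safe no-op */
    free(a);
    return error;
}

int main(void)
{
    return fake_probe() ? 1 : 0;
}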
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index e0a8bb8ba7d8..17e9757e728b 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -228,31 +228,35 @@ static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
  *  implement ->target_alloc.
  */
 static int
-mptsas_slave_alloc(struct scsi_device *device)
+mptsas_slave_alloc(struct scsi_device *sdev)
 {
-    struct Scsi_Host       *host = device->host;
+    struct Scsi_Host       *host = sdev->host;
     MPT_SCSI_HOST          *hd = (MPT_SCSI_HOST *)host->hostdata;
     struct sas_rphy        *rphy;
     struct mptsas_portinfo *p;
+    VirtTarget             *vtarget;
     VirtDevice             *vdev;
-    uint                    target = device->id;
+    struct scsi_target     *starget;
     int i;

-    if ((vdev = hd->Targets[target]) != NULL)
-        goto out;
-
     vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
     if (!vdev) {
         printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
                 hd->ioc->name, sizeof(VirtDevice));
         return -ENOMEM;
     }
-
     memset(vdev, 0, sizeof(VirtDevice));
-    vdev->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
     vdev->ioc_id = hd->ioc->id;
+    sdev->hostdata = vdev;
+    starget = scsi_target(sdev);
+    vtarget = starget->hostdata;
+    vdev->vtarget = vtarget;
+    if (vtarget->num_luns == 0) {
+        vtarget->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
+        hd->Targets[sdev->id] = vtarget;
+    }

-    rphy = dev_to_rphy(device->sdev_target->dev.parent);
+    rphy = dev_to_rphy(sdev->sdev_target->dev.parent);
     list_for_each_entry(p, &hd->ioc->sas_topology, list) {
         for (i = 0; i < p->num_phys; i++) {
             if (p->phy_info[i].attached.sas_address ==
@@ -260,7 +264,7 @@ mptsas_slave_alloc(struct scsi_device *device)
                 vdev->target_id =
                     p->phy_info[i].attached.target;
                 vdev->bus_id = p->phy_info[i].attached.bus;
-                hd->Targets[device->id] = vdev;
+                vdev->lun = sdev->lun;
                 goto out;
             }
         }
@@ -271,19 +275,24 @@ mptsas_slave_alloc(struct scsi_device *device)
     return -ENODEV;

  out:
-    vdev->num_luns++;
-    device->hostdata = vdev;
+    vtarget->ioc_id = vdev->ioc_id;
+    vtarget->target_id = vdev->target_id;
+    vtarget->bus_id = vdev->bus_id;
+    vtarget->num_luns++;
     return 0;
 }

 static struct scsi_host_template mptsas_driver_template = {
+    .module             = THIS_MODULE,
     .proc_name          = "mptsas",
     .proc_info          = mptscsih_proc_info,
     .name               = "MPT SPI Host",
     .info               = mptscsih_info,
     .queuecommand       = mptscsih_qcmd,
+    .target_alloc       = mptscsih_target_alloc,
     .slave_alloc        = mptsas_slave_alloc,
     .slave_configure    = mptscsih_slave_configure,
+    .target_destroy     = mptscsih_target_destroy,
     .slave_destroy      = mptscsih_slave_destroy,
     .change_queue_depth = mptscsih_change_queue_depth,
     .eh_abort_handler   = mptscsih_abort,
@@ -986,7 +995,6 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
         goto out_free_port_info;

     list_add_tail(&port_info->list, &ioc->sas_topology);
-
     for (i = 0; i < port_info->num_phys; i++) {
         mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
             (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
@@ -1133,13 +1141,15 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         printk(MYIOC_s_WARN_FMT
           "Skipping because it's not operational!\n",
           ioc->name);
-        return -ENODEV;
+        error = -ENODEV;
+        goto out_mptsas_probe;
     }

     if (!ioc->active) {
         printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
            ioc->name);
-        return -ENODEV;
+        error = -ENODEV;
+        goto out_mptsas_probe;
     }

     /* Sanity check - ensure at least 1 port is INITIATOR capable
@@ -1163,7 +1173,8 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         printk(MYIOC_s_WARN_FMT
           "Unable to register controller with SCSI subsystem\n",
           ioc->name);
-        return -1;
+        error = -1;
+        goto out_mptsas_probe;
     }

     spin_lock_irqsave(&ioc->FreeQlock, flags);
@@ -1237,7 +1248,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     mem = kmalloc(sz, GFP_ATOMIC);
     if (mem == NULL) {
         error = -ENOMEM;
-        goto mptsas_probe_failed;
+        goto out_mptsas_probe;
     }

     memset(mem, 0, sz);
@@ -1255,14 +1266,14 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     mem = kmalloc(sz, GFP_ATOMIC);
     if (mem == NULL) {
         error = -ENOMEM;
-        goto mptsas_probe_failed;
+        goto out_mptsas_probe;
     }

     memset(mem, 0, sz);
-    hd->Targets = (VirtDevice **) mem;
+    hd->Targets = (VirtTarget **) mem;

     dprintk((KERN_INFO
-      "  Targets @ %p, sz=%d\n", hd->Targets, sz));
+      "  vtarget @ %p, sz=%d\n", hd->Targets, sz));

     /* Clear the TM flags
      */
@@ -1308,14 +1319,14 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     if (error) {
         dprintk((KERN_ERR MYNAM
           "scsi_add_host failed\n"));
-        goto mptsas_probe_failed;
+        goto out_mptsas_probe;
     }

     mptsas_scan_sas_topology(ioc);

     return 0;

-mptsas_probe_failed:
+out_mptsas_probe:

     mptscsih_remove(pdev);
     return error;
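
Editorial note on mptsas_slave_alloc above: the per-LUN VirtDevice now finds its shared VirtTarget through scsi_target(sdev)->hostdata (populated earlier by mptscsih_target_alloc), and target-wide flags are initialized only for the first LUN behind an id. A toy model of that wiring, with local stand-in names throughout:

#include <stdlib.h>

struct toy_target { int num_luns; unsigned tflags; };
struct toy_device { struct toy_target *vtarget; unsigned lun; };

#define Q_YES         0x1
#define VALID_INQUIRY 0x2

/* Mimics the slave_alloc flow: hook the new per-LUN object to the
 * shared target object; set shared flags only for the first LUN. */
static struct toy_device *toy_slave_alloc(struct toy_target *vt, unsigned lun)
{
    struct toy_device *vd = calloc(1, sizeof(*vd));
    if (!vd)
        return NULL;
    vd->vtarget = vt;
    vd->lun = lun;
    if (vt->num_luns == 0)   /* first LUN behind this target */
        vt->tflags = Q_YES | VALID_INQUIRY;
    vt->num_luns++;
    return vd;
}

int main(void)
{
    struct toy_target vt = { 0, 0 };
    struct toy_device *lun0 = toy_slave_alloc(&vt, 0);
    struct toy_device *lun1 = toy_slave_alloc(&vt, 1);
    free(lun1);
    free(lun0);
    return 0;
}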
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index b7b9846ff3fd..93a16fa3c4ba 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -150,28 +150,29 @@ static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 tar
 int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
 int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);

-static void mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *data, int dlen);
-static void mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56);
-static void mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq);
+static void mptscsih_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget, u8 lun, char *data, int dlen);
+static void mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *vtarget, char byte56);
 static void mptscsih_setDevicePage1Flags (u8 width, u8 factor, u8 offset, int *requestedPtr, int *configurationPtr, u8 flags);
-static void mptscsih_no_negotiate(MPT_SCSI_HOST *hd, int target_id);
+static void mptscsih_no_negotiate(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc);
 static int mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target, int flags);
 static int mptscsih_writeIOCPage4(MPT_SCSI_HOST *hd, int target_id, int bus);
 int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
 static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
-static int mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum);
+static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
+static void mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtTarget *vtarget);
+static int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id);

 static struct work_struct mptscsih_persistTask;

 #ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
 static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io);
 static void mptscsih_domainValidation(void *hd);
-static int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id);
 static void mptscsih_qas_check(MPT_SCSI_HOST *hd, int id);
 static int mptscsih_doDv(MPT_SCSI_HOST *hd, int channel, int target);
 static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage);
 static void mptscsih_fillbuf(char *buffer, int size, int index, int width);
 static void mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id);
+static void mptscsih_set_dvflags(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc);
 #endif

 void mptscsih_remove(struct pci_dev *);
@@ -627,7 +628,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
     dreplyprintk((KERN_NOTICE "Reply ha=%d id=%d lun=%d:\n"
         "IOCStatus=%04xh SCSIState=%02xh SCSIStatus=%02xh\n"
         "resid=%d bufflen=%d xfer_cnt=%d\n",
-        ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
+        ioc->id, sc->device->id, sc->device->lun,
         status, scsi_state, scsi_status, sc->resid,
         sc->request_bufflen, xfer_cnt));

@@ -641,7 +642,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
         pScsiReply->ResponseInfo) {
         printk(KERN_NOTICE "ha=%d id=%d lun=%d: "
         "FCP_ResponseInfo=%08xh\n",
-        ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
+        ioc->id, sc->device->id, sc->device->lun,
         le32_to_cpu(pScsiReply->ResponseInfo));
     }

@@ -677,8 +678,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
             sc->result = DID_RESET << 16;

             /* GEM Workaround. */
-            if (ioc->bus_type == SCSI)
-                mptscsih_no_negotiate(hd, sc->device->id);
+            if (ioc->bus_type == SPI)
+                mptscsih_no_negotiate(hd, sc);
             break;

         case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:  /* 0x0049 */
@@ -892,16 +893,15 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
  *  when a lun is disable by mid-layer.
  *  Do NOT access the referenced scsi_cmnd structure or
  *  members. Will cause either a paging or NULL ptr error.
  *  @hd: Pointer to a SCSI HOST structure
- *  @target: target id
- *  @lun: lun
+ *  @vdevice: per device private data
  *
  *  Returns: None.
  *
  *  Called from slave_destroy.
  */
 static void
-mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
+mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
 {
     SCSIIORequest_t *mf = NULL;
     int              ii;
@@ -909,7 +909,7 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
     struct scsi_cmnd *sc;

     dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
-            target, lun, max));
+            vdevice->target_id, vdevice->lun, max));

     for (ii=0; ii < max; ii++) {
         if ((sc = hd->ScsiLookup[ii]) != NULL) {
@@ -919,7 +919,7 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
             dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n",
                 hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1]));

-            if ((mf->TargetID != ((u8)target)) || (mf->LUN[1] != ((u8) lun)))
+            if ((mf->TargetID != ((u8)vdevice->target_id)) || (mf->LUN[1] != ((u8) vdevice->lun)))
                 continue;

             /* Cleanup
@@ -993,8 +993,10 @@ mptscsih_remove(struct pci_dev *pdev)
     MPT_ADAPTER      *ioc = pci_get_drvdata(pdev);
     struct Scsi_Host *host = ioc->sh;
     MPT_SCSI_HOST    *hd;
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
     int count;
     unsigned long flags;
+#endif
     int sz1;

     if(!host) {
@@ -1075,11 +1077,6 @@ mptscsih_shutdown(struct pci_dev *pdev)
 
     hd = (MPT_SCSI_HOST *)host->hostdata;

-    /* Flush the cache of this adapter
-     */
-    if(hd != NULL)
-        mptscsih_synchronize_cache(hd, 0);
-
 }

 #ifdef CONFIG_PM
@@ -1286,7 +1283,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
     MPT_SCSI_HOST   *hd;
     MPT_FRAME_HDR   *mf;
     SCSIIORequest_t *pScsiReq;
-    VirtDevice      *pTarget = SCpnt->device->hostdata;
+    VirtDevice      *vdev = SCpnt->device->hostdata;
     int  lun;
     u32  datalen;
     u32  scsictl;
@@ -1341,8 +1338,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
     /* Default to untagged. Once a target structure has been allocated,
      * use the Inquiry data to determine if device supports tagged.
      */
-    if (pTarget
-        && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)
+    if (vdev
+        && (vdev->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
         && (SCpnt->device->tagged_supported)) {
         scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
     } else {
@@ -1351,8 +1348,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 
     /* Use the above information to set up the message frame
      */
-    pScsiReq->TargetID = (u8) pTarget->target_id;
-    pScsiReq->Bus = pTarget->bus_id;
+    pScsiReq->TargetID = (u8) vdev->target_id;
+    pScsiReq->Bus = vdev->bus_id;
     pScsiReq->ChainOffset = 0;
     pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
     pScsiReq->CDBLength = SCpnt->cmd_len;
@@ -1403,8 +1400,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
     SCpnt->host_scribble = NULL;

 #ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
-    if (hd->ioc->bus_type == SCSI) {
-        int dvStatus = hd->ioc->spi_data.dvStatus[pTarget->target_id];
+    if (hd->ioc->bus_type == SPI) {
+        int dvStatus = hd->ioc->spi_data.dvStatus[vdev->target_id];
         int issueCmd = 1;

         if (dvStatus || hd->ioc->spi_data.forceDv) {
@@ -1437,7 +1434,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
             /* Set the DV flags.
              */
             if (dvStatus & MPT_SCSICFG_DV_NOT_DONE)
-                mptscsih_set_dvflags(hd, pScsiReq);
+                mptscsih_set_dvflags(hd, SCpnt);

             if (!issueCmd)
                 goto fail;
@@ -1741,6 +1738,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
     u32  ctx2abort;
     int  scpnt_idx;
     int  retval;
+    VirtDevice *vdev;

     /* If we can't locate our host adapter structure, return FAILED status.
      */
@@ -1790,8 +1788,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 
     hd->abortSCpnt = SCpnt;

+    vdev = SCpnt->device->hostdata;
     retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
-        SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun,
+        vdev->bus_id, vdev->target_id, vdev->lun,
         ctx2abort, 2 /* 2 second timeout */);

     printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
@@ -1822,6 +1821,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
 {
     MPT_SCSI_HOST *hd;
     int  retval;
+    VirtDevice *vdev;

     /* If we can't locate our host adapter structure, return FAILED status.
      */
@@ -1839,8 +1839,9 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
         hd->ioc->name, SCpnt);
     scsi_print_command(SCpnt);

+    vdev = SCpnt->device->hostdata;
     retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
-        SCpnt->device->channel, SCpnt->device->id,
+        vdev->bus_id, vdev->target_id,
         0, 0, 5 /* 5 second timeout */);

     printk (KERN_WARNING MYNAM ": %s: target reset: %s (sc=%p)\n",
@@ -1871,6 +1872,7 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
 {
     MPT_SCSI_HOST *hd;
     int  retval;
+    VirtDevice *vdev;

     /* If we can't locate our host adapter structure, return FAILED status.
      */
@@ -1888,8 +1890,9 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
     if (hd->timeouts < -1)
         hd->timeouts++;

+    vdev = SCpnt->device->hostdata;
     retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
-        SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */);
+        vdev->bus_id, 0, 0, 0, 5 /* 5 second timeout */);

     printk (KERN_WARNING MYNAM ": %s: bus reset: %s (sc=%p)\n",
         hd->ioc->name,
@@ -2151,23 +2154,36 @@ mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev,
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
  *  OS entry point to allow host driver to alloc memory
+ *  for each scsi target. Called once per device the bus scan.
+ *  Return non-zero if allocation fails.
+ */
+int
+mptscsih_target_alloc(struct scsi_target *starget)
+{
+    VirtTarget *vtarget;
+
+    vtarget = kmalloc(sizeof(VirtTarget), GFP_KERNEL);
+    if (!vtarget)
+        return -ENOMEM;
+    memset(vtarget, 0, sizeof(VirtTarget));
+    starget->hostdata = vtarget;
+    return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  OS entry point to allow host driver to alloc memory
  *  for each scsi device. Called once per device the bus scan.
  *  Return non-zero if allocation fails.
- *  Init memory once per id (not LUN).
  */
 int
-mptscsih_slave_alloc(struct scsi_device *device)
+mptscsih_slave_alloc(struct scsi_device *sdev)
 {
-    struct Scsi_Host   *host = device->host;
+    struct Scsi_Host   *host = sdev->host;
     MPT_SCSI_HOST      *hd = (MPT_SCSI_HOST *)host->hostdata;
+    VirtTarget         *vtarget;
     VirtDevice         *vdev;
-    uint                target = device->id;
-
-    if (hd == NULL)
-        return -ENODEV;
-
-    if ((vdev = hd->Targets[target]) != NULL)
-        goto out;
+    struct scsi_target *starget;

     vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
     if (!vdev) {
@@ -2177,25 +2193,33 @@ mptscsih_slave_alloc(struct scsi_device *device)
     }

     memset(vdev, 0, sizeof(VirtDevice));
-    vdev->tflags = MPT_TARGET_FLAGS_Q_YES;
-    vdev->ioc_id = hd->ioc->id;
-    vdev->target_id = device->id;
-    vdev->bus_id = device->channel;
-    vdev->raidVolume = 0;
-    hd->Targets[device->id] = vdev;
-    if (hd->ioc->bus_type == SCSI) {
-        if (hd->ioc->raid_data.isRaid & (1 << device->id)) {
-            vdev->raidVolume = 1;
-            ddvtprintk((KERN_INFO
-                "RAID Volume @ id %d\n", device->id));
-        }
-    } else {
-        vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
-    }
-
- out:
-    vdev->num_luns++;
-    device->hostdata = vdev;
+    vdev->ioc_id = hd->ioc->id;
+    vdev->target_id = sdev->id;
+    vdev->bus_id = sdev->channel;
+    vdev->lun = sdev->lun;
+    sdev->hostdata = vdev;
+
+    starget = scsi_target(sdev);
+    vtarget = starget->hostdata;
+    vdev->vtarget = vtarget;
+
+    if (vtarget->num_luns == 0) {
+        hd->Targets[sdev->id] = vtarget;
+        vtarget->ioc_id = hd->ioc->id;
+        vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
+        vtarget->target_id = sdev->id;
+        vtarget->bus_id = sdev->channel;
+        if (hd->ioc->bus_type == SPI) {
+            if (hd->ioc->raid_data.isRaid & (1 << sdev->id)) {
+                vtarget->raidVolume = 1;
+                ddvtprintk((KERN_INFO
+                    "RAID Volume @ id %d\n", sdev->id));
+            }
+        } else {
+            vtarget->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
+        }
+    }
+    vtarget->num_luns++;
     return 0;
 }

@@ -2204,40 +2228,52 @@ mptscsih_slave_alloc(struct scsi_device *device)
  *  Called if no device present or device being unloaded
  */
 void
-mptscsih_slave_destroy(struct scsi_device *device)
-{
-    struct Scsi_Host *host = device->host;
-    MPT_SCSI_HOST    *hd = (MPT_SCSI_HOST *)host->hostdata;
-    VirtDevice       *vdev;
-    uint              target = device->id;
-    uint              lun = device->lun;
-
-    if (hd == NULL)
-        return;
-
-    mptscsih_search_running_cmds(hd, target, lun);
-
-    vdev = hd->Targets[target];
-    vdev->luns[0] &= ~(1 << lun);
-    if (--vdev->num_luns)
-        return;
-
-    kfree(hd->Targets[target]);
-    hd->Targets[target] = NULL;
-
-    if (hd->ioc->bus_type == SCSI) {
-        if (mptscsih_is_phys_disk(hd->ioc, target)) {
-            hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
-        } else {
-            hd->ioc->spi_data.dvStatus[target] =
-                MPT_SCSICFG_NEGOTIATE;
-
-            if (!hd->negoNvram) {
-                hd->ioc->spi_data.dvStatus[target] |=
-                    MPT_SCSICFG_DV_NOT_DONE;
-            }
-        }
-    }
+mptscsih_target_destroy(struct scsi_target *starget)
+{
+    if (starget->hostdata)
+        kfree(starget->hostdata);
+    starget->hostdata = NULL;
+}
+
+/*
+ *  OS entry point to allow for host driver to free allocated memory
+ *  Called if no device present or device being unloaded
+ */
+void
+mptscsih_slave_destroy(struct scsi_device *sdev)
+{
+    struct Scsi_Host   *host = sdev->host;
+    MPT_SCSI_HOST      *hd = (MPT_SCSI_HOST *)host->hostdata;
+    VirtTarget         *vtarget;
+    VirtDevice         *vdevice;
+    struct scsi_target *starget;
+
+    starget = scsi_target(sdev);
+    vtarget = starget->hostdata;
+    vdevice = sdev->hostdata;
+
+    mptscsih_search_running_cmds(hd, vdevice);
+    vtarget->luns[0] &= ~(1 << vdevice->lun);
+    vtarget->num_luns--;
+    if (vtarget->num_luns == 0) {
+        mptscsih_negotiate_to_asyn_narrow(hd, vtarget);
+        if (hd->ioc->bus_type == SPI) {
+            if (mptscsih_is_phys_disk(hd->ioc, vtarget->target_id)) {
+                hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
+            } else {
+                hd->ioc->spi_data.dvStatus[vtarget->target_id] =
+                    MPT_SCSICFG_NEGOTIATE;
+                if (!hd->negoNvram) {
+                    hd->ioc->spi_data.dvStatus[vtarget->target_id] |=
+                        MPT_SCSICFG_DV_NOT_DONE;
+                }
+            }
+        }
+        hd->Targets[sdev->id] = NULL;
+    }
+    mptscsih_synchronize_cache(hd, vdevice);
+    kfree(vdevice);
+    sdev->hostdata = NULL;
 }

 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2251,22 +2287,21 @@ mptscsih_slave_destroy(struct scsi_device *device)
 int
 mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
 {
     MPT_SCSI_HOST      *hd = (MPT_SCSI_HOST *)sdev->host->hostdata;
-    VirtDevice         *pTarget;
+    VirtTarget         *vtarget;
+    struct scsi_target *starget;
     int  max_depth;
     int  tagged;

-    if (hd == NULL)
-        return 0;
-    if (!(pTarget = hd->Targets[sdev->id]))
-        return 0;
+    starget = scsi_target(sdev);
+    vtarget = starget->hostdata;

-    if (hd->ioc->bus_type == SCSI) {
-        if (pTarget->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY) {
-            if (!(pTarget->tflags & MPT_TARGET_FLAGS_Q_YES))
+    if (hd->ioc->bus_type == SPI) {
+        if (vtarget->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY) {
+            if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
                 max_depth = 1;
-            else if (((pTarget->inq_data[0] & 0x1f) == 0x00) &&
-                     (pTarget->minSyncFactor <= MPT_ULTRA160 ))
+            else if (((vtarget->inq_data[0] & 0x1f) == 0x00) &&
+                     (vtarget->minSyncFactor <= MPT_ULTRA160 ))
                 max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
             else
                 max_depth = MPT_SCSI_CMD_PER_DEV_LOW;
@@ -2295,64 +2330,58 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
  *  Return non-zero if fails.
  */
 int
-mptscsih_slave_configure(struct scsi_device *device)
+mptscsih_slave_configure(struct scsi_device *sdev)
 {
-    struct Scsi_Host *sh = device->host;
-    VirtDevice       *pTarget;
+    struct Scsi_Host   *sh = sdev->host;
+    VirtTarget         *vtarget;
+    VirtDevice         *vdevice;
+    struct scsi_target *starget;
     MPT_SCSI_HOST    *hd = (MPT_SCSI_HOST *)sh->hostdata;
+    int indexed_lun, lun_index;

-    if ((hd == NULL) || (hd->Targets == NULL)) {
-        return 0;
-    }
+    starget = scsi_target(sdev);
+    vtarget = starget->hostdata;
+    vdevice = sdev->hostdata;

     dsprintk((MYIOC_s_INFO_FMT
         "device @ %p, id=%d, LUN=%d, channel=%d\n",
-        hd->ioc->name, device, device->id, device->lun, device->channel));
-    dsprintk((MYIOC_s_INFO_FMT
-        "sdtr %d wdtr %d ppr %d inq length=%d\n",
-        hd->ioc->name, device->sdtr, device->wdtr,
-        device->ppr, device->inquiry_len));
+        hd->ioc->name, sdev, sdev->id, sdev->lun, sdev->channel));
+    if (hd->ioc->bus_type == SPI)
+        dsprintk((MYIOC_s_INFO_FMT
+            "sdtr %d wdtr %d ppr %d inq length=%d\n",
+            hd->ioc->name, sdev->sdtr, sdev->wdtr,
+            sdev->ppr, sdev->inquiry_len));

-    if (device->id > sh->max_id) {
+    if (sdev->id > sh->max_id) {
         /* error case, should never happen */
-        scsi_adjust_queue_depth(device, 0, 1);
+        scsi_adjust_queue_depth(sdev, 0, 1);
         goto slave_configure_exit;
     }

-    pTarget = hd->Targets[device->id];
-
-    if (pTarget == NULL) {
-        /* Driver doesn't know about this device.
-         * Kernel may generate a "Dummy Lun 0" which
-         * may become a real Lun if a
-         * "scsi add-single-device" command is executed
-         * while the driver is active (hot-plug a
-         * device). LSI Raid controllers need
-         * queue_depth set to DEV_HIGH for this reason.
-         */
-        scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
-            MPT_SCSI_CMD_PER_DEV_HIGH);
-        goto slave_configure_exit;
-    }
-
-    mptscsih_initTarget(hd, device->channel, device->id, device->lun,
-        device->inquiry, device->inquiry_len );
-    mptscsih_change_queue_depth(device, MPT_SCSI_CMD_PER_DEV_HIGH);
+    vdevice->configured_lun=1;
+    lun_index = (vdevice->lun >> 5);  /* 32 luns per lun_index */
+    indexed_lun = (vdevice->lun % 32);
+    vtarget->luns[lun_index] |= (1 << indexed_lun);
+    mptscsih_initTarget(hd, vtarget, sdev->lun, sdev->inquiry,
+        sdev->inquiry_len );
+    mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);

     dsprintk((MYIOC_s_INFO_FMT
         "Queue depth=%d, tflags=%x\n",
-        hd->ioc->name, device->queue_depth, pTarget->tflags));
+        hd->ioc->name, sdev->queue_depth, vtarget->tflags));

-    dsprintk((MYIOC_s_INFO_FMT
-        "negoFlags=%x, maxOffset=%x, SyncFactor=%x\n",
-        hd->ioc->name, pTarget->negoFlags, pTarget->maxOffset, pTarget->minSyncFactor));
+    if (hd->ioc->bus_type == SPI)
+        dsprintk((MYIOC_s_INFO_FMT
+            "negoFlags=%x, maxOffset=%x, SyncFactor=%x\n",
+            hd->ioc->name, vtarget->negoFlags, vtarget->maxOffset,
+            vtarget->minSyncFactor));

 slave_configure_exit:

     dsprintk((MYIOC_s_INFO_FMT
         "tagged %d, simple %d, ordered %d\n",
-        hd->ioc->name,device->tagged_supported, device->simple_tags,
-        device->ordered_tags));
+        hd->ioc->name,sdev->tagged_supported, sdev->simple_tags,
+        sdev->ordered_tags));

     return 0;
 }
@@ -2370,16 +2399,14 @@ slave_configure_exit:
 static void
 mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply)
 {
-    VirtDevice      *target;
+    VirtDevice      *vdev;
     SCSIIORequest_t *pReq;
     u32  sense_count = le32_to_cpu(pScsiReply->SenseCount);
-    int  index;

     /* Get target structure
      */
     pReq = (SCSIIORequest_t *) mf;
-    index = (int) pReq->TargetID;
-    target = hd->Targets[index];
+    vdev = sc->device->hostdata;

     if (sense_count) {
         u8 *sense_data;
@@ -2393,7 +2420,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
         /* Log SMART data (asc = 0x5D, non-IM case only) if required.
          */
         if ((hd->ioc->events) && (hd->ioc->eventTypes & (1 << MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE))) {
-            if ((sense_data[12] == 0x5D) && (target->raidVolume == 0)) {
+            if ((sense_data[12] == 0x5D) && (vdev->vtarget->raidVolume == 0)) {
                 int idx;
                 MPT_ADAPTER *ioc = hd->ioc;

@@ -2403,7 +2430,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
 
                 ioc->events[idx].data[0] = (pReq->LUN[1] << 24) ||
                     (MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA << 16) ||
-                    (pReq->Bus << 8) || pReq->TargetID;
+                    (sc->device->channel << 8) || sc->device->id;

                 ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12];

@@ -2503,9 +2530,9 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
     /* 2. Chain Buffer initialization
      */

-    /* 4. Renegotiate to all devices, if SCSI
+    /* 4. Renegotiate to all devices, if SPI
      */
-    if (ioc->bus_type == SCSI) {
+    if (ioc->bus_type == SPI) {
         dnegoprintk(("writeSDP1: ALL_IDS USE_NVRAM\n"));
         mptscsih_writeSDP1(hd, 0, 0, MPT_SCSICFG_ALL_IDS | MPT_SCSICFG_USE_NVRAM);
     }
@@ -2534,7 +2561,7 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 
     /* 7. Set flag to force DV and re-read IOC Page 3
      */
-    if (ioc->bus_type == SCSI) {
+    if (ioc->bus_type == SPI) {
         ioc->spi_data.forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
         ddvtprintk(("Set reload IOC Pg3 Flag\n"));
     }
@@ -2576,7 +2603,7 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
         break;
     case MPI_EVENT_IOC_BUS_RESET:        /* 04 */
     case MPI_EVENT_EXT_BUS_RESET:        /* 05 */
-        if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
+        if (hd && (ioc->bus_type == SPI) && (hd->soft_resets < -1))
             hd->soft_resets++;
         break;
     case MPI_EVENT_LOGOUT:               /* 09 */
@@ -2597,11 +2624,11 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
 
     case MPI_EVENT_INTEGRATED_RAID:      /* 0B */
     {
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
         pMpiEventDataRaid_t pRaidEventData =
             (pMpiEventDataRaid_t) pEvReply->Data;
-#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
         /* Domain Validation Needed */
-        if (ioc->bus_type == SCSI &&
+        if (ioc->bus_type == SPI &&
             pRaidEventData->ReasonCode ==
             MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED)
             mptscsih_set_dvflags_raid(hd, pRaidEventData->PhysDiskNum);
@@ -2632,8 +2659,7 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
 /*
  *  mptscsih_initTarget - Target, LUN alloc/free functionality.
  *  @hd: Pointer to MPT_SCSI_HOST structure
- *  @bus_id: Bus number (?)
- *  @target_id: SCSI target id
+ *  @vtarget: per target private data
  *  @lun: SCSI LUN id
  *  @data: Pointer to data
  *  @dlen: Number of INQUIRY bytes
@@ -2646,15 +2672,14 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
  *
  */
 static void
-mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *data, int dlen)
+mptscsih_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget, u8 lun, char *data, int dlen)
 {
-    int         indexed_lun, lun_index;
-    VirtDevice *vdev;
     SpiCfgData *pSpi;
     char        data_56;
+    int         inq_len;

     dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n",
-        hd->ioc->name, bus_id, target_id, lun, hd));
+        hd->ioc->name, vtarget->bus_id, vtarget->target_id, lun, hd));

     /*
      * If the peripheral qualifier filter is enabled then if the target reports a 0x1
@@ -2674,75 +2699,68 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
2674 if (data[0] & 0xe0) 2699 if (data[0] & 0xe0)
2675 return; 2700 return;
2676 2701
2677 if ((vdev = hd->Targets[target_id]) == NULL) { 2702 if (vtarget == NULL)
2678 return; 2703 return;
2679 }
2680 2704
2681 lun_index = (lun >> 5); /* 32 luns per lun_index */ 2705 if (data)
2682 indexed_lun = (lun % 32); 2706 vtarget->type = data[0];
2683 vdev->luns[lun_index] |= (1 << indexed_lun); 2707
2684 2708 if (hd->ioc->bus_type != SPI)
2685 if (hd->ioc->bus_type == SCSI) { 2709 return;
2686 if ((data[0] == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) { 2710
2687 /* Treat all Processors as SAF-TE if 2711 if ((data[0] == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) {
2688 * command line option is set */ 2712 /* Treat all Processors as SAF-TE if
2689 vdev->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED; 2713 * command line option is set */
2690 mptscsih_writeIOCPage4(hd, target_id, bus_id); 2714 vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
2691 }else if ((data[0] == TYPE_PROCESSOR) && 2715 mptscsih_writeIOCPage4(hd, vtarget->target_id, vtarget->bus_id);
2692 !(vdev->tflags & MPT_TARGET_FLAGS_SAF_TE_ISSUED )) { 2716 }else if ((data[0] == TYPE_PROCESSOR) &&
2693 if ( dlen > 49 ) { 2717 !(vtarget->tflags & MPT_TARGET_FLAGS_SAF_TE_ISSUED )) {
2694 vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY; 2718 if ( dlen > 49 ) {
2695 if ( data[44] == 'S' && 2719 vtarget->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
2696 data[45] == 'A' && 2720 if ( data[44] == 'S' &&
2697 data[46] == 'F' && 2721 data[45] == 'A' &&
2698 data[47] == '-' && 2722 data[46] == 'F' &&
2699 data[48] == 'T' && 2723 data[47] == '-' &&
2700 data[49] == 'E' ) { 2724 data[48] == 'T' &&
2701 vdev->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED; 2725 data[49] == 'E' ) {
2702 mptscsih_writeIOCPage4(hd, target_id, bus_id); 2726 vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
2703 } 2727 mptscsih_writeIOCPage4(hd, vtarget->target_id, vtarget->bus_id);
2704 } 2728 }
2705 } 2729 }
2706 if (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY)) { 2730 }
2707 if ( dlen > 8 ) { 2731 if (!(vtarget->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY)) {
2708 memcpy (vdev->inq_data, data, 8); 2732 inq_len = dlen < 8 ? dlen : 8;
2709 } else { 2733 memcpy (vtarget->inq_data, data, inq_len);
2710 memcpy (vdev->inq_data, data, dlen); 2734 /* If have not done DV, set the DV flag.
2711 } 2735 */
2736 pSpi = &hd->ioc->spi_data;
2737 if ((data[0] == TYPE_TAPE) || (data[0] == TYPE_PROCESSOR)) {
2738 if (pSpi->dvStatus[vtarget->target_id] & MPT_SCSICFG_DV_NOT_DONE)
2739 pSpi->dvStatus[vtarget->target_id] |= MPT_SCSICFG_NEED_DV;
2740 }
2741 vtarget->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
2712 2742
2713 /* If have not done DV, set the DV flag. 2743 data_56 = 0x0F; /* Default to full capabilities if Inq data length is < 57 */
2744 if (dlen > 56) {
2745 if ( (!(vtarget->tflags & MPT_TARGET_FLAGS_VALID_56))) {
2746 /* Update the target capabilities
2714 */ 2747 */
2715 pSpi = &hd->ioc->spi_data; 2748 data_56 = data[56];
2716 if ((data[0] == TYPE_TAPE) || (data[0] == TYPE_PROCESSOR)) { 2749 vtarget->tflags |= MPT_TARGET_FLAGS_VALID_56;
2717 if (pSpi->dvStatus[target_id] & MPT_SCSICFG_DV_NOT_DONE)
2718 pSpi->dvStatus[target_id] |= MPT_SCSICFG_NEED_DV;
2719 }
2720
2721 vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
2722
2723
2724 data_56 = 0x0F; /* Default to full capabilities if Inq data length is < 57 */
2725 if (dlen > 56) {
2726 if ( (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_56))) {
2727 /* Update the target capabilities
2728 */
2729 data_56 = data[56];
2730 vdev->tflags |= MPT_TARGET_FLAGS_VALID_56;
2731 }
2732 } 2750 }
2733 mptscsih_setTargetNegoParms(hd, vdev, data_56); 2751 }
2734 } else { 2752 mptscsih_setTargetNegoParms(hd, vtarget, data_56);
2735 /* Initial Inquiry may not request enough data bytes to 2753 } else {
2736 * obtain byte 57. DV will; if target doesn't return 2754 /* Initial Inquiry may not request enough data bytes to
2737 * at least 57 bytes, data[56] will be zero. */ 2755 * obtain byte 57. DV will; if target doesn't return
2738 if (dlen > 56) { 2756 * at least 57 bytes, data[56] will be zero. */
2739 if ( (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_56))) { 2757 if (dlen > 56) {
2740 /* Update the target capabilities 2758 if ( (!(vtarget->tflags & MPT_TARGET_FLAGS_VALID_56))) {
2741 */ 2759 /* Update the target capabilities
2742 data_56 = data[56]; 2760 */
2743 vdev->tflags |= MPT_TARGET_FLAGS_VALID_56; 2761 data_56 = data[56];
2744 mptscsih_setTargetNegoParms(hd, vdev, data_56); 2762 vtarget->tflags |= MPT_TARGET_FLAGS_VALID_56;
2745 } 2763 mptscsih_setTargetNegoParms(hd, vtarget, data_56);
2746 } 2764 }
2747 } 2765 }
2748 } 2766 }
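
The hunk above keys SAF-TE handling off raw INQUIRY bytes: a processor-type device that returns the ASCII signature "SAF-TE" in bytes 44-49 gets IOC Page 4 written for it. A minimal sketch of that byte check as a hypothetical standalone helper (not part of the driver; assumes <linux/string.h>):

	static int inq_is_safte(const unsigned char *inq, int dlen)
	{
		/* bytes 44..49 of standard INQUIRY data carry the signature */
		if (dlen <= 49)
			return 0;	/* response too short to tell */
		return memcmp(&inq[44], "SAF-TE", 6) == 0;
	}
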
@@ -2755,12 +2773,12 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
2755 * 2773 *
2756 */ 2774 */
2757static void 2775static void
2758mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56) 2776mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target, char byte56)
2759{ 2777{
2760 SpiCfgData *pspi_data = &hd->ioc->spi_data; 2778 SpiCfgData *pspi_data = &hd->ioc->spi_data;
2761 int id = (int) target->target_id; 2779 int id = (int) target->target_id;
2762 int nvram; 2780 int nvram;
2763 VirtDevice *vdev; 2781 VirtTarget *vtarget;
2764 int ii; 2782 int ii;
2765 u8 width = MPT_NARROW; 2783 u8 width = MPT_NARROW;
2766 u8 factor = MPT_ASYNC; 2784 u8 factor = MPT_ASYNC;
@@ -2905,9 +2923,9 @@ mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
2905 2923
2906 ddvtprintk((KERN_INFO "Disabling QAS due to noQas=%02x on id=%d!\n", noQas, id)); 2924 ddvtprintk((KERN_INFO "Disabling QAS due to noQas=%02x on id=%d!\n", noQas, id));
2907 for (ii = 0; ii < id; ii++) { 2925 for (ii = 0; ii < id; ii++) {
2908 if ( (vdev = hd->Targets[ii]) ) { 2926 if ( (vtarget = hd->Targets[ii]) ) {
2909 vdev->negoFlags |= MPT_TARGET_NO_NEGO_QAS; 2927 vtarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
2910 mptscsih_writeSDP1(hd, 0, ii, vdev->negoFlags); 2928 mptscsih_writeSDP1(hd, 0, ii, vtarget->negoFlags);
2911 } 2929 }
2912 } 2930 }
2913 } 2931 }
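
QAS (quick arbitration and selection) is a bus-wide property on parallel SCSI: once one target cannot do it, every previously negotiated target on the segment has to drop it too, which is why the loop above rewrites SCSI Device Page 1 for all lower IDs. The shape of that propagation, sketched with the renamed VirtTarget type:

	for (ii = 0; ii < id; ii++) {
		VirtTarget *vt = hd->Targets[ii];

		if (vt) {
			vt->negoFlags |= MPT_TARGET_NO_NEGO_QAS;	/* latch the restriction */
			mptscsih_writeSDP1(hd, 0, ii, vt->negoFlags);	/* renegotiate without QAS */
		}
	}
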
@@ -2926,105 +2944,17 @@ mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
2926} 2944}
2927 2945
2928/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2946/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2929/* If DV disabled (negoNvram set to USE_NVARM) or if not LUN 0, return.
2930 * Else set the NEED_DV flag after Read Capacity Issued (disks)
2931 * or Mode Sense (cdroms).
2932 *
2933 * Tapes, initTarget will set this flag on completion of Inquiry command.
2934 * Called only if DV_NOT_DONE flag is set
2935 */
2936static void
2937mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
2938{
2939 MPT_ADAPTER *ioc = hd->ioc;
2940 u8 cmd;
2941 SpiCfgData *pSpi;
2942
2943 ddvtprintk((MYIOC_s_NOTE_FMT
2944 " set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
2945 hd->ioc->name, pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0]));
2946
2947 if ((pReq->LUN[1] != 0) || (hd->negoNvram != 0))
2948 return;
2949
2950 cmd = pReq->CDB[0];
2951
2952 if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) {
2953 pSpi = &ioc->spi_data;
2954 if ((ioc->raid_data.isRaid & (1 << pReq->TargetID)) && ioc->raid_data.pIocPg3) {
2955 /* Set NEED_DV for all hidden disks
2956 */
2957 Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
2958 int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
2959
2960 while (numPDisk) {
2961 pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
2962 ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
2963 pPDisk++;
2964 numPDisk--;
2965 }
2966 }
2967 pSpi->dvStatus[pReq->TargetID] |= MPT_SCSICFG_NEED_DV;
2968 ddvtprintk(("NEED_DV set for visible disk id %d\n", pReq->TargetID));
2969 }
2970}
2971
2972/* mptscsih_raid_set_dv_flags()
2973 *
2974 * New or replaced disk. Set DV flag and schedule DV.
2975 */
2976static void
2977mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id)
2978{
2979 MPT_ADAPTER *ioc = hd->ioc;
2980 SpiCfgData *pSpi = &ioc->spi_data;
2981 Ioc3PhysDisk_t *pPDisk;
2982 int numPDisk;
2983
2984 if (hd->negoNvram != 0)
2985 return;
2986
2987 ddvtprintk(("DV requested for phys disk id %d\n", id));
2988 if (ioc->raid_data.pIocPg3) {
2989 pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
2990 numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
2991 while (numPDisk) {
2992 if (id == pPDisk->PhysDiskNum) {
2993 pSpi->dvStatus[pPDisk->PhysDiskID] =
2994 (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
2995 pSpi->forceDv = MPT_SCSICFG_NEED_DV;
2996 ddvtprintk(("NEED_DV set for phys disk id %d\n",
2997 pPDisk->PhysDiskID));
2998 break;
2999 }
3000 pPDisk++;
3001 numPDisk--;
3002 }
3003
3004 if (numPDisk == 0) {
3005 /* The physical disk that needs DV was not found
3006 * in the stored IOC Page 3. The driver must reload
3007 * this page. DV routine will set the NEED_DV flag for
3008 * all phys disks that have DV_NOT_DONE set.
3009 */
3010 pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
3011 ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n",id));
3012 }
3013 }
3014}
3015
3016/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3017/* 2947/*
3018 * If no Target, bus reset on 1st I/O. Set the flag to 2948 * If no Target, bus reset on 1st I/O. Set the flag to
3019 * prevent any future negotiations to this device. 2949 * prevent any future negotiations to this device.
3020 */ 2950 */
3021static void 2951static void
3022mptscsih_no_negotiate(MPT_SCSI_HOST *hd, int target_id) 2952mptscsih_no_negotiate(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc)
3023{ 2953{
2954 VirtDevice *vdev;
3024 2955
3025 if ((hd->Targets) && (hd->Targets[target_id] == NULL)) 2956 if ((vdev = sc->device->hostdata) != NULL)
3026 hd->ioc->spi_data.dvStatus[target_id] |= MPT_SCSICFG_BLK_NEGO; 2957 hd->ioc->spi_data.dvStatus[vdev->target_id] |= MPT_SCSICFG_BLK_NEGO;
3027
3028 return; 2958 return;
3029} 2959}
3030 2960
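
The mptscsih_no_negotiate() rework above reflects the driver's shift from indexing hd->Targets[] by target ID to using the per-LUN private pointer the SCSI mid-layer hands back with each command. A sketch of the pattern, assuming slave_alloc has already stored the VirtDevice in sdev->hostdata:

	static void example_block_nego(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc)
	{
		VirtDevice *vdev = sc->device->hostdata;	/* set by slave_alloc */

		if (vdev)	/* no per-LUN state yet: nothing to block */
			hd->ioc->spi_data.dvStatus[vdev->target_id] |=
				MPT_SCSICFG_BLK_NEGO;
	}
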
@@ -3100,7 +3030,7 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
3100 MPT_ADAPTER *ioc = hd->ioc; 3030 MPT_ADAPTER *ioc = hd->ioc;
3101 Config_t *pReq; 3031 Config_t *pReq;
3102 SCSIDevicePage1_t *pData; 3032 SCSIDevicePage1_t *pData;
3103 VirtDevice *pTarget=NULL; 3033 VirtTarget *vtarget=NULL;
3104 MPT_FRAME_HDR *mf; 3034 MPT_FRAME_HDR *mf;
3105 dma_addr_t dataDma; 3035 dma_addr_t dataDma;
3106 u16 req_idx; 3036 u16 req_idx;
@@ -3180,11 +3110,11 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
3180 /* If id is not a raid volume, get the updated 3110 /* If id is not a raid volume, get the updated
3181 * transmission settings from the target structure. 3111 * transmission settings from the target structure.
3182 */ 3112 */
3183 if (hd->Targets && (pTarget = hd->Targets[id]) && !pTarget->raidVolume) { 3113 if (hd->Targets && (vtarget = hd->Targets[id]) && !vtarget->raidVolume) {
3184 width = pTarget->maxWidth; 3114 width = vtarget->maxWidth;
3185 factor = pTarget->minSyncFactor; 3115 factor = vtarget->minSyncFactor;
3186 offset = pTarget->maxOffset; 3116 offset = vtarget->maxOffset;
3187 negoFlags = pTarget->negoFlags; 3117 negoFlags = vtarget->negoFlags;
3188 } 3118 }
3189 3119
3190#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION 3120#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
@@ -3904,149 +3834,139 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3904 3834
3905/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3835/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3906/** 3836/**
3907 * mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks. 3837 * mptscsih_negotiate_to_asyn_narrow - Restore devices to default state
3908 * @hd: Pointer to MPT_SCSI_HOST structure 3838 * @hd: Pointer to a SCSI HOST structure
3909 * @portnum: IOC port number 3839 * @vtarget: per device private data
3910 * 3840 *
3911 * Uses the ISR, but with special processing. 3841 * Uses the ISR, but with special processing.
3912 * MUST be single-threaded. 3842 * MUST be single-threaded.
3913 * 3843 *
3914 * Return: 0 on completion
3915 */ 3844 */
3916static int 3845static void
3917mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum) 3846mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtTarget *vtarget)
3918{ 3847{
3919 MPT_ADAPTER *ioc= hd->ioc; 3848 MPT_ADAPTER *ioc= hd->ioc;
3920 VirtDevice *pTarget; 3849 SCSIDevicePage1_t *pcfg1Data;
3921 SCSIDevicePage1_t *pcfg1Data = NULL;
3922 INTERNAL_CMD iocmd;
3923 CONFIGPARMS cfg; 3850 CONFIGPARMS cfg;
3924 dma_addr_t cfg1_dma_addr = -1; 3851 dma_addr_t cfg1_dma_addr;
3925 ConfigPageHeader_t header1; 3852 ConfigPageHeader_t header;
3926 int bus = 0; 3853 int id;
3927 int id = 0; 3854 int requested, configuration, data,i;
3928 int lun;
3929 int indexed_lun, lun_index;
3930 int hostId = ioc->pfacts[portnum].PortSCSIID;
3931 int max_id;
3932 int requested, configuration, data;
3933 int doConfig = 0;
3934 u8 flags, factor; 3855 u8 flags, factor;
3935 3856
3936 max_id = ioc->sh->max_id - 1; 3857 if (ioc->bus_type != SPI)
3937 3858 return;
3938 /* Following parameters will not change
3939 * in this routine.
3940 */
3941 iocmd.cmd = SYNCHRONIZE_CACHE;
3942 iocmd.flags = 0;
3943 iocmd.physDiskNum = -1;
3944 iocmd.data = NULL;
3945 iocmd.data_dma = -1;
3946 iocmd.size = 0;
3947 iocmd.rsvd = iocmd.rsvd2 = 0;
3948
3949 /* No SCSI hosts
3950 */
3951 if (hd->Targets == NULL)
3952 return 0;
3953
3954 /* Skip the host
3955 */
3956 if (id == hostId)
3957 id++;
3958
3959 /* Write SDP1 for all SCSI devices
3960 * Alloc memory and set up config buffer
3961 */
3962 if (ioc->bus_type == SCSI) {
3963 if (ioc->spi_data.sdp1length > 0) {
3964 pcfg1Data = (SCSIDevicePage1_t *)pci_alloc_consistent(ioc->pcidev,
3965 ioc->spi_data.sdp1length * 4, &cfg1_dma_addr);
3966
3967 if (pcfg1Data != NULL) {
3968 doConfig = 1;
3969 header1.PageVersion = ioc->spi_data.sdp1version;
3970 header1.PageLength = ioc->spi_data.sdp1length;
3971 header1.PageNumber = 1;
3972 header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
3973 cfg.cfghdr.hdr = &header1;
3974 cfg.physAddr = cfg1_dma_addr;
3975 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
3976 cfg.dir = 1;
3977 cfg.timeout = 0;
3978 }
3979 }
3980 }
3981 3859
3982 /* loop through all devices on this port 3860 if (!ioc->spi_data.sdp1length)
3983 */ 3861 return;
3984 while (bus < MPT_MAX_BUS) {
3985 iocmd.bus = bus;
3986 iocmd.id = id;
3987 pTarget = hd->Targets[(int)id];
3988 3862
3989 if (doConfig) { 3863 pcfg1Data = (SCSIDevicePage1_t *)pci_alloc_consistent(ioc->pcidev,
3864 ioc->spi_data.sdp1length * 4, &cfg1_dma_addr);
3990 3865
3991 /* Set the negotiation flags */ 3866 if (pcfg1Data == NULL)
3992 if (pTarget && (pTarget = hd->Targets[id]) && !pTarget->raidVolume) { 3867 return;
3993 flags = pTarget->negoFlags;
3994 } else {
3995 flags = hd->ioc->spi_data.noQas;
3996 if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
3997 data = hd->ioc->spi_data.nvram[id];
3998 3868
3999 if (data & MPT_NVRAM_WIDE_DISABLE) 3869 header.PageVersion = ioc->spi_data.sdp1version;
4000 flags |= MPT_TARGET_NO_NEGO_WIDE; 3870 header.PageLength = ioc->spi_data.sdp1length;
3871 header.PageNumber = 1;
3872 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
3873 cfg.cfghdr.hdr = &header;
3874 cfg.physAddr = cfg1_dma_addr;
3875 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
3876 cfg.dir = 1;
3877 cfg.timeout = 0;
4001 3878
4002 factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT; 3879 if (vtarget->raidVolume && ioc->raid_data.pIocPg3) {
4003 if ((factor == 0) || (factor == MPT_ASYNC)) 3880 for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
4004 flags |= MPT_TARGET_NO_NEGO_SYNC; 3881 id = ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID;
4005 } 3882 flags = hd->ioc->spi_data.noQas;
3883 if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
3884 data = hd->ioc->spi_data.nvram[id];
3885 if (data & MPT_NVRAM_WIDE_DISABLE)
3886 flags |= MPT_TARGET_NO_NEGO_WIDE;
3887 factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT;
3888 if ((factor == 0) || (factor == MPT_ASYNC))
3889 flags |= MPT_TARGET_NO_NEGO_SYNC;
4006 } 3890 }
4007
4008 /* Force to async, narrow */
4009 mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested, 3891 mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested,
4010 &configuration, flags); 3892 &configuration, flags);
 4011 dnegoprintk(("synchronize cache: id=%d width=0 factor=MPT_ASYNC " 3893 dnegoprintk(("synchronize cache: id=%d width=0 factor=MPT_ASYNC "
4012 "offset=0 negoFlags=%x request=%x config=%x\n", 3894 "offset=0 negoFlags=%x request=%x config=%x\n",
4013 id, flags, requested, configuration)); 3895 id, flags, requested, configuration));
4014 pcfg1Data->RequestedParameters = cpu_to_le32(requested); 3896 pcfg1Data->RequestedParameters = cpu_to_le32(requested);
4015 pcfg1Data->Reserved = 0; 3897 pcfg1Data->Reserved = 0;
4016 pcfg1Data->Configuration = cpu_to_le32(configuration); 3898 pcfg1Data->Configuration = cpu_to_le32(configuration);
4017 cfg.pageAddr = (bus<<8) | id; 3899 cfg.pageAddr = (vtarget->bus_id<<8) | id;
4018 mpt_config(hd->ioc, &cfg); 3900 mpt_config(hd->ioc, &cfg);
4019 } 3901 }
3902 } else {
3903 flags = vtarget->negoFlags;
3904 mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested,
3905 &configuration, flags);
 3906 dnegoprintk(("synchronize cache: id=%d width=0 factor=MPT_ASYNC "
3907 "offset=0 negoFlags=%x request=%x config=%x\n",
3908 vtarget->target_id, flags, requested, configuration));
3909 pcfg1Data->RequestedParameters = cpu_to_le32(requested);
3910 pcfg1Data->Reserved = 0;
3911 pcfg1Data->Configuration = cpu_to_le32(configuration);
3912 cfg.pageAddr = (vtarget->bus_id<<8) | vtarget->target_id;
3913 mpt_config(hd->ioc, &cfg);
3914 }
4020 3915
4021 /* If target Ptr NULL or if this target is NOT a disk, skip. 3916 if (pcfg1Data)
4022 */ 3917 pci_free_consistent(ioc->pcidev, header.PageLength * 4, pcfg1Data, cfg1_dma_addr);
4023 if ((pTarget) && (pTarget->inq_data[0] == TYPE_DISK)){ 3918}
4024 for (lun=0; lun <= MPT_LAST_LUN; lun++) {
4025 /* If LUN present, issue the command
4026 */
4027 lun_index = (lun >> 5); /* 32 luns per lun_index */
4028 indexed_lun = (lun % 32);
4029 if (pTarget->luns[lun_index] & (1<<indexed_lun)) {
4030 iocmd.lun = lun;
4031 (void) mptscsih_do_cmd(hd, &iocmd);
4032 }
4033 }
4034 }
4035 3919
4036 /* get next relevant device */ 3920/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4037 id++; 3921/**
3922 * mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
3923 * @hd: Pointer to a SCSI HOST structure
 3924 * @vdevice: per device private data (supplies bus, target id and lun)
 3925 *
3926 *
3927 * Uses the ISR, but with special processing.
3928 * MUST be single-threaded.
3929 *
3930 */
3931static void
3932mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
3933{
3934 INTERNAL_CMD iocmd;
4038 3935
4039 if (id == hostId) 3936 /* Following parameters will not change
4040 id++; 3937 * in this routine.
3938 */
3939 iocmd.cmd = SYNCHRONIZE_CACHE;
3940 iocmd.flags = 0;
3941 iocmd.physDiskNum = -1;
3942 iocmd.data = NULL;
3943 iocmd.data_dma = -1;
3944 iocmd.size = 0;
3945 iocmd.rsvd = iocmd.rsvd2 = 0;
3946 iocmd.bus = vdevice->bus_id;
3947 iocmd.id = vdevice->target_id;
3948 iocmd.lun = (u8)vdevice->lun;
4041 3949
 4042 if (id > max_id) { 3950 if ((vdevice->vtarget->type == TYPE_DISK) &&
4043 id = 0; 3951 (vdevice->configured_lun))
4044 bus++; 3952 mptscsih_do_cmd(hd, &iocmd);
4045 } 3953}
4046 }
4047 3954
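
mptscsih_synchronize_cache() is now a per-device helper instead of a whole-adapter sweep; the old bus/ID/LUN triple loop is gone because the mid-layer already enumerates every attached device. A hedged sketch of the assumed call site in the slave_destroy path (names follow this driver, but the exact hookup is outside this hunk):

	static void example_slave_destroy(struct scsi_device *sdev)
	{
		MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)sdev->host->hostdata;
		VirtDevice *vdevice = sdev->hostdata;

		if (vdevice) {
			mptscsih_synchronize_cache(hd, vdevice);	/* flush if disk */
			kfree(vdevice);
			sdev->hostdata = NULL;
		}
	}
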
4048 if (pcfg1Data) { 3955/* Search IOC page 3 to determine if this is hidden physical disk
4049 pci_free_consistent(ioc->pcidev, header1.PageLength * 4, pcfg1Data, cfg1_dma_addr); 3956 */
3959static int
3960mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id)
3961{
3962 int i;
3963
3964 if (!ioc->raid_data.isRaid || !ioc->raid_data.pIocPg3)
3965 return 0;
3966
3967 for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
3968 if (id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID)
3969 return 1;
4050 } 3970 }
4051 3971
4052 return 0; 3972 return 0;
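
A typical (assumed) use of mptscsih_is_phys_disk(): keeping RAID volume members hidden from the mid-layer while the volume itself stays visible. Sketch, not the driver's exact call site:

	if (mptscsih_is_phys_disk(ioc, id))
		return -ENXIO;	/* hidden member disk: expose only the volume */
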
@@ -4101,8 +4021,8 @@ mptscsih_domainValidation(void *arg)
4101 4021
4102 msleep(250); 4022 msleep(250);
4103 4023
4104 /* DV only to SCSI adapters */ 4024 /* DV only to SPI adapters */
4105 if (ioc->bus_type != SCSI) 4025 if (ioc->bus_type != SPI)
4106 continue; 4026 continue;
4107 4027
4108 /* Make sure everything looks ok */ 4028 /* Make sure everything looks ok */
@@ -4205,32 +4125,12 @@ mptscsih_domainValidation(void *arg)
4205 return; 4125 return;
4206} 4126}
4207 4127
4208/* Search IOC page 3 to determine if this is hidden physical disk
4209 */
4210/* Search IOC page 3 to determine if this is hidden physical disk
4211 */
4212static int
4213mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id)
4214{
4215 int i;
4216
4217 if (!ioc->raid_data.isRaid || !ioc->raid_data.pIocPg3)
4218 return 0;
4219
4220 for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
4221 if (id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID)
4222 return 1;
4223 }
4224
4225 return 0;
4226}
4227
4228/* Write SDP1 if no QAS has been enabled 4128/* Write SDP1 if no QAS has been enabled
4229 */ 4129 */
4230static void 4130static void
4231mptscsih_qas_check(MPT_SCSI_HOST *hd, int id) 4131mptscsih_qas_check(MPT_SCSI_HOST *hd, int id)
4232{ 4132{
4233 VirtDevice *pTarget; 4133 VirtTarget *vtarget;
4234 int ii; 4134 int ii;
4235 4135
4236 if (hd->Targets == NULL) 4136 if (hd->Targets == NULL)
@@ -4243,11 +4143,11 @@ mptscsih_qas_check(MPT_SCSI_HOST *hd, int id)
4243 if ((hd->ioc->spi_data.dvStatus[ii] & MPT_SCSICFG_DV_NOT_DONE) != 0) 4143 if ((hd->ioc->spi_data.dvStatus[ii] & MPT_SCSICFG_DV_NOT_DONE) != 0)
4244 continue; 4144 continue;
4245 4145
4246 pTarget = hd->Targets[ii]; 4146 vtarget = hd->Targets[ii];
4247 4147
4248 if ((pTarget != NULL) && (!pTarget->raidVolume)) { 4148 if ((vtarget != NULL) && (!vtarget->raidVolume)) {
4249 if ((pTarget->negoFlags & hd->ioc->spi_data.noQas) == 0) { 4149 if ((vtarget->negoFlags & hd->ioc->spi_data.noQas) == 0) {
4250 pTarget->negoFlags |= hd->ioc->spi_data.noQas; 4150 vtarget->negoFlags |= hd->ioc->spi_data.noQas;
4251 dnegoprintk(("writeSDP1: id=%d flags=0\n", id)); 4151 dnegoprintk(("writeSDP1: id=%d flags=0\n", id));
4252 mptscsih_writeSDP1(hd, 0, ii, 0); 4152 mptscsih_writeSDP1(hd, 0, ii, 0);
4253 } 4153 }
@@ -4287,7 +4187,7 @@ static int
4287mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id) 4187mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4288{ 4188{
4289 MPT_ADAPTER *ioc = hd->ioc; 4189 MPT_ADAPTER *ioc = hd->ioc;
4290 VirtDevice *pTarget; 4190 VirtTarget *vtarget;
4291 SCSIDevicePage1_t *pcfg1Data; 4191 SCSIDevicePage1_t *pcfg1Data;
4292 SCSIDevicePage0_t *pcfg0Data; 4192 SCSIDevicePage0_t *pcfg0Data;
4293 u8 *pbuf1; 4193 u8 *pbuf1;
@@ -4358,12 +4258,12 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4358 iocmd.physDiskNum = -1; 4258 iocmd.physDiskNum = -1;
4359 iocmd.rsvd = iocmd.rsvd2 = 0; 4259 iocmd.rsvd = iocmd.rsvd2 = 0;
4360 4260
4361 pTarget = hd->Targets[id]; 4261 vtarget = hd->Targets[id];
4362 4262
4363 /* Use tagged commands if possible. 4263 /* Use tagged commands if possible.
4364 */ 4264 */
4365 if (pTarget) { 4265 if (vtarget) {
4366 if (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES) 4266 if (vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
4367 iocmd.flags |= MPT_ICFLAG_TAGGED_CMD; 4267 iocmd.flags |= MPT_ICFLAG_TAGGED_CMD;
4368 else { 4268 else {
4369 if (hd->ioc->facts.FWVersion.Word < 0x01000600) 4269 if (hd->ioc->facts.FWVersion.Word < 0x01000600)
@@ -4579,7 +4479,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4579 /* Reset the size for disks 4479 /* Reset the size for disks
4580 */ 4480 */
4581 inq0 = (*pbuf1) & 0x1F; 4481 inq0 = (*pbuf1) & 0x1F;
4582 if ((inq0 == 0) && pTarget && !pTarget->raidVolume) { 4482 if ((inq0 == 0) && vtarget && !vtarget->raidVolume) {
4583 sz = 0x40; 4483 sz = 0x40;
4584 iocmd.size = sz; 4484 iocmd.size = sz;
4585 } 4485 }
@@ -4589,8 +4489,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4589 */ 4489 */
4590 if (inq0 == TYPE_PROCESSOR) { 4490 if (inq0 == TYPE_PROCESSOR) {
4591 mptscsih_initTarget(hd, 4491 mptscsih_initTarget(hd,
4592 bus, 4492 vtarget,
4593 id,
4594 lun, 4493 lun,
4595 pbuf1, 4494 pbuf1,
4596 sz); 4495 sz);
@@ -4604,22 +4503,22 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4604 goto target_done; 4503 goto target_done;
4605 4504
4606 if (sz == 0x40) { 4505 if (sz == 0x40) {
4607 if ((pTarget->maxWidth == 1) && (pTarget->maxOffset) && (nfactor < 0x0A) 4506 if ((vtarget->maxWidth == 1) && (vtarget->maxOffset) && (nfactor < 0x0A)
4608 && (pTarget->minSyncFactor > 0x09)) { 4507 && (vtarget->minSyncFactor > 0x09)) {
4609 if ((pbuf1[56] & 0x04) == 0) 4508 if ((pbuf1[56] & 0x04) == 0)
4610 ; 4509 ;
4611 else if ((pbuf1[56] & 0x01) == 1) { 4510 else if ((pbuf1[56] & 0x01) == 1) {
4612 pTarget->minSyncFactor = 4511 vtarget->minSyncFactor =
4613 nfactor > MPT_ULTRA320 ? nfactor : MPT_ULTRA320; 4512 nfactor > MPT_ULTRA320 ? nfactor : MPT_ULTRA320;
4614 } else { 4513 } else {
4615 pTarget->minSyncFactor = 4514 vtarget->minSyncFactor =
4616 nfactor > MPT_ULTRA160 ? nfactor : MPT_ULTRA160; 4515 nfactor > MPT_ULTRA160 ? nfactor : MPT_ULTRA160;
4617 } 4516 }
4618 4517
4619 dv.max.factor = pTarget->minSyncFactor; 4518 dv.max.factor = vtarget->minSyncFactor;
4620 4519
4621 if ((pbuf1[56] & 0x02) == 0) { 4520 if ((pbuf1[56] & 0x02) == 0) {
4622 pTarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS; 4521 vtarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
4623 hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS; 4522 hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
4624 ddvprintk((MYIOC_s_NOTE_FMT 4523 ddvprintk((MYIOC_s_NOTE_FMT
4625 "DV: Start Basic noQas on id=%d due to pbuf1[56]=%x\n", 4524 "DV: Start Basic noQas on id=%d due to pbuf1[56]=%x\n",
@@ -4702,8 +4601,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4702 "DV:Inquiry compared id=%d, calling initTarget\n", ioc->name, id)); 4601 "DV:Inquiry compared id=%d, calling initTarget\n", ioc->name, id));
4703 hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_NOT_DONE; 4602 hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_NOT_DONE;
4704 mptscsih_initTarget(hd, 4603 mptscsih_initTarget(hd,
4705 bus, 4604 vtarget,
4706 id,
4707 lun, 4605 lun,
4708 pbuf1, 4606 pbuf1,
4709 sz); 4607 sz);
@@ -5204,7 +5102,7 @@ target_done:
5204static void 5102static void
5205mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage) 5103mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5206{ 5104{
5207 VirtDevice *pTarget; 5105 VirtTarget *vtarget;
5208 SCSIDevicePage0_t *pPage0; 5106 SCSIDevicePage0_t *pPage0;
5209 SCSIDevicePage1_t *pPage1; 5107 SCSIDevicePage1_t *pPage1;
5210 int val = 0, data, configuration; 5108 int val = 0, data, configuration;
@@ -5224,11 +5122,11 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5224 * already throttled back. 5122 * already throttled back.
5225 */ 5123 */
5226 negoFlags = hd->ioc->spi_data.noQas; 5124 negoFlags = hd->ioc->spi_data.noQas;
5227 if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) { 5125 if ((hd->Targets)&&((vtarget = hd->Targets[(int)id]) != NULL) && !vtarget->raidVolume) {
5228 width = pTarget->maxWidth; 5126 width = vtarget->maxWidth;
5229 offset = pTarget->maxOffset; 5127 offset = vtarget->maxOffset;
5230 factor = pTarget->minSyncFactor; 5128 factor = vtarget->minSyncFactor;
5231 negoFlags |= pTarget->negoFlags; 5129 negoFlags |= vtarget->negoFlags;
5232 } else { 5130 } else {
5233 if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) { 5131 if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
5234 data = hd->ioc->spi_data.nvram[id]; 5132 data = hd->ioc->spi_data.nvram[id];
@@ -5430,11 +5328,11 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5430 * or overwrite nvram (phys disks only). 5328 * or overwrite nvram (phys disks only).
5431 */ 5329 */
5432 5330
5433 if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume ) { 5331 if ((hd->Targets)&&((vtarget = hd->Targets[(int)id]) != NULL) && !vtarget->raidVolume ) {
5434 pTarget->maxWidth = dv->now.width; 5332 vtarget->maxWidth = dv->now.width;
5435 pTarget->maxOffset = dv->now.offset; 5333 vtarget->maxOffset = dv->now.offset;
5436 pTarget->minSyncFactor = dv->now.factor; 5334 vtarget->minSyncFactor = dv->now.factor;
5437 pTarget->negoFlags = dv->now.flags; 5335 vtarget->negoFlags = dv->now.flags;
5438 } else { 5336 } else {
 5439 /* Preserve all flags, use 5337 /* Preserve all flags, use
5440 * read-modify-write algorithm 5338 * read-modify-write algorithm
@@ -5588,6 +5486,94 @@ mptscsih_fillbuf(char *buffer, int size, int index, int width)
5588 break; 5486 break;
5589 } 5487 }
5590} 5488}
5489
5490/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 5491/* If DV is disabled (negoNvram set to USE_NVRAM) or if not LUN 0, return.
 5492 * Else set the NEED_DV flag after Read Capacity is issued (disks)
5493 * or Mode Sense (cdroms).
5494 *
 5495 * For tapes, initTarget sets this flag on completion of the Inquiry command.
5496 * Called only if DV_NOT_DONE flag is set
5497 */
5498static void
5499mptscsih_set_dvflags(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc)
5500{
5501 MPT_ADAPTER *ioc = hd->ioc;
5502 u8 cmd;
5503 SpiCfgData *pSpi;
5504
5505 ddvtprintk((MYIOC_s_NOTE_FMT
5506 " set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
 5507 hd->ioc->name, sc->device->id, sc->device->lun, hd->negoNvram, sc->cmnd[0]));
5508
5509 if ((sc->device->lun != 0) || (hd->negoNvram != 0))
5510 return;
5511
5512 cmd = sc->cmnd[0];
5513
5514 if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) {
5515 pSpi = &ioc->spi_data;
5516 if ((ioc->raid_data.isRaid & (1 << sc->device->id)) && ioc->raid_data.pIocPg3) {
5517 /* Set NEED_DV for all hidden disks
5518 */
5519 Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
5520 int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
5521
5522 while (numPDisk) {
5523 pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
5524 ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
5525 pPDisk++;
5526 numPDisk--;
5527 }
5528 }
5529 pSpi->dvStatus[sc->device->id] |= MPT_SCSICFG_NEED_DV;
5530 ddvtprintk(("NEED_DV set for visible disk id %d\n", sc->device->id));
5531 }
5532}
5533
5534/* mptscsih_raid_set_dv_flags()
5535 *
5536 * New or replaced disk. Set DV flag and schedule DV.
5537 */
5538static void
5539mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id)
5540{
5541 MPT_ADAPTER *ioc = hd->ioc;
5542 SpiCfgData *pSpi = &ioc->spi_data;
5543 Ioc3PhysDisk_t *pPDisk;
5544 int numPDisk;
5545
5546 if (hd->negoNvram != 0)
5547 return;
5548
5549 ddvtprintk(("DV requested for phys disk id %d\n", id));
5550 if (ioc->raid_data.pIocPg3) {
5551 pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
5552 numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
5553 while (numPDisk) {
5554 if (id == pPDisk->PhysDiskNum) {
5555 pSpi->dvStatus[pPDisk->PhysDiskID] =
5556 (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
5557 pSpi->forceDv = MPT_SCSICFG_NEED_DV;
5558 ddvtprintk(("NEED_DV set for phys disk id %d\n",
5559 pPDisk->PhysDiskID));
5560 break;
5561 }
5562 pPDisk++;
5563 numPDisk--;
5564 }
5565
5566 if (numPDisk == 0) {
5567 /* The physical disk that needs DV was not found
5568 * in the stored IOC Page 3. The driver must reload
5569 * this page. DV routine will set the NEED_DV flag for
5570 * all phys disks that have DV_NOT_DONE set.
5571 */
5572 pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
5573 ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n",id));
5574 }
5575 }
5576}
5591#endif /* ~MPTSCSIH_ENABLE_DOMAIN_VALIDATION */ 5577#endif /* ~MPTSCSIH_ENABLE_DOMAIN_VALIDATION */
5592 5578
5593EXPORT_SYMBOL(mptscsih_remove); 5579EXPORT_SYMBOL(mptscsih_remove);
@@ -5599,7 +5585,9 @@ EXPORT_SYMBOL(mptscsih_resume);
5599EXPORT_SYMBOL(mptscsih_proc_info); 5585EXPORT_SYMBOL(mptscsih_proc_info);
5600EXPORT_SYMBOL(mptscsih_info); 5586EXPORT_SYMBOL(mptscsih_info);
5601EXPORT_SYMBOL(mptscsih_qcmd); 5587EXPORT_SYMBOL(mptscsih_qcmd);
5588EXPORT_SYMBOL(mptscsih_target_alloc);
5602EXPORT_SYMBOL(mptscsih_slave_alloc); 5589EXPORT_SYMBOL(mptscsih_slave_alloc);
5590EXPORT_SYMBOL(mptscsih_target_destroy);
5603EXPORT_SYMBOL(mptscsih_slave_destroy); 5591EXPORT_SYMBOL(mptscsih_slave_destroy);
5604EXPORT_SYMBOL(mptscsih_slave_configure); 5592EXPORT_SYMBOL(mptscsih_slave_configure);
5605EXPORT_SYMBOL(mptscsih_abort); 5593EXPORT_SYMBOL(mptscsih_abort);
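
The two new exports implement the scsi_target lifetime hooks: per-target state (VirtTarget) is now created by the mid-layer before any LUN on that target is probed, instead of being conjured lazily inside slave_alloc. A minimal sketch of what such a hook does; field names follow this driver, the body is an assumption rather than the actual implementation:

	static int example_target_alloc(struct scsi_target *starget)
	{
		VirtTarget *vtarget = kzalloc(sizeof(*vtarget), GFP_KERNEL);

		if (!vtarget)
			return -ENOMEM;
		vtarget->target_id = starget->id;
		vtarget->bus_id = starget->channel;
		starget->hostdata = vtarget;	/* LUNs reach it via scsi_target(sdev) */
		return 0;
	}
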
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 971fda4b8b57..d3cba12f4bd9 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -91,7 +91,9 @@ extern int mptscsih_resume(struct pci_dev *pdev);
91extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func); 91extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
92extern const char * mptscsih_info(struct Scsi_Host *SChost); 92extern const char * mptscsih_info(struct Scsi_Host *SChost);
93extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)); 93extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
94extern int mptscsih_target_alloc(struct scsi_target *starget);
94extern int mptscsih_slave_alloc(struct scsi_device *device); 95extern int mptscsih_slave_alloc(struct scsi_device *device);
96extern void mptscsih_target_destroy(struct scsi_target *starget);
95extern void mptscsih_slave_destroy(struct scsi_device *device); 97extern void mptscsih_slave_destroy(struct scsi_device *device);
96extern int mptscsih_slave_configure(struct scsi_device *device); 98extern int mptscsih_slave_configure(struct scsi_device *device);
97extern int mptscsih_abort(struct scsi_cmnd * SCpnt); 99extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 5c0e307d1d5d..ce332a6085e5 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -103,13 +103,16 @@ static int mptspiTaskCtx = -1;
103static int mptspiInternalCtx = -1; /* Used only for internal commands */ 103static int mptspiInternalCtx = -1; /* Used only for internal commands */
104 104
105static struct scsi_host_template mptspi_driver_template = { 105static struct scsi_host_template mptspi_driver_template = {
106 .module = THIS_MODULE,
106 .proc_name = "mptspi", 107 .proc_name = "mptspi",
107 .proc_info = mptscsih_proc_info, 108 .proc_info = mptscsih_proc_info,
108 .name = "MPT SPI Host", 109 .name = "MPT SPI Host",
109 .info = mptscsih_info, 110 .info = mptscsih_info,
110 .queuecommand = mptscsih_qcmd, 111 .queuecommand = mptscsih_qcmd,
112 .target_alloc = mptscsih_target_alloc,
111 .slave_alloc = mptscsih_slave_alloc, 113 .slave_alloc = mptscsih_slave_alloc,
112 .slave_configure = mptscsih_slave_configure, 114 .slave_configure = mptscsih_slave_configure,
115 .target_destroy = mptscsih_target_destroy,
113 .slave_destroy = mptscsih_slave_destroy, 116 .slave_destroy = mptscsih_slave_destroy,
114 .change_queue_depth = mptscsih_change_queue_depth, 117 .change_queue_depth = mptscsih_change_queue_depth,
115 .eh_abort_handler = mptscsih_abort, 118 .eh_abort_handler = mptscsih_abort,
@@ -177,13 +180,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
177 printk(MYIOC_s_WARN_FMT 180 printk(MYIOC_s_WARN_FMT
178 "Skipping because it's not operational!\n", 181 "Skipping because it's not operational!\n",
179 ioc->name); 182 ioc->name);
180 return -ENODEV; 183 error = -ENODEV;
184 goto out_mptspi_probe;
181 } 185 }
182 186
183 if (!ioc->active) { 187 if (!ioc->active) {
184 printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n", 188 printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
185 ioc->name); 189 ioc->name);
186 return -ENODEV; 190 error = -ENODEV;
191 goto out_mptspi_probe;
187 } 192 }
188 193
189 /* Sanity check - ensure at least 1 port is INITIATOR capable 194 /* Sanity check - ensure at least 1 port is INITIATOR capable
@@ -208,7 +213,8 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
208 printk(MYIOC_s_WARN_FMT 213 printk(MYIOC_s_WARN_FMT
209 "Unable to register controller with SCSI subsystem\n", 214 "Unable to register controller with SCSI subsystem\n",
210 ioc->name); 215 ioc->name);
211 return -1; 216 error = -1;
217 goto out_mptspi_probe;
212 } 218 }
213 219
214 spin_lock_irqsave(&ioc->FreeQlock, flags); 220 spin_lock_irqsave(&ioc->FreeQlock, flags);
@@ -286,7 +292,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
286 mem = kmalloc(sz, GFP_ATOMIC); 292 mem = kmalloc(sz, GFP_ATOMIC);
287 if (mem == NULL) { 293 if (mem == NULL) {
288 error = -ENOMEM; 294 error = -ENOMEM;
289 goto mptspi_probe_failed; 295 goto out_mptspi_probe;
290 } 296 }
291 297
292 memset(mem, 0, sz); 298 memset(mem, 0, sz);
@@ -304,14 +310,14 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
304 mem = kmalloc(sz, GFP_ATOMIC); 310 mem = kmalloc(sz, GFP_ATOMIC);
305 if (mem == NULL) { 311 if (mem == NULL) {
306 error = -ENOMEM; 312 error = -ENOMEM;
307 goto mptspi_probe_failed; 313 goto out_mptspi_probe;
308 } 314 }
309 315
310 memset(mem, 0, sz); 316 memset(mem, 0, sz);
311 hd->Targets = (VirtDevice **) mem; 317 hd->Targets = (VirtTarget **) mem;
312 318
313 dprintk((KERN_INFO 319 dprintk((KERN_INFO
314 " Targets @ %p, sz=%d\n", hd->Targets, sz)); 320 " vdev @ %p, sz=%d\n", hd->Targets, sz));
315 321
316 /* Clear the TM flags 322 /* Clear the TM flags
317 */ 323 */
@@ -385,13 +391,13 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
385 if(error) { 391 if(error) {
386 dprintk((KERN_ERR MYNAM 392 dprintk((KERN_ERR MYNAM
387 "scsi_add_host failed\n")); 393 "scsi_add_host failed\n"));
388 goto mptspi_probe_failed; 394 goto out_mptspi_probe;
389 } 395 }
390 396
391 scsi_scan_host(sh); 397 scsi_scan_host(sh);
392 return 0; 398 return 0;
393 399
394mptspi_probe_failed: 400out_mptspi_probe:
395 401
396 mptscsih_remove(pdev); 402 mptscsih_remove(pdev);
397 return error; 403 return error;
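
The relabelled error path above funnels every probe failure through a single exit so teardown happens exactly once, no matter which step failed. This is the standard kernel single-exit idiom, sketched with assumed helper names:

	static int example_probe(struct pci_dev *pdev)
	{
		int error;

		if (!probe_hw_ok(pdev)) {		/* assumed helper */
			error = -ENODEV;
			goto out_probe;
		}
		if (register_with_midlayer(pdev)) {	/* assumed helper */
			error = -ENOMEM;
			goto out_probe;
		}
		return 0;

	out_probe:
		mptscsih_remove(pdev);	/* one teardown point for all failures */
		return error;
	}
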
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index 43a942a29c2e..fef677103880 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -24,6 +24,18 @@ config I2O
24 24
25 If unsure, say N. 25 If unsure, say N.
26 26
27config I2O_LCT_NOTIFY_ON_CHANGES
28 bool "Enable LCT notification"
29 depends on I2O
30 default y
31 ---help---
 32 Only say N here if you have an I2O controller from SUN. The SUN
 33 firmware doesn't support LCT notification on changes. If this option
 34 is enabled on such a controller, the driver will hang in an endless
 35 loop. On all other controllers, say Y.
36
37 If unsure, say Y.
38
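
For reference, a bool option like this is consumed in code as a CONFIG_ symbol. A sketch of the intended split, assuming the exec-OSM helpers i2o_exec_lct_notify()/i2o_exec_lct_get(); the real use site is in the IOP event-registration code, not in this hunk:

	#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
		/* ask the IOP to push LCT NOTIFY events on table changes */
		rc = i2o_exec_lct_notify(c, c->lct->change_ind);
	#else
		/* SUN firmware: never enable notify, poll the LCT instead */
		rc = i2o_exec_lct_get(c);
	#endif
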
27config I2O_EXT_ADAPTEC 39config I2O_EXT_ADAPTEC
28 bool "Enable Adaptec extensions" 40 bool "Enable Adaptec extensions"
29 depends on I2O 41 depends on I2O
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
index 151b228e1cb3..ac06f10c54ec 100644
--- a/drivers/message/i2o/bus-osm.c
+++ b/drivers/message/i2o/bus-osm.c
@@ -17,7 +17,7 @@
17#include <linux/i2o.h> 17#include <linux/i2o.h>
18 18
19#define OSM_NAME "bus-osm" 19#define OSM_NAME "bus-osm"
20#define OSM_VERSION "$Rev$" 20#define OSM_VERSION "1.317"
21#define OSM_DESCRIPTION "I2O Bus Adapter OSM" 21#define OSM_DESCRIPTION "I2O Bus Adapter OSM"
22 22
23static struct i2o_driver i2o_bus_driver; 23static struct i2o_driver i2o_bus_driver;
@@ -39,18 +39,18 @@ static struct i2o_class_id i2o_bus_class_id[] = {
39 */ 39 */
40static int i2o_bus_scan(struct i2o_device *dev) 40static int i2o_bus_scan(struct i2o_device *dev)
41{ 41{
42 struct i2o_message __iomem *msg; 42 struct i2o_message *msg;
43 u32 m;
44 43
45 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 44 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
46 if (m == I2O_QUEUE_EMPTY) 45 if (IS_ERR(msg))
47 return -ETIMEDOUT; 46 return -ETIMEDOUT;
48 47
49 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 48 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
 50 writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid, 49 msg->u.head[1] =
 51 &msg->u.head[1]); 50 cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 |
 51 dev->lct_data.tid);
52 52
53 return i2o_msg_post_wait(dev->iop, m, 60); 53 return i2o_msg_post_wait(dev->iop, msg, 60);
54}; 54};
55 55
56/** 56/**
@@ -59,8 +59,9 @@ static int i2o_bus_scan(struct i2o_device *dev)
59 * 59 *
60 * Returns count. 60 * Returns count.
61 */ 61 */
62static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf, 62static ssize_t i2o_bus_store_scan(struct device *d,
63 size_t count) 63 struct device_attribute *attr,
64 const char *buf, size_t count)
64{ 65{
65 struct i2o_device *i2o_dev = to_i2o_device(d); 66 struct i2o_device *i2o_dev = to_i2o_device(d);
66 int rc; 67 int rc;
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
index 10432f665201..3bba7aa82e58 100644
--- a/drivers/message/i2o/config-osm.c
+++ b/drivers/message/i2o/config-osm.c
@@ -22,7 +22,7 @@
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23 23
24#define OSM_NAME "config-osm" 24#define OSM_NAME "config-osm"
25#define OSM_VERSION "1.248" 25#define OSM_VERSION "1.323"
26#define OSM_DESCRIPTION "I2O Configuration OSM" 26#define OSM_DESCRIPTION "I2O Configuration OSM"
27 27
28/* access mode user rw */ 28/* access mode user rw */
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
index 9eefedb16211..90628562851e 100644
--- a/drivers/message/i2o/core.h
+++ b/drivers/message/i2o/core.h
@@ -14,8 +14,6 @@
14 */ 14 */
15 15
16/* Exec-OSM */ 16/* Exec-OSM */
17extern struct bus_type i2o_bus_type;
18
19extern struct i2o_driver i2o_exec_driver; 17extern struct i2o_driver i2o_exec_driver;
20extern int i2o_exec_lct_get(struct i2o_controller *); 18extern int i2o_exec_lct_get(struct i2o_controller *);
21 19
@@ -23,6 +21,8 @@ extern int __init i2o_exec_init(void);
23extern void __exit i2o_exec_exit(void); 21extern void __exit i2o_exec_exit(void);
24 22
25/* driver */ 23/* driver */
24extern struct bus_type i2o_bus_type;
25
26extern int i2o_driver_dispatch(struct i2o_controller *, u32); 26extern int i2o_driver_dispatch(struct i2o_controller *, u32);
27 27
28extern int __init i2o_driver_init(void); 28extern int __init i2o_driver_init(void);
@@ -33,19 +33,27 @@ extern int __init i2o_pci_init(void);
33extern void __exit i2o_pci_exit(void); 33extern void __exit i2o_pci_exit(void);
34 34
35/* device */ 35/* device */
36extern struct device_attribute i2o_device_attrs[];
37
36extern void i2o_device_remove(struct i2o_device *); 38extern void i2o_device_remove(struct i2o_device *);
37extern int i2o_device_parse_lct(struct i2o_controller *); 39extern int i2o_device_parse_lct(struct i2o_controller *);
38 40
39/* IOP */ 41/* IOP */
40extern struct i2o_controller *i2o_iop_alloc(void); 42extern struct i2o_controller *i2o_iop_alloc(void);
41extern void i2o_iop_free(struct i2o_controller *); 43
44/**
45 * i2o_iop_free - Free the i2o_controller struct
46 * @c: I2O controller to free
47 */
48static inline void i2o_iop_free(struct i2o_controller *c)
49{
50 i2o_pool_free(&c->in_msg);
51 kfree(c);
52}
42 53
43extern int i2o_iop_add(struct i2o_controller *); 54extern int i2o_iop_add(struct i2o_controller *);
44extern void i2o_iop_remove(struct i2o_controller *); 55extern void i2o_iop_remove(struct i2o_controller *);
45 56
46/* config */
47extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
48
49/* control registers relative to c->base */ 57/* control registers relative to c->base */
50#define I2O_IRQ_STATUS 0x30 58#define I2O_IRQ_STATUS 0x30
51#define I2O_IRQ_MASK 0x34 59#define I2O_IRQ_MASK 0x34
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 8eb50cdb8ae1..ee183053fa23 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -35,18 +35,18 @@
35static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd, 35static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
36 u32 type) 36 u32 type)
37{ 37{
38 struct i2o_message __iomem *msg; 38 struct i2o_message *msg;
39 u32 m;
40 39
41 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 40 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
42 if (m == I2O_QUEUE_EMPTY) 41 if (IS_ERR(msg))
43 return -ETIMEDOUT; 42 return PTR_ERR(msg);
44 43
45 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 44 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
46 writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]); 45 msg->u.head[1] =
47 writel(type, &msg->body[0]); 46 cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid);
47 msg->body[0] = cpu_to_le32(type);
48 48
49 return i2o_msg_post_wait(dev->iop, m, 60); 49 return i2o_msg_post_wait(dev->iop, msg, 60);
50} 50}
51 51
52/** 52/**
@@ -123,7 +123,6 @@ int i2o_device_claim_release(struct i2o_device *dev)
123 return rc; 123 return rc;
124} 124}
125 125
126
127/** 126/**
128 * i2o_device_release - release the memory for a I2O device 127 * i2o_device_release - release the memory for a I2O device
129 * @dev: I2O device which should be released 128 * @dev: I2O device which should be released
@@ -140,10 +139,10 @@ static void i2o_device_release(struct device *dev)
140 kfree(i2o_dev); 139 kfree(i2o_dev);
141} 140}
142 141
143
144/** 142/**
145 * i2o_device_class_show_class_id - Displays class id of I2O device 143 * i2o_device_show_class_id - Displays class id of I2O device
146 * @cd: class device of which the class id should be displayed 144 * @dev: device of which the class id should be displayed
145 * @attr: pointer to device attribute
147 * @buf: buffer into which the class id should be printed 146 * @buf: buffer into which the class id should be printed
148 * 147 *
149 * Returns the number of bytes which are printed into the buffer. 148 * Returns the number of bytes which are printed into the buffer.
@@ -159,15 +158,15 @@ static ssize_t i2o_device_show_class_id(struct device *dev,
159} 158}
160 159
161/** 160/**
162 * i2o_device_class_show_tid - Displays TID of I2O device 161 * i2o_device_show_tid - Displays TID of I2O device
163 * @cd: class device of which the TID should be displayed 162 * @dev: device of which the TID should be displayed
164 * @buf: buffer into which the class id should be printed 163 * @attr: pointer to device attribute
164 * @buf: buffer into which the TID should be printed
165 * 165 *
166 * Returns the number of bytes which are printed into the buffer. 166 * Returns the number of bytes which are printed into the buffer.
167 */ 167 */
168static ssize_t i2o_device_show_tid(struct device *dev, 168static ssize_t i2o_device_show_tid(struct device *dev,
169 struct device_attribute *attr, 169 struct device_attribute *attr, char *buf)
170 char *buf)
171{ 170{
172 struct i2o_device *i2o_dev = to_i2o_device(dev); 171 struct i2o_device *i2o_dev = to_i2o_device(dev);
173 172
@@ -175,6 +174,7 @@ static ssize_t i2o_device_show_tid(struct device *dev,
175 return strlen(buf) + 1; 174 return strlen(buf) + 1;
176} 175}
177 176
177/* I2O device attributes */
178struct device_attribute i2o_device_attrs[] = { 178struct device_attribute i2o_device_attrs[] = {
179 __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL), 179 __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL),
180 __ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL), 180 __ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL),
@@ -193,12 +193,10 @@ static struct i2o_device *i2o_device_alloc(void)
193{ 193{
194 struct i2o_device *dev; 194 struct i2o_device *dev;
195 195
196 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 196 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
197 if (!dev) 197 if (!dev)
198 return ERR_PTR(-ENOMEM); 198 return ERR_PTR(-ENOMEM);
199 199
200 memset(dev, 0, sizeof(*dev));
201
202 INIT_LIST_HEAD(&dev->list); 200 INIT_LIST_HEAD(&dev->list);
203 init_MUTEX(&dev->lock); 201 init_MUTEX(&dev->lock);
204 202
@@ -209,66 +207,6 @@ static struct i2o_device *i2o_device_alloc(void)
209} 207}
210 208
211/** 209/**
212 * i2o_setup_sysfs_links - Adds attributes to the I2O device
213 * @cd: I2O class device which is added to the I2O device class
214 *
215 * This function get called when a I2O device is added to the class. It
216 * creates the attributes for each device and creates user/parent symlink
217 * if necessary.
218 *
219 * Returns 0 on success or negative error code on failure.
220 */
221static void i2o_setup_sysfs_links(struct i2o_device *i2o_dev)
222{
223 struct i2o_controller *c = i2o_dev->iop;
224 struct i2o_device *tmp;
225
226 /* create user entries for this device */
227 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
228 if (tmp && tmp != i2o_dev)
229 sysfs_create_link(&i2o_dev->device.kobj,
230 &tmp->device.kobj, "user");
231
232 /* create user entries refering to this device */
233 list_for_each_entry(tmp, &c->devices, list)
234 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid &&
235 tmp != i2o_dev)
236 sysfs_create_link(&tmp->device.kobj,
237 &i2o_dev->device.kobj, "user");
238
239 /* create parent entries for this device */
240 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
241 if (tmp && tmp != i2o_dev)
242 sysfs_create_link(&i2o_dev->device.kobj,
243 &tmp->device.kobj, "parent");
244
245 /* create parent entries refering to this device */
246 list_for_each_entry(tmp, &c->devices, list)
247 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid &&
248 tmp != i2o_dev)
249 sysfs_create_link(&tmp->device.kobj,
250 &i2o_dev->device.kobj, "parent");
251}
252
253static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev)
254{
255 struct i2o_controller *c = i2o_dev->iop;
256 struct i2o_device *tmp;
257
258 sysfs_remove_link(&i2o_dev->device.kobj, "parent");
259 sysfs_remove_link(&i2o_dev->device.kobj, "user");
260
261 list_for_each_entry(tmp, &c->devices, list) {
262 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
263 sysfs_remove_link(&tmp->device.kobj, "parent");
264 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
265 sysfs_remove_link(&tmp->device.kobj, "user");
266 }
267}
268
269
270
271/**
272 * i2o_device_add - allocate a new I2O device and add it to the IOP 210 * i2o_device_add - allocate a new I2O device and add it to the IOP
273 * @iop: I2O controller where the device is on 211 * @iop: I2O controller where the device is on
274 * @entry: LCT entry of the I2O device 212 * @entry: LCT entry of the I2O device
@@ -282,33 +220,57 @@ static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev)
282static struct i2o_device *i2o_device_add(struct i2o_controller *c, 220static struct i2o_device *i2o_device_add(struct i2o_controller *c,
283 i2o_lct_entry * entry) 221 i2o_lct_entry * entry)
284{ 222{
285 struct i2o_device *dev; 223 struct i2o_device *i2o_dev, *tmp;
286 224
287 dev = i2o_device_alloc(); 225 i2o_dev = i2o_device_alloc();
288 if (IS_ERR(dev)) { 226 if (IS_ERR(i2o_dev)) {
289 printk(KERN_ERR "i2o: unable to allocate i2o device\n"); 227 printk(KERN_ERR "i2o: unable to allocate i2o device\n");
290 return dev; 228 return i2o_dev;
291 } 229 }
292 230
293 dev->lct_data = *entry; 231 i2o_dev->lct_data = *entry;
294 dev->iop = c;
295 232
296 snprintf(dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit, 233 snprintf(i2o_dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit,
297 dev->lct_data.tid); 234 i2o_dev->lct_data.tid);
298 235
299 dev->device.parent = &c->device; 236 i2o_dev->iop = c;
237 i2o_dev->device.parent = &c->device;
300 238
301 device_register(&dev->device); 239 device_register(&i2o_dev->device);
302 240
303 list_add_tail(&dev->list, &c->devices); 241 list_add_tail(&i2o_dev->list, &c->devices);
304 242
305 i2o_setup_sysfs_links(dev); 243 /* create user entries for this device */
244 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
245 if (tmp && (tmp != i2o_dev))
246 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
247 "user");
306 248
307 i2o_driver_notify_device_add_all(dev); 249 /* create user entries refering to this device */
250 list_for_each_entry(tmp, &c->devices, list)
251 if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
252 && (tmp != i2o_dev))
253 sysfs_create_link(&tmp->device.kobj,
254 &i2o_dev->device.kobj, "user");
308 255
309 pr_debug("i2o: device %s added\n", dev->device.bus_id); 256 /* create parent entries for this device */
257 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
258 if (tmp && (tmp != i2o_dev))
259 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
260 "parent");
310 261
311 return dev; 262 /* create parent entries refering to this device */
263 list_for_each_entry(tmp, &c->devices, list)
264 if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
265 && (tmp != i2o_dev))
266 sysfs_create_link(&tmp->device.kobj,
267 &i2o_dev->device.kobj, "parent");
268
269 i2o_driver_notify_device_add_all(i2o_dev);
270
271 pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id);
272
273 return i2o_dev;
312} 274}
313 275
314/** 276/**
@@ -321,9 +283,22 @@ static struct i2o_device *i2o_device_add(struct i2o_controller *c,
321 */ 283 */
322void i2o_device_remove(struct i2o_device *i2o_dev) 284void i2o_device_remove(struct i2o_device *i2o_dev)
323{ 285{
286 struct i2o_device *tmp;
287 struct i2o_controller *c = i2o_dev->iop;
288
324 i2o_driver_notify_device_remove_all(i2o_dev); 289 i2o_driver_notify_device_remove_all(i2o_dev);
325 i2o_remove_sysfs_links(i2o_dev); 290
291 sysfs_remove_link(&i2o_dev->device.kobj, "parent");
292 sysfs_remove_link(&i2o_dev->device.kobj, "user");
293
294 list_for_each_entry(tmp, &c->devices, list) {
295 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
296 sysfs_remove_link(&tmp->device.kobj, "parent");
297 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
298 sysfs_remove_link(&tmp->device.kobj, "user");
299 }
326 list_del(&i2o_dev->list); 300 list_del(&i2o_dev->list);
301
327 device_unregister(&i2o_dev->device); 302 device_unregister(&i2o_dev->device);
328} 303}
329 304
@@ -341,56 +316,83 @@ int i2o_device_parse_lct(struct i2o_controller *c)
341{ 316{
342 struct i2o_device *dev, *tmp; 317 struct i2o_device *dev, *tmp;
343 i2o_lct *lct; 318 i2o_lct *lct;
344 int i; 319 u32 *dlct = c->dlct.virt;
345 int max; 320 int max = 0, i = 0;
321 u16 table_size;
322 u32 buf;
346 323
347 down(&c->lct_lock); 324 down(&c->lct_lock);
348 325
349 kfree(c->lct); 326 kfree(c->lct);
350 327
351 lct = c->dlct.virt; 328 buf = le32_to_cpu(*dlct++);
329 table_size = buf & 0xffff;
352 330
353 c->lct = kmalloc(lct->table_size * 4, GFP_KERNEL); 331 lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL);
354 if (!c->lct) { 332 if (!lct) {
355 up(&c->lct_lock); 333 up(&c->lct_lock);
356 return -ENOMEM; 334 return -ENOMEM;
357 } 335 }
358 336
359 if (lct->table_size * 4 > c->dlct.len) { 337 lct->lct_ver = buf >> 28;
360 memcpy(c->lct, c->dlct.virt, c->dlct.len); 338 lct->boot_tid = buf >> 16 & 0xfff;
361 up(&c->lct_lock); 339 lct->table_size = table_size;
362 return -EAGAIN; 340 lct->change_ind = le32_to_cpu(*dlct++);
363 } 341 lct->iop_flags = le32_to_cpu(*dlct++);
364 342
365 memcpy(c->lct, c->dlct.virt, lct->table_size * 4); 343 table_size -= 3;
366
367 lct = c->lct;
368
369 max = (lct->table_size - 3) / 9;
370 344
371 pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max, 345 pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max,
372 lct->table_size); 346 lct->table_size);
373 347
374 /* remove devices, which are not in the LCT anymore */ 348 while (table_size > 0) {
375 list_for_each_entry_safe(dev, tmp, &c->devices, list) { 349 i2o_lct_entry *entry = &lct->lct_entry[max];
376 int found = 0; 350 int found = 0;
377 351
378 for (i = 0; i < max; i++) { 352 buf = le32_to_cpu(*dlct++);
379 if (lct->lct_entry[i].tid == dev->lct_data.tid) { 353 entry->entry_size = buf & 0xffff;
354 entry->tid = buf >> 16 & 0xfff;
355
356 entry->change_ind = le32_to_cpu(*dlct++);
357 entry->device_flags = le32_to_cpu(*dlct++);
358
359 buf = le32_to_cpu(*dlct++);
360 entry->class_id = buf & 0xfff;
361 entry->version = buf >> 12 & 0xf;
362 entry->vendor_id = buf >> 16;
363
364 entry->sub_class = le32_to_cpu(*dlct++);
365
366 buf = le32_to_cpu(*dlct++);
367 entry->user_tid = buf & 0xfff;
368 entry->parent_tid = buf >> 12 & 0xfff;
369 entry->bios_info = buf >> 24;
370
371 memcpy(&entry->identity_tag, dlct, 8);
372 dlct += 2;
373
374 entry->event_capabilities = le32_to_cpu(*dlct++);
375
376 /* add new devices, which are new in the LCT */
377 list_for_each_entry_safe(dev, tmp, &c->devices, list) {
378 if (entry->tid == dev->lct_data.tid) {
380 found = 1; 379 found = 1;
381 break; 380 break;
382 } 381 }
383 } 382 }
384 383
385 if (!found) 384 if (!found)
386 i2o_device_remove(dev); 385 i2o_device_add(c, entry);
386
387 table_size -= 9;
388 max++;
387 } 389 }
388 390
389 /* add new devices, which are new in the LCT */ 391 /* remove devices, which are not in the LCT anymore */
390 for (i = 0; i < max; i++) { 392 list_for_each_entry_safe(dev, tmp, &c->devices, list) {
391 int found = 0; 393 int found = 0;
392 394
393 list_for_each_entry_safe(dev, tmp, &c->devices, list) { 395 for (i = 0; i < max; i++) {
394 if (lct->lct_entry[i].tid == dev->lct_data.tid) { 396 if (lct->lct_entry[i].tid == dev->lct_data.tid) {
395 found = 1; 397 found = 1;
396 break; 398 break;
@@ -398,14 +400,14 @@ int i2o_device_parse_lct(struct i2o_controller *c)
398 } 400 }
399 401
400 if (!found) 402 if (!found)
401 i2o_device_add(c, &lct->lct_entry[i]); 403 i2o_device_remove(dev);
402 } 404 }
405
403 up(&c->lct_lock); 406 up(&c->lct_lock);
404 407
405 return 0; 408 return 0;
406} 409}
407 410
408
409/* 411/*
410 * Run time support routines 412 * Run time support routines
411 */ 413 */
@@ -419,13 +421,9 @@ int i2o_device_parse_lct(struct i2o_controller *c)
419 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. 421 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
420 */ 422 */
421int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, 423int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
422 int oplen, void *reslist, int reslen) 424 int oplen, void *reslist, int reslen)
423{ 425{
424 struct i2o_message __iomem *msg; 426 struct i2o_message *msg;
425 u32 m;
426 u32 *res32 = (u32 *) reslist;
427 u32 *restmp = (u32 *) reslist;
428 int len = 0;
429 int i = 0; 427 int i = 0;
430 int rc; 428 int rc;
431 struct i2o_dma res; 429 struct i2o_dma res;
@@ -437,26 +435,27 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
437 if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL)) 435 if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
438 return -ENOMEM; 436 return -ENOMEM;
439 437
440 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 438 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
441 if (m == I2O_QUEUE_EMPTY) { 439 if (IS_ERR(msg)) {
442 i2o_dma_free(dev, &res); 440 i2o_dma_free(dev, &res);
443 return -ETIMEDOUT; 441 return PTR_ERR(msg);
444 } 442 }
445 443
446 i = 0; 444 i = 0;
447 writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid, 445 msg->u.head[1] =
448 &msg->u.head[1]); 446 cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid);
449 writel(0, &msg->body[i++]); 447 msg->body[i++] = cpu_to_le32(0x00000000);
450 writel(0x4C000000 | oplen, &msg->body[i++]); /* OperationList */ 448 msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */
451 memcpy_toio(&msg->body[i], oplist, oplen); 449 memcpy(&msg->body[i], oplist, oplen);
452 i += (oplen / 4 + (oplen % 4 ? 1 : 0)); 450 i += (oplen / 4 + (oplen % 4 ? 1 : 0));
453 writel(0xD0000000 | res.len, &msg->body[i++]); /* ResultList */ 451 msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */
454 writel(res.phys, &msg->body[i++]); 452 msg->body[i++] = cpu_to_le32(res.phys);
455 453
456 writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) | 454 msg->u.head[0] =
457 SGL_OFFSET_5, &msg->u.head[0]); 455 cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
456 SGL_OFFSET_5);
458 457
459 rc = i2o_msg_post_wait_mem(c, m, 10, &res); 458 rc = i2o_msg_post_wait_mem(c, msg, 10, &res);
460 459
461 /* This only looks like a memory leak - don't "fix" it. */ 460 /* This only looks like a memory leak - don't "fix" it. */
462 if (rc == -ETIMEDOUT) 461 if (rc == -ETIMEDOUT)
@@ -465,36 +464,7 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
465 memcpy(reslist, res.virt, res.len); 464 memcpy(reslist, res.virt, res.len);
466 i2o_dma_free(dev, &res); 465 i2o_dma_free(dev, &res);
467 466
468 /* Query failed */ 467 return rc;
469 if (rc)
470 return rc;
471 /*
472 * Calculate number of bytes of Result LIST
473 * We need to loop through each Result BLOCK and grab the length
474 */
475 restmp = res32 + 1;
476 len = 1;
477 for (i = 0; i < (res32[0] & 0X0000FFFF); i++) {
478 if (restmp[0] & 0x00FF0000) { /* BlockStatus != SUCCESS */
479 printk(KERN_WARNING
480 "%s - Error:\n ErrorInfoSize = 0x%02x, "
481 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
482 (cmd ==
483 I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" :
484 "PARAMS_GET", res32[1] >> 24,
485 (res32[1] >> 16) & 0xFF, res32[1] & 0xFFFF);
486
487 /*
 488 * If this is the only request, then we return an error
489 */
490 if ((res32[0] & 0x0000FFFF) == 1) {
491 return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */
492 }
493 }
494 len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */
495 restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */
496 }
497 return (len << 2); /* bytes used by result list */
498} 468}
499 469
500/* 470/*
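The i2o_parm_issue() conversion above is the pattern this whole series applies: the old pair of return values (a frame offset m plus a mapped pointer, with I2O_QUEUE_EMPTY as the failure sentinel) becomes a single pointer that encodes the errno via ERR_PTR(). A minimal sketch of the idiom, assuming only the standard helpers from <linux/err.h>; the frame type and getter below are placeholders, not the driver's real API:

#include <linux/types.h>
#include <linux/err.h>
#include <linux/errno.h>

struct msg_frame {			/* stand-in for struct i2o_message */
	u32 head[4];
};

static struct msg_frame pool_frame;

/* Hypothetical getter: hands out a frame or an errno encoded as a pointer. */
static struct msg_frame *frame_get(int queue_empty)
{
	if (queue_empty)
		return ERR_PTR(-ETIMEDOUT);	/* was: return I2O_QUEUE_EMPTY */
	return &pool_frame;
}

static int frame_get_example(void)
{
	struct msg_frame *msg = frame_get(0);

	if (IS_ERR(msg))		/* one test replaces the old m/msg pair */
		return PTR_ERR(msg);
	/* ... fill msg->head[] and post it ... */
	return 0;
}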
@@ -503,28 +473,25 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
503int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field, 473int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
504 void *buf, int buflen) 474 void *buf, int buflen)
505{ 475{
506 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; 476 u32 opblk[] = { cpu_to_le32(0x00000001),
477 cpu_to_le32((u16) group << 16 | I2O_PARAMS_FIELD_GET),
478 cpu_to_le32((s16) field << 16 | 0x00000001)
479 };
507 u8 *resblk; /* 8 bytes for header */ 480 u8 *resblk; /* 8 bytes for header */
508 int size; 481 int rc;
509
510 if (field == -1) /* whole group */
511 opblk[4] = -1;
512 482
 513 resblk = kmalloc(buflen + 8, GFP_KERNEL); 483 resblk = kmalloc(buflen + 8, GFP_KERNEL);
514 if (!resblk) 484 if (!resblk)
515 return -ENOMEM; 485 return -ENOMEM;
516 486
517 size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, 487 rc = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
518 sizeof(opblk), resblk, buflen + 8); 488 sizeof(opblk), resblk, buflen + 8);
519 489
520 memcpy(buf, resblk + 8, buflen); /* cut off header */ 490 memcpy(buf, resblk + 8, buflen); /* cut off header */
521 491
522 kfree(resblk); 492 kfree(resblk);
523 493
524 if (size > buflen) 494 return rc;
525 return buflen;
526
527 return size;
528} 495}
529 496
530/* 497/*
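The rewritten i2o_parm_field_get() above packs the PARAMS_GET operation block as three little-endian 32-bit words instead of six u16s: word 0 carries the operation count, word 1 the group number over the operation code, word 2 the field index over the field count, with field == -1 (sign-extended) selecting the whole group. A hedged sketch of that packing; the I2O_PARAMS_FIELD_GET value is an assumption taken from the I2O headers, and htole32() stands in for cpu_to_le32():

#include <stdint.h>
#include <endian.h>

#define I2O_PARAMS_FIELD_GET 0x0001	/* assumed opcode value */

/* Build the 3-word operation block used by the PARAMS_GET rewrite above:
 * one operation, reading one field (or the whole group for field == -1). */
static void build_field_get_opblk(uint16_t group, int16_t field,
				  uint32_t opblk[3])
{
	opblk[0] = htole32(0x00000001);				/* op count */
	opblk[1] = htole32((uint32_t)group << 16 | I2O_PARAMS_FIELD_GET);
	opblk[2] = htole32((uint32_t)(uint16_t)field << 16 | 0x00000001);
}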
@@ -534,12 +501,12 @@ int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
534 * else return specific fields 501 * else return specific fields
535 * ibuf contains fieldindexes 502 * ibuf contains fieldindexes
536 * 503 *
537 * if oper == I2O_PARAMS_LIST_GET, get from specific rows 504 * if oper == I2O_PARAMS_LIST_GET, get from specific rows
538 * if fieldcount == -1 return all fields 505 * if fieldcount == -1 return all fields
539 * ibuf contains rowcount, keyvalues 506 * ibuf contains rowcount, keyvalues
540 * else return specific fields 507 * else return specific fields
541 * fieldcount is # of fieldindexes 508 * fieldcount is # of fieldindexes
542 * ibuf contains fieldindexes, rowcount, keyvalues 509 * ibuf contains fieldindexes, rowcount, keyvalues
543 * 510 *
 544 * You could also use the i2o_issue_params() function directly. 511 * You could also use the i2o_issue_params() function directly.
545 */ 512 */
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 0fb9c4e2ad4c..64130227574f 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -61,12 +61,10 @@ static int i2o_bus_match(struct device *dev, struct device_driver *drv)
61}; 61};
62 62
63/* I2O bus type */ 63/* I2O bus type */
64extern struct device_attribute i2o_device_attrs[];
65
66struct bus_type i2o_bus_type = { 64struct bus_type i2o_bus_type = {
67 .name = "i2o", 65 .name = "i2o",
68 .match = i2o_bus_match, 66 .match = i2o_bus_match,
69 .dev_attrs = i2o_device_attrs, 67 .dev_attrs = i2o_device_attrs
70}; 68};
71 69
72/** 70/**
@@ -219,14 +217,14 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
 219 /* cut off header from message size (in 32-bit words) */ 217 /* cut off header from message size (in 32-bit words) */
220 size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5; 218 size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5;
221 219
222 evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC | __GFP_ZERO); 220 evt = kzalloc(size * 4 + sizeof(*evt), GFP_ATOMIC);
223 if (!evt) 221 if (!evt)
224 return -ENOMEM; 222 return -ENOMEM;
225 223
226 evt->size = size; 224 evt->size = size;
227 evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt); 225 evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt);
228 evt->event_indicator = le32_to_cpu(msg->body[0]); 226 evt->event_indicator = le32_to_cpu(msg->body[0]);
229 memcpy(&evt->tcntxt, &msg->u.s.tcntxt, size * 4); 227 memcpy(&evt->data, &msg->body[1], size * 4);
230 228
231 list_for_each_entry_safe(dev, tmp, &c->devices, list) 229 list_for_each_entry_safe(dev, tmp, &c->devices, list)
232 if (dev->lct_data.tid == tid) { 230 if (dev->lct_data.tid == tid) {
@@ -349,12 +347,10 @@ int __init i2o_driver_init(void)
349 osm_info("max drivers = %d\n", i2o_max_drivers); 347 osm_info("max drivers = %d\n", i2o_max_drivers);
350 348
351 i2o_drivers = 349 i2o_drivers =
352 kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL); 350 kzalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL);
353 if (!i2o_drivers) 351 if (!i2o_drivers)
354 return -ENOMEM; 352 return -ENOMEM;
355 353
356 memset(i2o_drivers, 0, i2o_max_drivers * sizeof(*i2o_drivers));
357
358 rc = bus_register(&i2o_bus_type); 354 rc = bus_register(&i2o_bus_type);
359 355
360 if (rc < 0) 356 if (rc < 0)
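Several hunks in this file collapse the kmalloc()-plus-memset() pair into kzalloc(), which allocates and zeroes in one call; the same change recurs in exec-osm.c, i2o_block.c and i2o_config.c below. A trivial before/after sketch with a made-up structure:

#include <linux/slab.h>
#include <linux/string.h>

struct item {
	int a;
	char name[16];
};

/* Old idiom: allocate, then zero by hand. */
static struct item *item_alloc_old(void)
{
	struct item *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		memset(p, 0, sizeof(*p));
	return p;
}

/* New idiom from the hunks above: one call, memory arrives zeroed. */
static struct item *item_alloc_new(void)
{
	return kzalloc(sizeof(struct item), GFP_KERNEL);
}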
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 9c339a2505b0..9bb9859f6dfe 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -33,7 +33,7 @@
33#include <linux/workqueue.h> 33#include <linux/workqueue.h>
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */ 36#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */
37#include <asm/param.h> /* HZ */ 37#include <asm/param.h> /* HZ */
38#include "core.h" 38#include "core.h"
39 39
@@ -75,11 +75,9 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
75{ 75{
76 struct i2o_exec_wait *wait; 76 struct i2o_exec_wait *wait;
77 77
78 wait = kmalloc(sizeof(*wait), GFP_KERNEL); 78 wait = kzalloc(sizeof(*wait), GFP_KERNEL);
79 if (!wait) 79 if (!wait)
80 return ERR_PTR(-ENOMEM); 80 return NULL;
81
82 memset(wait, 0, sizeof(*wait));
83 81
84 INIT_LIST_HEAD(&wait->list); 82 INIT_LIST_HEAD(&wait->list);
85 83
@@ -114,13 +112,12 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
114 * Returns 0 on success, negative error code on timeout or positive error 112 * Returns 0 on success, negative error code on timeout or positive error
115 * code from reply. 113 * code from reply.
116 */ 114 */
117int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long 115int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
118 timeout, struct i2o_dma *dma) 116 unsigned long timeout, struct i2o_dma *dma)
119{ 117{
120 DECLARE_WAIT_QUEUE_HEAD(wq); 118 DECLARE_WAIT_QUEUE_HEAD(wq);
121 struct i2o_exec_wait *wait; 119 struct i2o_exec_wait *wait;
122 static u32 tcntxt = 0x80000000; 120 static u32 tcntxt = 0x80000000;
123 struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
124 int rc = 0; 121 int rc = 0;
125 122
126 wait = i2o_exec_wait_alloc(); 123 wait = i2o_exec_wait_alloc();
@@ -138,15 +135,15 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
138 * We will only use transaction contexts >= 0x80000000 for POST WAIT, 135 * We will only use transaction contexts >= 0x80000000 for POST WAIT,
139 * so we could find a POST WAIT reply easier in the reply handler. 136 * so we could find a POST WAIT reply easier in the reply handler.
140 */ 137 */
141 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 138 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
142 wait->tcntxt = tcntxt++; 139 wait->tcntxt = tcntxt++;
143 writel(wait->tcntxt, &msg->u.s.tcntxt); 140 msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
144 141
145 /* 142 /*
146 * Post the message to the controller. At some point later it will 143 * Post the message to the controller. At some point later it will
147 * return. If we time out before it returns then complete will be zero. 144 * return. If we time out before it returns then complete will be zero.
148 */ 145 */
149 i2o_msg_post(c, m); 146 i2o_msg_post(c, msg);
150 147
151 if (!wait->complete) { 148 if (!wait->complete) {
152 wait->wq = &wq; 149 wait->wq = &wq;
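The post-wait path above tags each outgoing frame with a transaction context >= 0x80000000 so the reply handler can find the sleeping request, then blocks on a wait queue until the handler flags completion. A condensed sketch of that sleep/wake pairing, with the locking and wait-object lifetime handling of the real code deliberately omitted:

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

struct post_wait {
	wait_queue_head_t *wq;	/* where the submitter sleeps */
	int complete;		/* set by the reply side */
	u32 tcntxt;		/* matching key, >= 0x80000000 */
};

/* Submitter: post the frame, then sleep until the reply arrives. */
static int post_and_wait(struct post_wait *w, wait_queue_head_t *wq,
			 unsigned long timeout_jiffies)
{
	w->wq = wq;
	wait_event_interruptible_timeout(*wq, w->complete, timeout_jiffies);
	w->wq = NULL;
	return w->complete ? 0 : -ETIMEDOUT;
}

/* Reply handler: called when a frame with a matching tcntxt comes back. */
static void post_complete(struct post_wait *w)
{
	w->complete = 1;
	if (w->wq)
		wake_up_interruptible(w->wq);
}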
@@ -266,13 +263,14 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
266 * 263 *
267 * Returns number of bytes printed into buffer. 264 * Returns number of bytes printed into buffer.
268 */ 265 */
269static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute *attr, char *buf) 266static ssize_t i2o_exec_show_vendor_id(struct device *d,
267 struct device_attribute *attr, char *buf)
270{ 268{
271 struct i2o_device *dev = to_i2o_device(d); 269 struct i2o_device *dev = to_i2o_device(d);
272 u16 id; 270 u16 id;
273 271
274 if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) { 272 if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
275 sprintf(buf, "0x%04x", id); 273 sprintf(buf, "0x%04x", le16_to_cpu(id));
276 return strlen(buf) + 1; 274 return strlen(buf) + 1;
277 } 275 }
278 276
@@ -286,13 +284,15 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute
286 * 284 *
287 * Returns number of bytes printed into buffer. 285 * Returns number of bytes printed into buffer.
288 */ 286 */
289static ssize_t i2o_exec_show_product_id(struct device *d, struct device_attribute *attr, char *buf) 287static ssize_t i2o_exec_show_product_id(struct device *d,
288 struct device_attribute *attr,
289 char *buf)
290{ 290{
291 struct i2o_device *dev = to_i2o_device(d); 291 struct i2o_device *dev = to_i2o_device(d);
292 u16 id; 292 u16 id;
293 293
294 if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) { 294 if (!i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
295 sprintf(buf, "0x%04x", id); 295 sprintf(buf, "0x%04x", le16_to_cpu(id));
296 return strlen(buf) + 1; 296 return strlen(buf) + 1;
297 } 297 }
298 298
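Both attribute hunks above flip the test from if (i2o_parm_field_get(...)) to if (!i2o_parm_field_get(...)): the rewritten helper now returns 0 on success and a negative errno on failure instead of the old byte count, and the fetched field arrives little-endian. A caller sketch under that convention, reusing the document's own i2o_parm_field_get(); the wrapper name is hypothetical:

/* Hypothetical wrapper; group 0x0000 / field 1 taken from the hunk above. */
static int read_product_id(struct i2o_device *dev, u16 *id)
{
	int rc = i2o_parm_field_get(dev, 0x0000, 1, id, sizeof(*id));

	if (rc)				/* negative errno: query failed */
		return rc;
	*id = le16_to_cpu(*id);		/* result arrives little-endian */
	return 0;
}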
@@ -362,7 +362,9 @@ static void i2o_exec_lct_modified(struct i2o_controller *c)
362 if (i2o_device_parse_lct(c) != -EAGAIN) 362 if (i2o_device_parse_lct(c) != -EAGAIN)
363 change_ind = c->lct->change_ind + 1; 363 change_ind = c->lct->change_ind + 1;
364 364
365#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
365 i2o_exec_lct_notify(c, change_ind); 366 i2o_exec_lct_notify(c, change_ind);
367#endif
366}; 368};
367 369
368/** 370/**
@@ -385,23 +387,22 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
385 u32 context; 387 u32 context;
386 388
387 if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { 389 if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
390 struct i2o_message __iomem *pmsg;
391 u32 pm;
392
388 /* 393 /*
389 * If Fail bit is set we must take the transaction context of 394 * If Fail bit is set we must take the transaction context of
390 * the preserved message to find the right request again. 395 * the preserved message to find the right request again.
391 */ 396 */
392 struct i2o_message __iomem *pmsg;
393 u32 pm;
394 397
395 pm = le32_to_cpu(msg->body[3]); 398 pm = le32_to_cpu(msg->body[3]);
396
397 pmsg = i2o_msg_in_to_virt(c, pm); 399 pmsg = i2o_msg_in_to_virt(c, pm);
400 context = readl(&pmsg->u.s.tcntxt);
398 401
399 i2o_report_status(KERN_INFO, "i2o_core", msg); 402 i2o_report_status(KERN_INFO, "i2o_core", msg);
400 403
401 context = readl(&pmsg->u.s.tcntxt);
402
403 /* Release the preserved msg */ 404 /* Release the preserved msg */
404 i2o_msg_nop(c, pm); 405 i2o_msg_nop_mfa(c, pm);
405 } else 406 } else
406 context = le32_to_cpu(msg->u.s.tcntxt); 407 context = le32_to_cpu(msg->u.s.tcntxt);
407 408
@@ -462,25 +463,26 @@ static void i2o_exec_event(struct i2o_event *evt)
462 */ 463 */
463int i2o_exec_lct_get(struct i2o_controller *c) 464int i2o_exec_lct_get(struct i2o_controller *c)
464{ 465{
465 struct i2o_message __iomem *msg; 466 struct i2o_message *msg;
466 u32 m;
467 int i = 0; 467 int i = 0;
468 int rc = -EAGAIN; 468 int rc = -EAGAIN;
469 469
470 for (i = 1; i <= I2O_LCT_GET_TRIES; i++) { 470 for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
471 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 471 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
472 if (m == I2O_QUEUE_EMPTY) 472 if (IS_ERR(msg))
473 return -ETIMEDOUT; 473 return PTR_ERR(msg);
474 474
475 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 475 msg->u.head[0] =
476 writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, 476 cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
477 &msg->u.head[1]); 477 msg->u.head[1] =
478 writel(0xffffffff, &msg->body[0]); 478 cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
479 writel(0x00000000, &msg->body[1]); 479 ADAPTER_TID);
480 writel(0xd0000000 | c->dlct.len, &msg->body[2]); 480 msg->body[0] = cpu_to_le32(0xffffffff);
481 writel(c->dlct.phys, &msg->body[3]); 481 msg->body[1] = cpu_to_le32(0x00000000);
482 482 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
483 rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET); 483 msg->body[3] = cpu_to_le32(c->dlct.phys);
484
485 rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET);
484 if (rc < 0) 486 if (rc < 0)
485 break; 487 break;
486 488
@@ -506,29 +508,29 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
506{ 508{
507 i2o_status_block *sb = c->status_block.virt; 509 i2o_status_block *sb = c->status_block.virt;
508 struct device *dev; 510 struct device *dev;
509 struct i2o_message __iomem *msg; 511 struct i2o_message *msg;
510 u32 m;
511 512
512 dev = &c->pdev->dev; 513 dev = &c->pdev->dev;
513 514
514 if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL)) 515 if (i2o_dma_realloc
516 (dev, &c->dlct, le32_to_cpu(sb->expected_lct_size), GFP_KERNEL))
515 return -ENOMEM; 517 return -ENOMEM;
516 518
517 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 519 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
518 if (m == I2O_QUEUE_EMPTY) 520 if (IS_ERR(msg))
519 return -ETIMEDOUT; 521 return PTR_ERR(msg);
520 522
521 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 523 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
522 writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, 524 msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
523 &msg->u.head[1]); 525 ADAPTER_TID);
524 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 526 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
525 writel(0, &msg->u.s.tcntxt); /* FIXME */ 527 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
526 writel(0xffffffff, &msg->body[0]); 528 msg->body[0] = cpu_to_le32(0xffffffff);
527 writel(change_ind, &msg->body[1]); 529 msg->body[1] = cpu_to_le32(change_ind);
528 writel(0xd0000000 | c->dlct.len, &msg->body[2]); 530 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
529 writel(c->dlct.phys, &msg->body[3]); 531 msg->body[3] = cpu_to_le32(c->dlct.phys);
530 532
531 i2o_msg_post(c, m); 533 i2o_msg_post(c, msg);
532 534
533 return 0; 535 return 0;
534}; 536};
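With the __iomem message frames gone, i2o_exec_lct_notify() above fills ordinary memory with cpu_to_le32() stores instead of writel() posts. A generic sketch of that construction; the struct below is a simplified stand-in for struct i2o_message, and head0/head1 carry the size/SGL and function/TID words exactly as composed in the hunk:

#include <linux/types.h>
#include <asm/byteorder.h>

struct frame {				/* simplified i2o_message stand-in */
	__le32 head[2];
	__le32 icntxt, tcntxt;
	__le32 body[4];
};

/* Fill an LCT-notify-shaped frame the way the hunk above does. */
static void fill_lct_notify(struct frame *msg, u32 head0, u32 head1,
			    u32 context, u32 change_ind,
			    u32 dlct_len, u32 dlct_phys)
{
	msg->head[0] = cpu_to_le32(head0);	/* size | SGL offset */
	msg->head[1] = cpu_to_le32(head1);	/* cmd | HOST_TID | target */
	msg->icntxt = cpu_to_le32(context);
	msg->tcntxt = cpu_to_le32(0x00000000);
	msg->body[0] = cpu_to_le32(0xffffffff);	/* notify on any TID */
	msg->body[1] = cpu_to_le32(change_ind);
	msg->body[2] = cpu_to_le32(0xd0000000 | dlct_len); /* simple SGL */
	msg->body[3] = cpu_to_le32(dlct_phys);
}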
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index f283b5bafdd3..5b1febed3133 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -59,10 +59,12 @@
59#include <linux/blkdev.h> 59#include <linux/blkdev.h>
60#include <linux/hdreg.h> 60#include <linux/hdreg.h>
61 61
62#include <scsi/scsi.h>
63
62#include "i2o_block.h" 64#include "i2o_block.h"
63 65
64#define OSM_NAME "block-osm" 66#define OSM_NAME "block-osm"
65#define OSM_VERSION "1.287" 67#define OSM_VERSION "1.325"
66#define OSM_DESCRIPTION "I2O Block Device OSM" 68#define OSM_DESCRIPTION "I2O Block Device OSM"
67 69
68static struct i2o_driver i2o_block_driver; 70static struct i2o_driver i2o_block_driver;
@@ -130,20 +132,20 @@ static int i2o_block_remove(struct device *dev)
130 */ 132 */
131static int i2o_block_device_flush(struct i2o_device *dev) 133static int i2o_block_device_flush(struct i2o_device *dev)
132{ 134{
133 struct i2o_message __iomem *msg; 135 struct i2o_message *msg;
134 u32 m;
135 136
136 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 137 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
137 if (m == I2O_QUEUE_EMPTY) 138 if (IS_ERR(msg))
138 return -ETIMEDOUT; 139 return PTR_ERR(msg);
139 140
140 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 141 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
141 writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid, 142 msg->u.head[1] =
142 &msg->u.head[1]); 143 cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->
143 writel(60 << 16, &msg->body[0]); 144 lct_data.tid);
145 msg->body[0] = cpu_to_le32(60 << 16);
144 osm_debug("Flushing...\n"); 146 osm_debug("Flushing...\n");
145 147
146 return i2o_msg_post_wait(dev->iop, m, 60); 148 return i2o_msg_post_wait(dev->iop, msg, 60);
147}; 149};
148 150
149/** 151/**
@@ -181,21 +183,21 @@ static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
181 */ 183 */
182static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) 184static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
183{ 185{
184 struct i2o_message __iomem *msg; 186 struct i2o_message *msg;
185 u32 m; 187
186 188 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
187 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 189 if (IS_ERR(msg))
188 if (m == I2O_QUEUE_EMPTY) 190 return PTR_ERR(msg);
189 return -ETIMEDOUT; 191
190 192 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
191 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 193 msg->u.head[1] =
192 writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid, 194 cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->
193 &msg->u.head[1]); 195 lct_data.tid);
194 writel(-1, &msg->body[0]); 196 msg->body[0] = cpu_to_le32(-1);
195 writel(0, &msg->body[1]); 197 msg->body[1] = cpu_to_le32(0x00000000);
196 osm_debug("Mounting...\n"); 198 osm_debug("Mounting...\n");
197 199
198 return i2o_msg_post_wait(dev->iop, m, 2); 200 return i2o_msg_post_wait(dev->iop, msg, 2);
199}; 201};
200 202
201/** 203/**
@@ -210,20 +212,20 @@ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
210 */ 212 */
211static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) 213static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
212{ 214{
213 struct i2o_message __iomem *msg; 215 struct i2o_message *msg;
214 u32 m;
215 216
216 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 217 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
 217 if (m == I2O_QUEUE_EMPTY) 218 if (IS_ERR(msg))
218 return -ETIMEDOUT; 219 return PTR_ERR(msg);
219 220
220 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 221 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
221 writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, 222 msg->u.head[1] =
222 &msg->u.head[1]); 223 cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->
223 writel(-1, &msg->body[0]); 224 lct_data.tid);
225 msg->body[0] = cpu_to_le32(-1);
224 osm_debug("Locking...\n"); 226 osm_debug("Locking...\n");
225 227
226 return i2o_msg_post_wait(dev->iop, m, 2); 228 return i2o_msg_post_wait(dev->iop, msg, 2);
227}; 229};
228 230
229/** 231/**
@@ -238,20 +240,20 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
238 */ 240 */
239static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) 241static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
240{ 242{
241 struct i2o_message __iomem *msg; 243 struct i2o_message *msg;
242 u32 m;
243 244
244 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 245 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
245 if (m == I2O_QUEUE_EMPTY) 246 if (IS_ERR(msg))
246 return -ETIMEDOUT; 247 return PTR_ERR(msg);
247 248
248 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 249 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
249 writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, 250 msg->u.head[1] =
250 &msg->u.head[1]); 251 cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->
251 writel(media_id, &msg->body[0]); 252 lct_data.tid);
253 msg->body[0] = cpu_to_le32(media_id);
252 osm_debug("Unlocking...\n"); 254 osm_debug("Unlocking...\n");
253 255
254 return i2o_msg_post_wait(dev->iop, m, 2); 256 return i2o_msg_post_wait(dev->iop, msg, 2);
255}; 257};
256 258
257/** 259/**
@@ -267,21 +269,21 @@ static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
267{ 269{
268 struct i2o_device *i2o_dev = dev->i2o_dev; 270 struct i2o_device *i2o_dev = dev->i2o_dev;
269 struct i2o_controller *c = i2o_dev->iop; 271 struct i2o_controller *c = i2o_dev->iop;
270 struct i2o_message __iomem *msg; 272 struct i2o_message *msg;
271 u32 m;
272 int rc; 273 int rc;
273 274
274 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 275 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
275 if (m == I2O_QUEUE_EMPTY) 276 if (IS_ERR(msg))
276 return -ETIMEDOUT; 277 return PTR_ERR(msg);
277 278
278 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 279 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
279 writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data. 280 msg->u.head[1] =
280 tid, &msg->u.head[1]); 281 cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->
281 writel(op << 24, &msg->body[0]); 282 lct_data.tid);
283 msg->body[0] = cpu_to_le32(op << 24);
282 osm_debug("Power...\n"); 284 osm_debug("Power...\n");
283 285
284 rc = i2o_msg_post_wait(c, m, 60); 286 rc = i2o_msg_post_wait(c, msg, 60);
285 if (!rc) 287 if (!rc)
286 dev->power = op; 288 dev->power = op;
287 289
@@ -331,7 +333,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
331 */ 333 */
332static inline int i2o_block_sglist_alloc(struct i2o_controller *c, 334static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
333 struct i2o_block_request *ireq, 335 struct i2o_block_request *ireq,
334 u32 __iomem ** mptr) 336 u32 ** mptr)
335{ 337{
336 int nents; 338 int nents;
337 enum dma_data_direction direction; 339 enum dma_data_direction direction;
@@ -466,7 +468,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
466 468
467 spin_lock_irqsave(q->queue_lock, flags); 469 spin_lock_irqsave(q->queue_lock, flags);
468 470
469 end_that_request_last(req); 471 end_that_request_last(req, uptodate);
470 472
471 if (likely(dev)) { 473 if (likely(dev)) {
472 dev->open_queue_depth--; 474 dev->open_queue_depth--;
@@ -745,10 +747,9 @@ static int i2o_block_transfer(struct request *req)
745 struct i2o_block_device *dev = req->rq_disk->private_data; 747 struct i2o_block_device *dev = req->rq_disk->private_data;
746 struct i2o_controller *c; 748 struct i2o_controller *c;
747 int tid = dev->i2o_dev->lct_data.tid; 749 int tid = dev->i2o_dev->lct_data.tid;
748 struct i2o_message __iomem *msg; 750 struct i2o_message *msg;
749 u32 __iomem *mptr; 751 u32 *mptr;
750 struct i2o_block_request *ireq = req->special; 752 struct i2o_block_request *ireq = req->special;
751 u32 m;
752 u32 tcntxt; 753 u32 tcntxt;
753 u32 sgl_offset = SGL_OFFSET_8; 754 u32 sgl_offset = SGL_OFFSET_8;
754 u32 ctl_flags = 0x00000000; 755 u32 ctl_flags = 0x00000000;
@@ -763,9 +764,9 @@ static int i2o_block_transfer(struct request *req)
763 764
764 c = dev->i2o_dev->iop; 765 c = dev->i2o_dev->iop;
765 766
766 m = i2o_msg_get(c, &msg); 767 msg = i2o_msg_get(c);
767 if (m == I2O_QUEUE_EMPTY) { 768 if (IS_ERR(msg)) {
768 rc = -EBUSY; 769 rc = PTR_ERR(msg);
769 goto exit; 770 goto exit;
770 } 771 }
771 772
@@ -775,8 +776,8 @@ static int i2o_block_transfer(struct request *req)
775 goto nop_msg; 776 goto nop_msg;
776 } 777 }
777 778
778 writel(i2o_block_driver.context, &msg->u.s.icntxt); 779 msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
779 writel(tcntxt, &msg->u.s.tcntxt); 780 msg->u.s.tcntxt = cpu_to_le32(tcntxt);
780 781
781 mptr = &msg->body[0]; 782 mptr = &msg->body[0];
782 783
@@ -834,11 +835,11 @@ static int i2o_block_transfer(struct request *req)
834 835
835 sgl_offset = SGL_OFFSET_12; 836 sgl_offset = SGL_OFFSET_12;
836 837
837 writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid, 838 msg->u.head[1] =
838 &msg->u.head[1]); 839 cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);
839 840
840 writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); 841 *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
841 writel(tid, mptr++); 842 *mptr++ = cpu_to_le32(tid);
842 843
843 /* 844 /*
844 * ENABLE_DISCONNECT 845 * ENABLE_DISCONNECT
@@ -846,29 +847,31 @@ static int i2o_block_transfer(struct request *req)
846 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME 847 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
847 */ 848 */
848 if (rq_data_dir(req) == READ) { 849 if (rq_data_dir(req) == READ) {
849 cmd[0] = 0x28; 850 cmd[0] = READ_10;
850 scsi_flags = 0x60a0000a; 851 scsi_flags = 0x60a0000a;
851 } else { 852 } else {
852 cmd[0] = 0x2A; 853 cmd[0] = WRITE_10;
853 scsi_flags = 0xa0a0000a; 854 scsi_flags = 0xa0a0000a;
854 } 855 }
855 856
856 writel(scsi_flags, mptr++); 857 *mptr++ = cpu_to_le32(scsi_flags);
857 858
858 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 859 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
859 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 860 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
860 861
861 memcpy_toio(mptr, cmd, 10); 862 memcpy(mptr, cmd, 10);
862 mptr += 4; 863 mptr += 4;
863 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 864 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
864 } else 865 } else
865#endif 866#endif
866 { 867 {
867 writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); 868 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
868 writel(ctl_flags, mptr++); 869 *mptr++ = cpu_to_le32(ctl_flags);
869 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 870 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
870 writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++); 871 *mptr++ =
871 writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++); 872 cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
873 *mptr++ =
874 cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
872 } 875 }
873 876
874 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { 877 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -876,13 +879,13 @@ static int i2o_block_transfer(struct request *req)
876 goto context_remove; 879 goto context_remove;
877 } 880 }
878 881
879 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | 882 msg->u.head[0] =
880 sgl_offset, &msg->u.head[0]); 883 cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
881 884
882 list_add_tail(&ireq->queue, &dev->open_queue); 885 list_add_tail(&ireq->queue, &dev->open_queue);
883 dev->open_queue_depth++; 886 dev->open_queue_depth++;
884 887
885 i2o_msg_post(c, m); 888 i2o_msg_post(c, msg);
886 889
887 return 0; 890 return 0;
888 891
@@ -890,7 +893,7 @@ static int i2o_block_transfer(struct request *req)
890 i2o_cntxt_list_remove(c, req); 893 i2o_cntxt_list_remove(c, req);
891 894
892 nop_msg: 895 nop_msg:
893 i2o_msg_nop(c, m); 896 i2o_msg_nop(c, msg);
894 897
895 exit: 898 exit:
896 return rc; 899 return rc;
@@ -978,13 +981,12 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
978 struct request_queue *queue; 981 struct request_queue *queue;
979 int rc; 982 int rc;
980 983
981 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 984 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
982 if (!dev) { 985 if (!dev) {
983 osm_err("Insufficient memory to allocate I2O Block disk.\n"); 986 osm_err("Insufficient memory to allocate I2O Block disk.\n");
984 rc = -ENOMEM; 987 rc = -ENOMEM;
985 goto exit; 988 goto exit;
986 } 989 }
987 memset(dev, 0, sizeof(*dev));
988 990
989 INIT_LIST_HEAD(&dev->open_queue); 991 INIT_LIST_HEAD(&dev->open_queue);
990 spin_lock_init(&dev->lock); 992 spin_lock_init(&dev->lock);
@@ -1049,8 +1051,8 @@ static int i2o_block_probe(struct device *dev)
1049 int rc; 1051 int rc;
1050 u64 size; 1052 u64 size;
1051 u32 blocksize; 1053 u32 blocksize;
1052 u32 flags, status;
1053 u16 body_size = 4; 1054 u16 body_size = 4;
1055 u16 power;
1054 unsigned short max_sectors; 1056 unsigned short max_sectors;
1055 1057
1056#ifdef CONFIG_I2O_EXT_ADAPTEC 1058#ifdef CONFIG_I2O_EXT_ADAPTEC
@@ -1108,22 +1110,20 @@ static int i2o_block_probe(struct device *dev)
1108 * Ask for the current media data. If that isn't supported 1110 * Ask for the current media data. If that isn't supported
1109 * then we ask for the device capacity data 1111 * then we ask for the device capacity data
1110 */ 1112 */
1111 if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || 1113 if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
1112 i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { 1114 !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
1113 blk_queue_hardsect_size(queue, blocksize); 1115 blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
1114 } else 1116 } else
1115 osm_warn("unable to get blocksize of %s\n", gd->disk_name); 1117 osm_warn("unable to get blocksize of %s\n", gd->disk_name);
1116 1118
1117 if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || 1119 if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
1118 i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { 1120 !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
1119 set_capacity(gd, size >> KERNEL_SECTOR_SHIFT); 1121 set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
1120 } else 1122 } else
1121 osm_warn("could not get size of %s\n", gd->disk_name); 1123 osm_warn("could not get size of %s\n", gd->disk_name);
1122 1124
1123 if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2)) 1125 if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
1124 i2o_blk_dev->power = 0; 1126 i2o_blk_dev->power = power;
1125 i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
1126 i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
1127 1127
1128 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); 1128 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
1129 1129
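The DPT passthrough path above now takes its opcodes from <scsi/scsi.h> and builds the CDB with explicit big-endian stores: READ(10)/WRITE(10) carry a 32-bit LBA at bytes 2-5 and a 16-bit block count at bytes 7-8, big-endian regardless of host order. A small sketch of that CDB construction:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>		/* READ_10 / WRITE_10 opcodes */

/* Fill a 10-byte CDB the way the i2o_block transfer hunk above does. */
static void build_rw10_cdb(u8 cdb[10], int is_read, u32 lba, u16 nblocks)
{
	memset(cdb, 0, 10);
	cdb[0] = is_read ? READ_10 : WRITE_10;
	*(__be32 *)&cdb[2] = cpu_to_be32(lba);		/* bytes 2..5 */
	*(__be16 *)&cdb[7] = cpu_to_be16(nblocks);	/* bytes 7..8 */
}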
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 3c3a7abebb1b..89daf67b764d 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -36,12 +36,12 @@
36 36
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38 38
39#include "core.h"
40
41#define SG_TABLESIZE 30 39#define SG_TABLESIZE 30
42 40
43static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, 41extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
44 unsigned long arg); 42
43static int i2o_cfg_ioctl(struct inode *, struct file *, unsigned int,
44 unsigned long);
45 45
46static spinlock_t i2o_config_lock; 46static spinlock_t i2o_config_lock;
47 47
@@ -230,8 +230,7 @@ static int i2o_cfg_swdl(unsigned long arg)
230 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 230 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
231 unsigned char maxfrag = 0, curfrag = 1; 231 unsigned char maxfrag = 0, curfrag = 1;
232 struct i2o_dma buffer; 232 struct i2o_dma buffer;
233 struct i2o_message __iomem *msg; 233 struct i2o_message *msg;
234 u32 m;
235 unsigned int status = 0, swlen = 0, fragsize = 8192; 234 unsigned int status = 0, swlen = 0, fragsize = 8192;
236 struct i2o_controller *c; 235 struct i2o_controller *c;
237 236
@@ -257,31 +256,34 @@ static int i2o_cfg_swdl(unsigned long arg)
257 if (!c) 256 if (!c)
258 return -ENXIO; 257 return -ENXIO;
259 258
260 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 259 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
261 if (m == I2O_QUEUE_EMPTY) 260 if (IS_ERR(msg))
262 return -EBUSY; 261 return PTR_ERR(msg);
263 262
264 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { 263 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
265 i2o_msg_nop(c, m); 264 i2o_msg_nop(c, msg);
266 return -ENOMEM; 265 return -ENOMEM;
267 } 266 }
268 267
269 __copy_from_user(buffer.virt, kxfer.buf, fragsize); 268 __copy_from_user(buffer.virt, kxfer.buf, fragsize);
270 269
271 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); 270 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
272 writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, 271 msg->u.head[1] =
273 &msg->u.head[1]); 272 cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
274 writel(i2o_config_driver.context, &msg->u.head[2]); 273 ADAPTER_TID);
275 writel(0, &msg->u.head[3]); 274 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
276 writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) | 275 msg->u.head[3] = cpu_to_le32(0);
277 (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]); 276 msg->body[0] =
278 writel(swlen, &msg->body[1]); 277 cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer.
279 writel(kxfer.sw_id, &msg->body[2]); 278 sw_type) << 16) |
280 writel(0xD0000000 | fragsize, &msg->body[3]); 279 (((u32) maxfrag) << 8) | (((u32) curfrag)));
281 writel(buffer.phys, &msg->body[4]); 280 msg->body[1] = cpu_to_le32(swlen);
281 msg->body[2] = cpu_to_le32(kxfer.sw_id);
282 msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
283 msg->body[4] = cpu_to_le32(buffer.phys);
282 284
283 osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); 285 osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
284 status = i2o_msg_post_wait_mem(c, m, 60, &buffer); 286 status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
285 287
286 if (status != -ETIMEDOUT) 288 if (status != -ETIMEDOUT)
287 i2o_dma_free(&c->pdev->dev, &buffer); 289 i2o_dma_free(&c->pdev->dev, &buffer);
@@ -302,8 +304,7 @@ static int i2o_cfg_swul(unsigned long arg)
302 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 304 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
303 unsigned char maxfrag = 0, curfrag = 1; 305 unsigned char maxfrag = 0, curfrag = 1;
304 struct i2o_dma buffer; 306 struct i2o_dma buffer;
305 struct i2o_message __iomem *msg; 307 struct i2o_message *msg;
306 u32 m;
307 unsigned int status = 0, swlen = 0, fragsize = 8192; 308 unsigned int status = 0, swlen = 0, fragsize = 8192;
308 struct i2o_controller *c; 309 struct i2o_controller *c;
309 int ret = 0; 310 int ret = 0;
@@ -330,30 +331,30 @@ static int i2o_cfg_swul(unsigned long arg)
330 if (!c) 331 if (!c)
331 return -ENXIO; 332 return -ENXIO;
332 333
333 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 334 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
334 if (m == I2O_QUEUE_EMPTY) 335 if (IS_ERR(msg))
335 return -EBUSY; 336 return PTR_ERR(msg);
336 337
337 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { 338 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
338 i2o_msg_nop(c, m); 339 i2o_msg_nop(c, msg);
339 return -ENOMEM; 340 return -ENOMEM;
340 } 341 }
341 342
342 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); 343 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
343 writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, 344 msg->u.head[1] =
344 &msg->u.head[1]); 345 cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
345 writel(i2o_config_driver.context, &msg->u.head[2]); 346 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
346 writel(0, &msg->u.head[3]); 347 msg->u.head[3] = cpu_to_le32(0);
347 writel((u32) kxfer.flags << 24 | (u32) kxfer. 348 msg->body[0] =
348 sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag, 349 cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.
349 &msg->body[0]); 350 sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag);
350 writel(swlen, &msg->body[1]); 351 msg->body[1] = cpu_to_le32(swlen);
351 writel(kxfer.sw_id, &msg->body[2]); 352 msg->body[2] = cpu_to_le32(kxfer.sw_id);
352 writel(0xD0000000 | fragsize, &msg->body[3]); 353 msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
353 writel(buffer.phys, &msg->body[4]); 354 msg->body[4] = cpu_to_le32(buffer.phys);
354 355
355 osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); 356 osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
356 status = i2o_msg_post_wait_mem(c, m, 60, &buffer); 357 status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
357 358
358 if (status != I2O_POST_WAIT_OK) { 359 if (status != I2O_POST_WAIT_OK) {
359 if (status != -ETIMEDOUT) 360 if (status != -ETIMEDOUT)
@@ -380,8 +381,7 @@ static int i2o_cfg_swdel(unsigned long arg)
380 struct i2o_controller *c; 381 struct i2o_controller *c;
381 struct i2o_sw_xfer kxfer; 382 struct i2o_sw_xfer kxfer;
382 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 383 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
383 struct i2o_message __iomem *msg; 384 struct i2o_message *msg;
384 u32 m;
385 unsigned int swlen; 385 unsigned int swlen;
386 int token; 386 int token;
387 387
@@ -395,21 +395,21 @@ static int i2o_cfg_swdel(unsigned long arg)
395 if (!c) 395 if (!c)
396 return -ENXIO; 396 return -ENXIO;
397 397
398 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 398 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
399 if (m == I2O_QUEUE_EMPTY) 399 if (IS_ERR(msg))
400 return -EBUSY; 400 return PTR_ERR(msg);
401 401
402 writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 402 msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
403 writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID, 403 msg->u.head[1] =
404 &msg->u.head[1]); 404 cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
405 writel(i2o_config_driver.context, &msg->u.head[2]); 405 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
406 writel(0, &msg->u.head[3]); 406 msg->u.head[3] = cpu_to_le32(0);
407 writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16, 407 msg->body[0] =
408 &msg->body[0]); 408 cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
409 writel(swlen, &msg->body[1]); 409 msg->body[1] = cpu_to_le32(swlen);
410 writel(kxfer.sw_id, &msg->body[2]); 410 msg->body[2] = cpu_to_le32(kxfer.sw_id);
411 411
412 token = i2o_msg_post_wait(c, m, 10); 412 token = i2o_msg_post_wait(c, msg, 10);
413 413
414 if (token != I2O_POST_WAIT_OK) { 414 if (token != I2O_POST_WAIT_OK) {
415 osm_info("swdel failed, DetailedStatus = %d\n", token); 415 osm_info("swdel failed, DetailedStatus = %d\n", token);
@@ -423,25 +423,24 @@ static int i2o_cfg_validate(unsigned long arg)
423{ 423{
424 int token; 424 int token;
425 int iop = (int)arg; 425 int iop = (int)arg;
426 struct i2o_message __iomem *msg; 426 struct i2o_message *msg;
427 u32 m;
428 struct i2o_controller *c; 427 struct i2o_controller *c;
429 428
430 c = i2o_find_iop(iop); 429 c = i2o_find_iop(iop);
431 if (!c) 430 if (!c)
432 return -ENXIO; 431 return -ENXIO;
433 432
434 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 433 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
435 if (m == I2O_QUEUE_EMPTY) 434 if (IS_ERR(msg))
436 return -EBUSY; 435 return PTR_ERR(msg);
437 436
438 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 437 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
439 writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop, 438 msg->u.head[1] =
440 &msg->u.head[1]); 439 cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
441 writel(i2o_config_driver.context, &msg->u.head[2]); 440 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
442 writel(0, &msg->u.head[3]); 441 msg->u.head[3] = cpu_to_le32(0);
443 442
444 token = i2o_msg_post_wait(c, m, 10); 443 token = i2o_msg_post_wait(c, msg, 10);
445 444
446 if (token != I2O_POST_WAIT_OK) { 445 if (token != I2O_POST_WAIT_OK) {
447 osm_info("Can't validate configuration, ErrorStatus = %d\n", 446 osm_info("Can't validate configuration, ErrorStatus = %d\n",
@@ -454,8 +453,7 @@ static int i2o_cfg_validate(unsigned long arg)
454 453
455static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) 454static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
456{ 455{
457 struct i2o_message __iomem *msg; 456 struct i2o_message *msg;
458 u32 m;
459 struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg; 457 struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
460 struct i2o_evt_id kdesc; 458 struct i2o_evt_id kdesc;
461 struct i2o_controller *c; 459 struct i2o_controller *c;
@@ -474,18 +472,19 @@ static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
474 if (!d) 472 if (!d)
475 return -ENODEV; 473 return -ENODEV;
476 474
477 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 475 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
478 if (m == I2O_QUEUE_EMPTY) 476 if (IS_ERR(msg))
479 return -EBUSY; 477 return PTR_ERR(msg);
480 478
481 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 479 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
482 writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid, 480 msg->u.head[1] =
483 &msg->u.head[1]); 481 cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
484 writel(i2o_config_driver.context, &msg->u.head[2]); 482 kdesc.tid);
485 writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]); 483 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
486 writel(kdesc.evt_mask, &msg->body[0]); 484 msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
485 msg->body[0] = cpu_to_le32(kdesc.evt_mask);
487 486
488 i2o_msg_post(c, m); 487 i2o_msg_post(c, msg);
489 488
490 return 0; 489 return 0;
491} 490}
@@ -537,7 +536,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
537 u32 sg_index = 0; 536 u32 sg_index = 0;
538 i2o_status_block *sb; 537 i2o_status_block *sb;
539 struct i2o_message *msg; 538 struct i2o_message *msg;
540 u32 m;
541 unsigned int iop; 539 unsigned int iop;
542 540
543 cmd = (struct i2o_cmd_passthru32 __user *)arg; 541 cmd = (struct i2o_cmd_passthru32 __user *)arg;
@@ -553,7 +551,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
553 return -ENXIO; 551 return -ENXIO;
554 } 552 }
555 553
556 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 554 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
557 555
558 sb = c->status_block.virt; 556 sb = c->status_block.virt;
559 557
@@ -585,19 +583,15 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
585 reply_size >>= 16; 583 reply_size >>= 16;
586 reply_size <<= 2; 584 reply_size <<= 2;
587 585
588 reply = kmalloc(reply_size, GFP_KERNEL); 586 reply = kzalloc(reply_size, GFP_KERNEL);
589 if (!reply) { 587 if (!reply) {
590 printk(KERN_WARNING "%s: Could not allocate reply buffer\n", 588 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
591 c->name); 589 c->name);
592 return -ENOMEM; 590 return -ENOMEM;
593 } 591 }
594 memset(reply, 0, reply_size);
595 592
596 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 593 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
597 594
598 writel(i2o_config_driver.context, &msg->u.s.icntxt);
599 writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
600
601 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); 595 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
602 if (sg_offset) { 596 if (sg_offset) {
603 struct sg_simple_element *sg; 597 struct sg_simple_element *sg;
@@ -631,7 +625,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
631 goto cleanup; 625 goto cleanup;
632 } 626 }
633 sg_size = sg[i].flag_count & 0xffffff; 627 sg_size = sg[i].flag_count & 0xffffff;
634 p = &(sg_list[sg_index++]); 628 p = &(sg_list[sg_index]);
635 /* Allocate memory for the transfer */ 629 /* Allocate memory for the transfer */
636 if (i2o_dma_alloc 630 if (i2o_dma_alloc
637 (&c->pdev->dev, p, sg_size, 631 (&c->pdev->dev, p, sg_size,
@@ -642,6 +636,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
642 rcode = -ENOMEM; 636 rcode = -ENOMEM;
643 goto sg_list_cleanup; 637 goto sg_list_cleanup;
644 } 638 }
639 sg_index++;
645 /* Copy in the user's SG buffer if necessary */ 640 /* Copy in the user's SG buffer if necessary */
646 if (sg[i]. 641 if (sg[i].
647 flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { 642 flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
@@ -662,9 +657,11 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
662 } 657 }
663 } 658 }
664 659
665 rcode = i2o_msg_post_wait(c, m, 60); 660 rcode = i2o_msg_post_wait(c, msg, 60);
666 if (rcode) 661 if (rcode) {
662 reply[4] = ((u32) rcode) << 24;
667 goto sg_list_cleanup; 663 goto sg_list_cleanup;
664 }
668 665
669 if (sg_offset) { 666 if (sg_offset) {
670 u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE]; 667 u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE];
@@ -714,6 +711,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
714 } 711 }
715 } 712 }
716 713
714 sg_list_cleanup:
717 /* Copy back the reply to user space */ 715 /* Copy back the reply to user space */
718 if (reply_size) { 716 if (reply_size) {
719 // we wrote our own values for context - now restore the user supplied ones 717 // we wrote our own values for context - now restore the user supplied ones
@@ -731,7 +729,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
731 } 729 }
732 } 730 }
733 731
734 sg_list_cleanup:
735 for (i = 0; i < sg_index; i++) 732 for (i = 0; i < sg_index; i++)
736 i2o_dma_free(&c->pdev->dev, &sg_list[i]); 733 i2o_dma_free(&c->pdev->dev, &sg_list[i]);
737 734
@@ -780,8 +777,7 @@ static int i2o_cfg_passthru(unsigned long arg)
780 u32 i = 0; 777 u32 i = 0;
781 void *p = NULL; 778 void *p = NULL;
782 i2o_status_block *sb; 779 i2o_status_block *sb;
783 struct i2o_message __iomem *msg; 780 struct i2o_message *msg;
784 u32 m;
785 unsigned int iop; 781 unsigned int iop;
786 782
787 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) 783 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
@@ -793,7 +789,7 @@ static int i2o_cfg_passthru(unsigned long arg)
793 return -ENXIO; 789 return -ENXIO;
794 } 790 }
795 791
796 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 792 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
797 793
798 sb = c->status_block.virt; 794 sb = c->status_block.virt;
799 795
@@ -820,19 +816,15 @@ static int i2o_cfg_passthru(unsigned long arg)
820 reply_size >>= 16; 816 reply_size >>= 16;
821 reply_size <<= 2; 817 reply_size <<= 2;
822 818
823 reply = kmalloc(reply_size, GFP_KERNEL); 819 reply = kzalloc(reply_size, GFP_KERNEL);
824 if (!reply) { 820 if (!reply) {
825 printk(KERN_WARNING "%s: Could not allocate reply buffer\n", 821 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
826 c->name); 822 c->name);
827 return -ENOMEM; 823 return -ENOMEM;
828 } 824 }
829 memset(reply, 0, reply_size);
830 825
831 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 826 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
832 827
833 writel(i2o_config_driver.context, &msg->u.s.icntxt);
834 writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
835
836 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); 828 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
837 if (sg_offset) { 829 if (sg_offset) {
838 struct sg_simple_element *sg; 830 struct sg_simple_element *sg;
@@ -894,9 +886,11 @@ static int i2o_cfg_passthru(unsigned long arg)
894 } 886 }
895 } 887 }
896 888
897 rcode = i2o_msg_post_wait(c, m, 60); 889 rcode = i2o_msg_post_wait(c, msg, 60);
898 if (rcode) 890 if (rcode) {
891 reply[4] = ((u32) rcode) << 24;
899 goto sg_list_cleanup; 892 goto sg_list_cleanup;
893 }
900 894
901 if (sg_offset) { 895 if (sg_offset) {
902 u32 msg[128]; 896 u32 msg[128];
@@ -946,6 +940,7 @@ static int i2o_cfg_passthru(unsigned long arg)
946 } 940 }
947 } 941 }
948 942
943 sg_list_cleanup:
949 /* Copy back the reply to user space */ 944 /* Copy back the reply to user space */
950 if (reply_size) { 945 if (reply_size) {
951 // we wrote our own values for context - now restore the user supplied ones 946 // we wrote our own values for context - now restore the user supplied ones
@@ -962,7 +957,6 @@ static int i2o_cfg_passthru(unsigned long arg)
962 } 957 }
963 } 958 }
964 959
965 sg_list_cleanup:
966 for (i = 0; i < sg_index; i++) 960 for (i = 0; i < sg_index; i++)
967 kfree(sg_list[i]); 961 kfree(sg_list[i]);
968 962
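The passthru fixes above do two things on the error path: they record the post-wait status in reply word 4 before bailing out, and they move the sg_list_cleanup label above the copy-back so a failed request still returns its reply (and status) to user space before the buffers are freed. A schematic, compilable sketch of that label ordering; all helpers are stubs, not the driver's API:

#include <errno.h>

static int post_and_wait_status(void)
{
	return -ETIMEDOUT;		/* pretend the post-wait timed out */
}

static unsigned int reply[8];		/* stands in for the reply buffer */
static unsigned int user_copy[8];	/* stands in for the user buffer */

static int passthru_shape(void)
{
	int rc = post_and_wait_status();
	unsigned int i;

	if (rc) {
		reply[4] = (unsigned int)rc << 24; /* record DetailedStatus */
		goto sg_list_cleanup;	/* do NOT skip the copy-back below */
	}
	/* ... success path consumes the reply here ... */

sg_list_cleanup:
	for (i = 0; i < 8; i++)		/* stands in for copy_to_user() */
		user_copy[i] = reply[i];
	/* ... then free the scatter-gather buffers ... */
	return rc;
}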
diff --git a/drivers/message/i2o/i2o_lan.h b/drivers/message/i2o/i2o_lan.h
index 561d63304d7e..6502b817df58 100644
--- a/drivers/message/i2o/i2o_lan.h
+++ b/drivers/message/i2o/i2o_lan.h
@@ -103,14 +103,14 @@
103#define I2O_LAN_DSC_SUSPENDED 0x11 103#define I2O_LAN_DSC_SUSPENDED 0x11
104 104
105struct i2o_packet_info { 105struct i2o_packet_info {
106 u32 offset : 24; 106 u32 offset:24;
107 u32 flags : 8; 107 u32 flags:8;
108 u32 len : 24; 108 u32 len:24;
109 u32 status : 8; 109 u32 status:8;
110}; 110};
111 111
112struct i2o_bucket_descriptor { 112struct i2o_bucket_descriptor {
113 u32 context; /* FIXME: 64bit support */ 113 u32 context; /* FIXME: 64bit support */
114 struct i2o_packet_info packet_info[1]; 114 struct i2o_packet_info packet_info[1];
115}; 115};
116 116
@@ -127,14 +127,14 @@ struct i2o_lan_local {
127 u8 unit; 127 u8 unit;
128 struct i2o_device *i2o_dev; 128 struct i2o_device *i2o_dev;
129 129
130 struct fddi_statistics stats; /* see also struct net_device_stats */ 130 struct fddi_statistics stats; /* see also struct net_device_stats */
131 unsigned short (*type_trans)(struct sk_buff *, struct net_device *); 131 unsigned short (*type_trans) (struct sk_buff *, struct net_device *);
132 atomic_t buckets_out; /* nbr of unused buckets on DDM */ 132 atomic_t buckets_out; /* nbr of unused buckets on DDM */
133 atomic_t tx_out; /* outstanding TXes */ 133 atomic_t tx_out; /* outstanding TXes */
134 u8 tx_count; /* packets in one TX message frame */ 134 u8 tx_count; /* packets in one TX message frame */
135 u16 tx_max_out; /* DDM's Tx queue len */ 135 u16 tx_max_out; /* DDM's Tx queue len */
136 u8 sgl_max; /* max SGLs in one message frame */ 136 u8 sgl_max; /* max SGLs in one message frame */
137 u32 m; /* IOP address of the batch msg frame */ 137 u32 m; /* IOP address of the batch msg frame */
138 138
139 struct work_struct i2o_batch_send_task; 139 struct work_struct i2o_batch_send_task;
140 int send_active; 140 int send_active;
@@ -144,16 +144,16 @@ struct i2o_lan_local {
144 144
145 spinlock_t tx_lock; 145 spinlock_t tx_lock;
146 146
147 u32 max_size_mc_table; /* max number of multicast addresses */ 147 u32 max_size_mc_table; /* max number of multicast addresses */
148 148
149 /* LAN OSM configurable parameters are here: */ 149 /* LAN OSM configurable parameters are here: */
150 150
151 u16 max_buckets_out; /* max nbr of buckets to send to DDM */ 151 u16 max_buckets_out; /* max nbr of buckets to send to DDM */
152 u16 bucket_thresh; /* send more when this many used */ 152 u16 bucket_thresh; /* send more when this many used */
153 u16 rx_copybreak; 153 u16 rx_copybreak;
154 154
155 u8 tx_batch_mode; /* Set when using batch mode sends */ 155 u8 tx_batch_mode; /* Set when using batch mode sends */
156 u32 i2o_event_mask; /* To turn on interesting event flags */ 156 u32 i2o_event_mask; /* To turn on interesting event flags */
157}; 157};
158 158
159#endif /* _I2O_LAN_H */ 159#endif /* _I2O_LAN_H */
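The reindented i2o_packet_info above overlays the two 32-bit words of an I2O packet descriptor with C bitfields (24-bit offset/len, 8-bit flags/status). Since bitfield order and packing are implementation-defined, a compile-time size check is cheap insurance when a struct like this must match a wire layout; a sketch using the C11 _Static_assert keyword:

#include <stdint.h>

struct packet_info_sketch {	/* mirrors the bitfield layout above */
	uint32_t offset:24;
	uint32_t flags:8;
	uint32_t len:24;
	uint32_t status:8;
};

/* Two packed 32-bit words; if padding ever creeps in, fail the build. */
_Static_assert(sizeof(struct packet_info_sketch) == 8,
	       "packet_info must overlay exactly two 32-bit words");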
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index d559a1758363..2a0c42b8cda5 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -28,7 +28,7 @@
28 */ 28 */
29 29
30#define OSM_NAME "proc-osm" 30#define OSM_NAME "proc-osm"
31#define OSM_VERSION "1.145" 31#define OSM_VERSION "1.316"
32#define OSM_DESCRIPTION "I2O ProcFS OSM" 32#define OSM_DESCRIPTION "I2O ProcFS OSM"
33 33
34#define I2O_MAX_MODULES 4 34#define I2O_MAX_MODULES 4
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 9f1744c3933b..f9e5a23697a1 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -70,7 +70,7 @@
70#include <scsi/sg_request.h> 70#include <scsi/sg_request.h>
71 71
72#define OSM_NAME "scsi-osm" 72#define OSM_NAME "scsi-osm"
73#define OSM_VERSION "1.282" 73#define OSM_VERSION "1.316"
74#define OSM_DESCRIPTION "I2O SCSI Peripheral OSM" 74#define OSM_DESCRIPTION "I2O SCSI Peripheral OSM"
75 75
76static struct i2o_driver i2o_scsi_driver; 76static struct i2o_driver i2o_scsi_driver;
@@ -113,7 +113,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
113 113
114 list_for_each_entry(i2o_dev, &c->devices, list) 114 list_for_each_entry(i2o_dev, &c->devices, list)
115 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { 115 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
116 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) 116 if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
117 && (type == 0x01)) /* SCSI bus */ 117 && (type == 0x01)) /* SCSI bus */
118 max_channel++; 118 max_channel++;
119 } 119 }
@@ -146,7 +146,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
146 i = 0; 146 i = 0;
147 list_for_each_entry(i2o_dev, &c->devices, list) 147 list_for_each_entry(i2o_dev, &c->devices, list)
148 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { 148 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
149 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) 149 if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
150 && (type == 0x01)) /* only SCSI bus */ 150 && (type == 0x01)) /* only SCSI bus */
151 i2o_shost->channel[i++] = i2o_dev; 151 i2o_shost->channel[i++] = i2o_dev;
152 152
@@ -238,13 +238,15 @@ static int i2o_scsi_probe(struct device *dev)
238 u8 type; 238 u8 type;
239 struct i2o_device *d = i2o_shost->channel[0]; 239 struct i2o_device *d = i2o_shost->channel[0];
240 240
241 if (i2o_parm_field_get(d, 0x0000, 0, &type, 1) 241 if (!i2o_parm_field_get(d, 0x0000, 0, &type, 1)
242 && (type == 0x01)) /* SCSI bus */ 242 && (type == 0x01)) /* SCSI bus */
243 if (i2o_parm_field_get(d, 0x0200, 4, &id, 4)) { 243 if (!i2o_parm_field_get(d, 0x0200, 4, &id, 4)) {
244 channel = 0; 244 channel = 0;
245 if (i2o_dev->lct_data.class_id == 245 if (i2o_dev->lct_data.class_id ==
246 I2O_CLASS_RANDOM_BLOCK_STORAGE) 246 I2O_CLASS_RANDOM_BLOCK_STORAGE)
247 lun = i2o_shost->lun++; 247 lun =
248 cpu_to_le64(i2o_shost->
249 lun++);
248 else 250 else
249 lun = 0; 251 lun = 0;
250 } 252 }
@@ -253,10 +255,10 @@ static int i2o_scsi_probe(struct device *dev)
253 break; 255 break;
254 256
255 case I2O_CLASS_SCSI_PERIPHERAL: 257 case I2O_CLASS_SCSI_PERIPHERAL:
256 if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4) < 0) 258 if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4))
257 return -EFAULT; 259 return -EFAULT;
258 260
259 if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8) < 0) 261 if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8))
260 return -EFAULT; 262 return -EFAULT;
261 263
262 parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid); 264 parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid);
@@ -281,20 +283,22 @@ static int i2o_scsi_probe(struct device *dev)
281 return -EFAULT; 283 return -EFAULT;
282 } 284 }
283 285
284 if (id >= scsi_host->max_id) { 286 if (le32_to_cpu(id) >= scsi_host->max_id) {
285 osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id, 287 osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)",
286 scsi_host->max_id); 288 le32_to_cpu(id), scsi_host->max_id);
287 return -EFAULT; 289 return -EFAULT;
288 } 290 }
289 291
290 if (lun >= scsi_host->max_lun) { 292 if (le64_to_cpu(lun) >= scsi_host->max_lun) {
291 osm_warn("SCSI device id (%d) >= max_lun of I2O host (%d)", 293 osm_warn("SCSI device lun (%lu) >= max_lun of I2O host (%d)",
292 (unsigned int)lun, scsi_host->max_lun); 294 (long unsigned int)le64_to_cpu(lun),
295 scsi_host->max_lun);
293 return -EFAULT; 296 return -EFAULT;
294 } 297 }
295 298
296 scsi_dev = 299 scsi_dev =
297 __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev); 300 __scsi_add_device(i2o_shost->scsi_host, channel, le32_to_cpu(id),
301 le64_to_cpu(lun), i2o_dev);
298 302
299 if (IS_ERR(scsi_dev)) { 303 if (IS_ERR(scsi_dev)) {
300 osm_warn("can not add SCSI device %03x\n", 304 osm_warn("can not add SCSI device %03x\n",
@@ -305,8 +309,9 @@ static int i2o_scsi_probe(struct device *dev)
305 sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj, 309 sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj,
306 "scsi"); 310 "scsi");
307 311
308 osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n", 312 osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n",
309 i2o_dev->lct_data.tid, channel, id, (unsigned int)lun); 313 i2o_dev->lct_data.tid, channel, le32_to_cpu(id),
314 (long unsigned int)le64_to_cpu(lun));
310 315
311 return 0; 316 return 0;
312}; 317};
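
Throughout this probe path, id and lun now stay in the little-endian layout the IOP writes into the parameter buffer (note the cpu_to_le64() on the lun assignment above), and le32_to_cpu()/le64_to_cpu() are applied only at the boundaries: range checks, logging, and __scsi_add_device(). A portable stand-in for those conversions, as a sketch (the kernel's own helpers come from <asm/byteorder.h>):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for le32_to_cpu()/le64_to_cpu(): assemble the value from
 * explicit byte positions so the result is host-order regardless of
 * the machine's endianness. */
static uint32_t my_le32_to_cpu(const uint8_t b[4])
{
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

static uint64_t my_le64_to_cpu(const uint8_t b[8])
{
        uint64_t v = 0;
        for (int i = 7; i >= 0; i--)
                v = v << 8 | b[i];
        return v;
}

int main(void)
{
        /* A parameter buffer as a little-endian device would fill it. */
        uint8_t id_raw[4]  = { 0x05, 0x00, 0x00, 0x00 };    /* id 5  */
        uint8_t lun_raw[8] = { 0x02, 0, 0, 0, 0, 0, 0, 0 }; /* lun 2 */

        uint32_t id  = my_le32_to_cpu(id_raw);
        uint64_t lun = my_le64_to_cpu(lun_raw);

        /* Convert once, at the boundary, before range checks/logging. */
        printf("id: %u, lun: %llu\n", (unsigned)id,
               (unsigned long long)lun);
        return 0;
}
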
@@ -510,8 +515,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
510 struct i2o_controller *c; 515 struct i2o_controller *c;
511 struct i2o_device *i2o_dev; 516 struct i2o_device *i2o_dev;
512 int tid; 517 int tid;
513 struct i2o_message __iomem *msg; 518 struct i2o_message *msg;
514 u32 m;
515 /* 519 /*
516 * ENABLE_DISCONNECT 520 * ENABLE_DISCONNECT
517 * SIMPLE_TAG 521 * SIMPLE_TAG
@@ -519,7 +523,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
519 */ 523 */
520 u32 scsi_flags = 0x20a00000; 524 u32 scsi_flags = 0x20a00000;
521 u32 sgl_offset; 525 u32 sgl_offset;
522 u32 __iomem *mptr; 526 u32 *mptr;
523 u32 cmd = I2O_CMD_SCSI_EXEC << 24; 527 u32 cmd = I2O_CMD_SCSI_EXEC << 24;
524 int rc = 0; 528 int rc = 0;
525 529
@@ -576,8 +580,8 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
576 * throw it back to the scsi layer 580 * throw it back to the scsi layer
577 */ 581 */
578 582
579 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 583 msg = i2o_msg_get(c);
580 if (m == I2O_QUEUE_EMPTY) { 584 if (IS_ERR(msg)) {
581 rc = SCSI_MLQUEUE_HOST_BUSY; 585 rc = SCSI_MLQUEUE_HOST_BUSY;
582 goto exit; 586 goto exit;
583 } 587 }
@@ -617,16 +621,16 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
617 if (sgl_offset == SGL_OFFSET_10) 621 if (sgl_offset == SGL_OFFSET_10)
618 sgl_offset = SGL_OFFSET_12; 622 sgl_offset = SGL_OFFSET_12;
619 cmd = I2O_CMD_PRIVATE << 24; 623 cmd = I2O_CMD_PRIVATE << 24;
620 writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); 624 *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
621 writel(adpt_flags | tid, mptr++); 625 *mptr++ = cpu_to_le32(adpt_flags | tid);
622 } 626 }
623#endif 627#endif
624 628
625 writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); 629 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
626 writel(i2o_scsi_driver.context, &msg->u.s.icntxt); 630 msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context);
627 631
628 /* We want the SCSI control block back */ 632 /* We want the SCSI control block back */
629 writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt); 633 msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt));
630 634
631 /* LSI_920_PCI_QUIRK 635 /* LSI_920_PCI_QUIRK
632 * 636 *
@@ -649,15 +653,15 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
649 } 653 }
650 */ 654 */
651 655
652 writel(scsi_flags | SCpnt->cmd_len, mptr++); 656 *mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len);
653 657
654 /* Write SCSI command into the message - always 16 byte block */ 658 /* Write SCSI command into the message - always 16 byte block */
655 memcpy_toio(mptr, SCpnt->cmnd, 16); 659 memcpy(mptr, SCpnt->cmnd, 16);
656 mptr += 4; 660 mptr += 4;
657 661
658 if (sgl_offset != SGL_OFFSET_0) { 662 if (sgl_offset != SGL_OFFSET_0) {
659 /* write size of data addressed by SGL */ 663 /* write size of data addressed by SGL */
660 writel(SCpnt->request_bufflen, mptr++); 664 *mptr++ = cpu_to_le32(SCpnt->request_bufflen);
661 665
662 /* Now fill in the SGList and command */ 666 /* Now fill in the SGList and command */
663 if (SCpnt->use_sg) { 667 if (SCpnt->use_sg) {
@@ -676,11 +680,11 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
676 } 680 }
677 681
678 /* Stick the headers on */ 682 /* Stick the headers on */
679 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset, 683 msg->u.head[0] =
680 &msg->u.head[0]); 684 cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
681 685
682 /* Queue the message */ 686 /* Queue the message */
683 i2o_msg_post(c, m); 687 i2o_msg_post(c, msg);
684 688
685 osm_debug("Issued %ld\n", SCpnt->serial_number); 689 osm_debug("Issued %ld\n", SCpnt->serial_number);
686 690
@@ -688,7 +692,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
688 692
689 nomem: 693 nomem:
690 rc = -ENOMEM; 694 rc = -ENOMEM;
691 i2o_msg_nop(c, m); 695 i2o_msg_nop(c, msg);
692 696
693 exit: 697 exit:
694 return rc; 698 return rc;
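
The queuecommand path no longer deals in a frame index plus a __iomem mapping: message frames now live in ordinary DMA-able memory, so writel()/memcpy_toio() become plain stores wrapped in cpu_to_le32(), and "no frame available" is reported through the ERR_PTR()/IS_ERR() convention instead of the I2O_QUEUE_EMPTY magic value. A compact userspace sketch of that error-pointer convention (stand-ins for the kernel's <linux/err.h> helpers):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR(): small negative errno
 * values fold into the top of the pointer range, so one return value
 * carries either a valid pointer or an error code. */
static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static uint32_t frame[16];      /* one fake preallocated message frame */

/* Stand-in for i2o_msg_get(): hand out a frame or an error pointer. */
static uint32_t *msg_get(int queue_empty)
{
        if (queue_empty)
                return ERR_PTR(-EBUSY);
        return frame;
}

int main(void)
{
        uint32_t *msg = msg_get(0);

        if (IS_ERR(msg)) {
                fprintf(stderr, "no frame: %ld\n", PTR_ERR(msg));
                return 1;
        }
        msg[0] = 0xdeadbeef;    /* plain store, no writel() needed */
        printf("frame ready, word 0 = %#x\n", (unsigned)msg[0]);
        return 0;
}
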
@@ -709,8 +713,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
709{ 713{
710 struct i2o_device *i2o_dev; 714 struct i2o_device *i2o_dev;
711 struct i2o_controller *c; 715 struct i2o_controller *c;
712 struct i2o_message __iomem *msg; 716 struct i2o_message *msg;
713 u32 m;
714 int tid; 717 int tid;
715 int status = FAILED; 718 int status = FAILED;
716 719
@@ -720,16 +723,16 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
720 c = i2o_dev->iop; 723 c = i2o_dev->iop;
721 tid = i2o_dev->lct_data.tid; 724 tid = i2o_dev->lct_data.tid;
722 725
723 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 726 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
724 if (m == I2O_QUEUE_EMPTY) 727 if (IS_ERR(msg))
725 return SCSI_MLQUEUE_HOST_BUSY; 728 return SCSI_MLQUEUE_HOST_BUSY;
726 729
727 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 730 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
728 writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid, 731 msg->u.head[1] =
729 &msg->u.head[1]); 732 cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
730 writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]); 733 msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
731 734
732 if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT)) 735 if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
733 status = SUCCESS; 736 status = SUCCESS;
734 737
735 return status; 738 return status;
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 4eb53258842e..492167446936 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -32,7 +32,7 @@
32#include "core.h" 32#include "core.h"
33 33
34#define OSM_NAME "i2o" 34#define OSM_NAME "i2o"
35#define OSM_VERSION "1.288" 35#define OSM_VERSION "1.325"
36#define OSM_DESCRIPTION "I2O subsystem" 36#define OSM_DESCRIPTION "I2O subsystem"
37 37
38/* global I2O controller list */ 38/* global I2O controller list */
@@ -47,27 +47,6 @@ static struct i2o_dma i2o_systab;
47static int i2o_hrt_get(struct i2o_controller *c); 47static int i2o_hrt_get(struct i2o_controller *c);
48 48
49/** 49/**
50 * i2o_msg_nop - Returns a message which is not used
51 * @c: I2O controller from which the message was created
52 * @m: message which should be returned
53 *
54 * If you fetch a message via i2o_msg_get, and can't use it, you must
55 * return the message with this function. Otherwise the message frame
56 * is lost.
57 */
58void i2o_msg_nop(struct i2o_controller *c, u32 m)
59{
60 struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
61
62 writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
63 writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
64 &msg->u.head[1]);
65 writel(0, &msg->u.head[2]);
66 writel(0, &msg->u.head[3]);
67 i2o_msg_post(c, m);
68};
69
70/**
71 * i2o_msg_get_wait - obtain an I2O message from the IOP 50 * i2o_msg_get_wait - obtain an I2O message from the IOP
72 * @c: I2O controller 51 * @c: I2O controller
 73 * @msg: pointer to an I2O message pointer 52 * @msg: pointer to an I2O message pointer
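
i2o_msg_nop() disappears from iop.c here; since the series moves the basic message primitives out of the core, it presumably survives as an inline next to i2o_msg_get() in the i2o header rather than going away. Its job is to hand an unusable frame back to the IOP as a three-word UtilNOP message; a sketch of that frame, with the constants assumed to match those used elsewhere in this diff (the size field sits in the upper 16 bits of word 0):

#include <stdint.h>
#include <stdio.h>

/* Frame constants as assumed by this sketch. */
#define THREE_WORD_MSG_SIZE 0x00030000
#define SGL_OFFSET_0        0x00000000
#define I2O_CMD_UTIL_NOP    0x00
#define HOST_TID            1
#define ADAPTER_TID         0

/* Build the words of a UtilNOP frame: posting one returns an otherwise
 * unusable frame so the IOP's inbound free list stays intact. */
static void build_nop(uint32_t head[4])
{
        head[0] = THREE_WORD_MSG_SIZE | SGL_OFFSET_0;
        head[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID;
        head[2] = 0;
        head[3] = 0;
}

int main(void)
{
        uint32_t head[4];

        build_nop(head);
        printf("NOP frame: %08x %08x %08x %08x\n",
               (unsigned)head[0], (unsigned)head[1],
               (unsigned)head[2], (unsigned)head[3]);
        return 0;
}
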
@@ -81,22 +60,21 @@ void i2o_msg_nop(struct i2o_controller *c, u32 m)
81 * address from the read port (see the i2o spec). If no message is 60 * address from the read port (see the i2o spec). If no message is
 82 * available returns I2O_QUEUE_EMPTY and msg is left untouched. 61 * available returns I2O_QUEUE_EMPTY and msg is left untouched.
83 */ 62 */
84u32 i2o_msg_get_wait(struct i2o_controller *c, 63struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait)
85 struct i2o_message __iomem ** msg, int wait)
86{ 64{
87 unsigned long timeout = jiffies + wait * HZ; 65 unsigned long timeout = jiffies + wait * HZ;
88 u32 m; 66 struct i2o_message *msg;
89 67
90 while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) { 68 while (IS_ERR(msg = i2o_msg_get(c))) {
91 if (time_after(jiffies, timeout)) { 69 if (time_after(jiffies, timeout)) {
92 osm_debug("%s: Timeout waiting for message frame.\n", 70 osm_debug("%s: Timeout waiting for message frame.\n",
93 c->name); 71 c->name);
94 return I2O_QUEUE_EMPTY; 72 return ERR_PTR(-ETIMEDOUT);
95 } 73 }
96 schedule_timeout_uninterruptible(1); 74 schedule_timeout_uninterruptible(1);
97 } 75 }
98 76
99 return m; 77 return msg;
100}; 78};
101 79
102#if BITS_PER_LONG == 64 80#if BITS_PER_LONG == 64
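
The rewritten i2o_msg_get_wait() keeps the old bounded poll (try, yield for one tick, retry until the deadline passes) but folds the result into a single error-pointer return. The shape of that poll-until-deadline loop, as a stand-alone sketch with time(2) standing in for jiffies/time_after():

#include <stdio.h>
#include <time.h>
#include <errno.h>

/* Stand-in resource acquisition: fails the first few attempts. */
static int try_get(int *attempts)
{
        return ++(*attempts) < 3 ? -EBUSY : 0;
}

/* Poll with a deadline, sleeping briefly between attempts: the same
 * shape as the jiffies/time_after() loop in the diff. */
static int get_wait(int wait_seconds)
{
        time_t deadline = time(NULL) + wait_seconds;
        int attempts = 0;
        int rc;

        while ((rc = try_get(&attempts)) == -EBUSY) {
                if (time(NULL) > deadline)
                        return -ETIMEDOUT;
                /* schedule_timeout_uninterruptible(1) equivalent */
                nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
        }
        return rc;
}

int main(void)
{
        printf("get_wait: %d\n", get_wait(5));
        return 0;
}
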
@@ -301,8 +279,7 @@ struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
301 */ 279 */
302static int i2o_iop_quiesce(struct i2o_controller *c) 280static int i2o_iop_quiesce(struct i2o_controller *c)
303{ 281{
304 struct i2o_message __iomem *msg; 282 struct i2o_message *msg;
305 u32 m;
306 i2o_status_block *sb = c->status_block.virt; 283 i2o_status_block *sb = c->status_block.virt;
307 int rc; 284 int rc;
308 285
@@ -313,16 +290,17 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
313 (sb->iop_state != ADAPTER_STATE_OPERATIONAL)) 290 (sb->iop_state != ADAPTER_STATE_OPERATIONAL))
314 return 0; 291 return 0;
315 292
316 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 293 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
317 if (m == I2O_QUEUE_EMPTY) 294 if (IS_ERR(msg))
318 return -ETIMEDOUT; 295 return PTR_ERR(msg);
319 296
320 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 297 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
321 writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID, 298 msg->u.head[1] =
322 &msg->u.head[1]); 299 cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 |
300 ADAPTER_TID);
323 301
324 /* Long timeout needed for quiesce if lots of devices */ 302 /* Long timeout needed for quiesce if lots of devices */
325 if ((rc = i2o_msg_post_wait(c, m, 240))) 303 if ((rc = i2o_msg_post_wait(c, msg, 240)))
326 osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc); 304 osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc);
327 else 305 else
328 osm_debug("%s: Quiesced.\n", c->name); 306 osm_debug("%s: Quiesced.\n", c->name);
@@ -342,8 +320,7 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
342 */ 320 */
343static int i2o_iop_enable(struct i2o_controller *c) 321static int i2o_iop_enable(struct i2o_controller *c)
344{ 322{
345 struct i2o_message __iomem *msg; 323 struct i2o_message *msg;
346 u32 m;
347 i2o_status_block *sb = c->status_block.virt; 324 i2o_status_block *sb = c->status_block.virt;
348 int rc; 325 int rc;
349 326
@@ -353,16 +330,17 @@ static int i2o_iop_enable(struct i2o_controller *c)
353 if (sb->iop_state != ADAPTER_STATE_READY) 330 if (sb->iop_state != ADAPTER_STATE_READY)
354 return -EINVAL; 331 return -EINVAL;
355 332
356 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 333 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
357 if (m == I2O_QUEUE_EMPTY) 334 if (IS_ERR(msg))
358 return -ETIMEDOUT; 335 return PTR_ERR(msg);
359 336
360 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 337 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
361 writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID, 338 msg->u.head[1] =
362 &msg->u.head[1]); 339 cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 |
340 ADAPTER_TID);
363 341
364 /* How long of a timeout do we need? */ 342 /* How long of a timeout do we need? */
365 if ((rc = i2o_msg_post_wait(c, m, 240))) 343 if ((rc = i2o_msg_post_wait(c, msg, 240)))
366 osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc); 344 osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc);
367 else 345 else
368 osm_debug("%s: Enabled.\n", c->name); 346 osm_debug("%s: Enabled.\n", c->name);
@@ -413,22 +391,22 @@ static inline void i2o_iop_enable_all(void)
413 */ 391 */
414static int i2o_iop_clear(struct i2o_controller *c) 392static int i2o_iop_clear(struct i2o_controller *c)
415{ 393{
416 struct i2o_message __iomem *msg; 394 struct i2o_message *msg;
417 u32 m;
418 int rc; 395 int rc;
419 396
420 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 397 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
421 if (m == I2O_QUEUE_EMPTY) 398 if (IS_ERR(msg))
422 return -ETIMEDOUT; 399 return PTR_ERR(msg);
423 400
424 /* Quiesce all IOPs first */ 401 /* Quiesce all IOPs first */
425 i2o_iop_quiesce_all(); 402 i2o_iop_quiesce_all();
426 403
427 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 404 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
428 writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID, 405 msg->u.head[1] =
429 &msg->u.head[1]); 406 cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 |
407 ADAPTER_TID);
430 408
431 if ((rc = i2o_msg_post_wait(c, m, 30))) 409 if ((rc = i2o_msg_post_wait(c, msg, 30)))
432 osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc); 410 osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc);
433 else 411 else
434 osm_debug("%s: Cleared.\n", c->name); 412 osm_debug("%s: Cleared.\n", c->name);
@@ -446,13 +424,13 @@ static int i2o_iop_clear(struct i2o_controller *c)
446 * Clear and (re)initialize IOP's outbound queue and post the message 424 * Clear and (re)initialize IOP's outbound queue and post the message
447 * frames to the IOP. 425 * frames to the IOP.
448 * 426 *
449 * Returns 0 on success or a negative errno code on failure. 427 * Returns 0 on success or negative error code on failure.
450 */ 428 */
451static int i2o_iop_init_outbound_queue(struct i2o_controller *c) 429static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
452{ 430{
453 volatile u8 *status = c->status.virt;
454 u32 m; 431 u32 m;
455 struct i2o_message __iomem *msg; 432 volatile u8 *status = c->status.virt;
433 struct i2o_message *msg;
456 ulong timeout; 434 ulong timeout;
457 int i; 435 int i;
458 436
@@ -460,23 +438,24 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
460 438
461 memset(c->status.virt, 0, 4); 439 memset(c->status.virt, 0, 4);
462 440
463 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 441 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
464 if (m == I2O_QUEUE_EMPTY) 442 if (IS_ERR(msg))
465 return -ETIMEDOUT; 443 return PTR_ERR(msg);
466 444
467 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 445 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
468 writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, 446 msg->u.head[1] =
469 &msg->u.head[1]); 447 cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 |
470 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 448 ADAPTER_TID);
471 writel(0x00000000, &msg->u.s.tcntxt); 449 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
472 writel(PAGE_SIZE, &msg->body[0]); 450 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
451 msg->body[0] = cpu_to_le32(PAGE_SIZE);
473 /* Outbound msg frame size in words and Initcode */ 452 /* Outbound msg frame size in words and Initcode */
474 writel(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); 453 msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80);
475 writel(0xd0000004, &msg->body[2]); 454 msg->body[2] = cpu_to_le32(0xd0000004);
476 writel(i2o_dma_low(c->status.phys), &msg->body[3]); 455 msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys));
477 writel(i2o_dma_high(c->status.phys), &msg->body[4]); 456 msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys));
478 457
479 i2o_msg_post(c, m); 458 i2o_msg_post(c, msg);
480 459
481 timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; 460 timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
482 while (*status <= I2O_CMD_IN_PROGRESS) { 461 while (*status <= I2O_CMD_IN_PROGRESS) {
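
The outbound-init frame carries the status buffer's bus address as two little-endian words; i2o_dma_low()/i2o_dma_high() split a possibly 64-bit address into 32-bit halves so the same code serves 32- and 64-bit configurations. A stand-alone equivalent of that split (helper names are stand-ins):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for i2o_dma_low()/i2o_dma_high(): split a possibly 64-bit
 * bus address into the two 32-bit words a message frame carries. On
 * 32-bit configurations the high half is simply zero. */
static uint32_t dma_low(uint64_t addr)  { return (uint32_t)addr; }
static uint32_t dma_high(uint64_t addr) { return (uint32_t)(addr >> 32); }

int main(void)
{
        uint64_t phys = 0x0000000123456000ULL;  /* example bus address */
        uint32_t body[5];

        body[3] = dma_low(phys);   /* matches msg->body[3] in the diff */
        body[4] = dma_high(phys);  /* matches msg->body[4] */

        printf("low: %#x high: %#x\n", (unsigned)body[3],
               (unsigned)body[4]);
        return 0;
}
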
@@ -511,34 +490,34 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
511static int i2o_iop_reset(struct i2o_controller *c) 490static int i2o_iop_reset(struct i2o_controller *c)
512{ 491{
513 volatile u8 *status = c->status.virt; 492 volatile u8 *status = c->status.virt;
514 struct i2o_message __iomem *msg; 493 struct i2o_message *msg;
515 u32 m;
516 unsigned long timeout; 494 unsigned long timeout;
517 i2o_status_block *sb = c->status_block.virt; 495 i2o_status_block *sb = c->status_block.virt;
518 int rc = 0; 496 int rc = 0;
519 497
520 osm_debug("%s: Resetting controller\n", c->name); 498 osm_debug("%s: Resetting controller\n", c->name);
521 499
522 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 500 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
523 if (m == I2O_QUEUE_EMPTY) 501 if (IS_ERR(msg))
524 return -ETIMEDOUT; 502 return PTR_ERR(msg);
525 503
526 memset(c->status_block.virt, 0, 8); 504 memset(c->status_block.virt, 0, 8);
527 505
528 /* Quiesce all IOPs first */ 506 /* Quiesce all IOPs first */
529 i2o_iop_quiesce_all(); 507 i2o_iop_quiesce_all();
530 508
531 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 509 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0);
532 writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID, 510 msg->u.head[1] =
533 &msg->u.head[1]); 511 cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 |
534 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 512 ADAPTER_TID);
535 writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context 513 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
536 writel(0, &msg->body[0]); 514 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
537 writel(0, &msg->body[1]); 515 msg->body[0] = cpu_to_le32(0x00000000);
538 writel(i2o_dma_low(c->status.phys), &msg->body[2]); 516 msg->body[1] = cpu_to_le32(0x00000000);
539 writel(i2o_dma_high(c->status.phys), &msg->body[3]); 517 msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys));
518 msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys));
540 519
541 i2o_msg_post(c, m); 520 i2o_msg_post(c, msg);
542 521
543 /* Wait for a reply */ 522 /* Wait for a reply */
544 timeout = jiffies + I2O_TIMEOUT_RESET * HZ; 523 timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
@@ -567,18 +546,15 @@ static int i2o_iop_reset(struct i2o_controller *c)
567 osm_debug("%s: Reset in progress, waiting for reboot...\n", 546 osm_debug("%s: Reset in progress, waiting for reboot...\n",
568 c->name); 547 c->name);
569 548
570 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); 549 while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) {
571 while (m == I2O_QUEUE_EMPTY) {
572 if (time_after(jiffies, timeout)) { 550 if (time_after(jiffies, timeout)) {
573 osm_err("%s: IOP reset timeout.\n", c->name); 551 osm_err("%s: IOP reset timeout.\n", c->name);
574 rc = -ETIMEDOUT; 552 rc = PTR_ERR(msg);
575 goto exit; 553 goto exit;
576 } 554 }
577 schedule_timeout_uninterruptible(1); 555 schedule_timeout_uninterruptible(1);
578
579 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
580 } 556 }
581 i2o_msg_nop(c, m); 557 i2o_msg_nop(c, msg);
582 558
583 /* from here all quiesce commands are safe */ 559 /* from here all quiesce commands are safe */
584 c->no_quiesce = 0; 560 c->no_quiesce = 0;
@@ -686,8 +662,7 @@ static int i2o_iop_activate(struct i2o_controller *c)
686 */ 662 */
687static int i2o_iop_systab_set(struct i2o_controller *c) 663static int i2o_iop_systab_set(struct i2o_controller *c)
688{ 664{
689 struct i2o_message __iomem *msg; 665 struct i2o_message *msg;
690 u32 m;
691 i2o_status_block *sb = c->status_block.virt; 666 i2o_status_block *sb = c->status_block.virt;
692 struct device *dev = &c->pdev->dev; 667 struct device *dev = &c->pdev->dev;
693 struct resource *root; 668 struct resource *root;
@@ -735,41 +710,38 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
735 } 710 }
736 } 711 }
737 712
738 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 713 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
739 if (m == I2O_QUEUE_EMPTY) 714 if (IS_ERR(msg))
740 return -ETIMEDOUT; 715 return PTR_ERR(msg);
741 716
742 i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len, 717 i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
743 PCI_DMA_TODEVICE); 718 PCI_DMA_TODEVICE);
744 if (!i2o_systab.phys) { 719 if (!i2o_systab.phys) {
745 i2o_msg_nop(c, m); 720 i2o_msg_nop(c, msg);
746 return -ENOMEM; 721 return -ENOMEM;
747 } 722 }
748 723
749 writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]); 724 msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6);
750 writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID, 725 msg->u.head[1] =
751 &msg->u.head[1]); 726 cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 |
727 ADAPTER_TID);
752 728
753 /* 729 /*
754 * Provide three SGL-elements: 730 * Provide three SGL-elements:
755 * System table (SysTab), Private memory space declaration and 731 * System table (SysTab), Private memory space declaration and
756 * Private i/o space declaration 732 * Private i/o space declaration
757 *
758 * FIXME: is this still true?
759 * Nasty one here. We can't use dma_alloc_coherent to send the
760 * same table to everyone. We have to go remap it for them all
761 */ 733 */
762 734
763 writel(c->unit + 2, &msg->body[0]); 735 msg->body[0] = cpu_to_le32(c->unit + 2);
764 writel(0, &msg->body[1]); 736 msg->body[1] = cpu_to_le32(0x00000000);
765 writel(0x54000000 | i2o_systab.len, &msg->body[2]); 737 msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len);
766 writel(i2o_systab.phys, &msg->body[3]); 738 msg->body[3] = cpu_to_le32(i2o_systab.phys);
767 writel(0x54000000 | sb->current_mem_size, &msg->body[4]); 739 msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size);
768 writel(sb->current_mem_base, &msg->body[5]); 740 msg->body[5] = cpu_to_le32(sb->current_mem_base);
769 writel(0xd4000000 | sb->current_io_size, &msg->body[6]); 741 msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size);
 770 writel(sb->current_io_base, &msg->body[6]); 742 msg->body[7] = cpu_to_le32(sb->current_io_base);
771 743
772 rc = i2o_msg_post_wait(c, m, 120); 744 rc = i2o_msg_post_wait(c, msg, 120);
773 745
774 dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len, 746 dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
775 PCI_DMA_TODEVICE); 747 PCI_DMA_TODEVICE);
@@ -780,8 +752,6 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
780 else 752 else
781 osm_debug("%s: SysTab set.\n", c->name); 753 osm_debug("%s: SysTab set.\n", c->name);
782 754
783 i2o_status_get(c); // Entered READY state
784
785 return rc; 755 return rc;
786} 756}
787 757
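
The three SGL elements here use the simple-element flag word 0x54000000 ORed with the length; 0xd4000000 is the same word with bit 31 set, which marks the last element of the list. With I2O_MESSAGE_SIZE(12) the frame has an eight-word body, so the I/O base belongs in body[7] and the descriptors land as sketched below (values are illustrative, names are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define SGL_SIMPLE      0x54000000u                 /* simple element  */
#define SGL_SIMPLE_LAST (SGL_SIMPLE | 0x80000000u)  /* bit 31: last    */

/* Fill the eight-word body of the SysTab-set frame the way the diff
 * does: an entry count, then three address/length descriptors. */
static void fill_systab_body(uint32_t body[8], uint32_t iops,
                             uint32_t tab_phys, uint32_t tab_len,
                             uint32_t mem_base, uint32_t mem_size,
                             uint32_t io_base, uint32_t io_size)
{
        body[0] = iops;                      /* c->unit + 2 in the diff */
        body[1] = 0;
        body[2] = SGL_SIMPLE | tab_len;      /* system table            */
        body[3] = tab_phys;
        body[4] = SGL_SIMPLE | mem_size;     /* private memory space    */
        body[5] = mem_base;
        body[6] = SGL_SIMPLE_LAST | io_size; /* private I/O space, last */
        body[7] = io_base;
}

int main(void)
{
        uint32_t body[8];

        fill_systab_body(body, 3, 0x100000, 0x90, 0xd0000000u, 0x1000,
                         0xe000, 0x100);
        for (int i = 0; i < 8; i++)
                printf("body[%d] = %#010x\n", i, (unsigned)body[i]);
        return 0;
}
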
@@ -791,7 +761,7 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
791 * 761 *
792 * Send the system table and enable the I2O controller. 762 * Send the system table and enable the I2O controller.
793 * 763 *
794 * Returns 0 on success or negativer error code on failure. 764 * Returns 0 on success or negative error code on failure.
795 */ 765 */
796static int i2o_iop_online(struct i2o_controller *c) 766static int i2o_iop_online(struct i2o_controller *c)
797{ 767{
@@ -830,7 +800,6 @@ void i2o_iop_remove(struct i2o_controller *c)
830 list_for_each_entry_safe(dev, tmp, &c->devices, list) 800 list_for_each_entry_safe(dev, tmp, &c->devices, list)
831 i2o_device_remove(dev); 801 i2o_device_remove(dev);
832 802
833 class_device_unregister(c->classdev);
834 device_del(&c->device); 803 device_del(&c->device);
835 804
836 /* Ask the IOP to switch to RESET state */ 805 /* Ask the IOP to switch to RESET state */
@@ -869,12 +838,11 @@ static int i2o_systab_build(void)
869 i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers * 838 i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers *
870 sizeof(struct i2o_sys_tbl_entry); 839 sizeof(struct i2o_sys_tbl_entry);
871 840
872 systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL); 841 systab = i2o_systab.virt = kzalloc(i2o_systab.len, GFP_KERNEL);
873 if (!systab) { 842 if (!systab) {
874 osm_err("unable to allocate memory for System Table\n"); 843 osm_err("unable to allocate memory for System Table\n");
875 return -ENOMEM; 844 return -ENOMEM;
876 } 845 }
877 memset(systab, 0, i2o_systab.len);
878 846
879 systab->version = I2OVERSION; 847 systab->version = I2OVERSION;
880 systab->change_ind = change_ind + 1; 848 systab->change_ind = change_ind + 1;
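
kzalloc() returns already-zeroed memory, so the kmalloc()+memset() pair collapses into one call. The userspace analogue of the same simplification, as a sketch:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* calloc() hands back zeroed memory, replacing malloc() + memset(),
 * just as kzalloc() replaces kmalloc() + memset() in this hunk. */
int main(void)
{
        size_t len = 64;

        /* before: */
        char *a = malloc(len);
        if (!a)
                return 1;
        memset(a, 0, len);

        /* after: */
        char *b = calloc(1, len);
        if (!b)
                return 1;

        printf("a[0]=%d b[0]=%d\n", a[0], b[0]);  /* both zero */
        free(a);
        free(b);
        return 0;
}
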
@@ -952,30 +920,30 @@ static int i2o_parse_hrt(struct i2o_controller *c)
952 */ 920 */
953int i2o_status_get(struct i2o_controller *c) 921int i2o_status_get(struct i2o_controller *c)
954{ 922{
955 struct i2o_message __iomem *msg; 923 struct i2o_message *msg;
956 u32 m;
957 volatile u8 *status_block; 924 volatile u8 *status_block;
958 unsigned long timeout; 925 unsigned long timeout;
959 926
960 status_block = (u8 *) c->status_block.virt; 927 status_block = (u8 *) c->status_block.virt;
961 memset(c->status_block.virt, 0, sizeof(i2o_status_block)); 928 memset(c->status_block.virt, 0, sizeof(i2o_status_block));
962 929
963 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 930 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
964 if (m == I2O_QUEUE_EMPTY) 931 if (IS_ERR(msg))
965 return -ETIMEDOUT; 932 return PTR_ERR(msg);
966 933
967 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 934 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0);
968 writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID, 935 msg->u.head[1] =
969 &msg->u.head[1]); 936 cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 |
970 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 937 ADAPTER_TID);
971 writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context 938 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
972 writel(0, &msg->body[0]); 939 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
973 writel(0, &msg->body[1]); 940 msg->body[0] = cpu_to_le32(0x00000000);
974 writel(i2o_dma_low(c->status_block.phys), &msg->body[2]); 941 msg->body[1] = cpu_to_le32(0x00000000);
975 writel(i2o_dma_high(c->status_block.phys), &msg->body[3]); 942 msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys));
976 writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ 943 msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys));
944 msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */
977 945
978 i2o_msg_post(c, m); 946 i2o_msg_post(c, msg);
979 947
980 /* Wait for a reply */ 948 /* Wait for a reply */
981 timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ; 949 timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
@@ -1002,7 +970,7 @@ int i2o_status_get(struct i2o_controller *c)
1002 * The HRT contains information about possible hidden devices but is 970 * The HRT contains information about possible hidden devices but is
1003 * mostly useless to us. 971 * mostly useless to us.
1004 * 972 *
1005 * Returns 0 on success or negativer error code on failure. 973 * Returns 0 on success or negative error code on failure.
1006 */ 974 */
1007static int i2o_hrt_get(struct i2o_controller *c) 975static int i2o_hrt_get(struct i2o_controller *c)
1008{ 976{
@@ -1013,20 +981,20 @@ static int i2o_hrt_get(struct i2o_controller *c)
1013 struct device *dev = &c->pdev->dev; 981 struct device *dev = &c->pdev->dev;
1014 982
1015 for (i = 0; i < I2O_HRT_GET_TRIES; i++) { 983 for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
1016 struct i2o_message __iomem *msg; 984 struct i2o_message *msg;
1017 u32 m;
1018 985
1019 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 986 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
1020 if (m == I2O_QUEUE_EMPTY) 987 if (IS_ERR(msg))
1021 return -ETIMEDOUT; 988 return PTR_ERR(msg);
1022 989
1023 writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]); 990 msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4);
1024 writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID, 991 msg->u.head[1] =
1025 &msg->u.head[1]); 992 cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 |
1026 writel(0xd0000000 | c->hrt.len, &msg->body[0]); 993 ADAPTER_TID);
1027 writel(c->hrt.phys, &msg->body[1]); 994 msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len);
995 msg->body[1] = cpu_to_le32(c->hrt.phys);
1028 996
1029 rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt); 997 rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt);
1030 998
1031 if (rc < 0) { 999 if (rc < 0) {
1032 osm_err("%s: Unable to get HRT (status=%#x)\n", c->name, 1000 osm_err("%s: Unable to get HRT (status=%#x)\n", c->name,
@@ -1051,15 +1019,6 @@ static int i2o_hrt_get(struct i2o_controller *c)
1051} 1019}
1052 1020
1053/** 1021/**
1054 * i2o_iop_free - Free the i2o_controller struct
1055 * @c: I2O controller to free
1056 */
1057void i2o_iop_free(struct i2o_controller *c)
1058{
1059 kfree(c);
1060};
1061
1062/**
1063 * i2o_iop_release - release the memory for a I2O controller 1022 * i2o_iop_release - release the memory for a I2O controller
1064 * @dev: I2O controller which should be released 1023 * @dev: I2O controller which should be released
1065 * 1024 *
@@ -1073,14 +1032,11 @@ static void i2o_iop_release(struct device *dev)
1073 i2o_iop_free(c); 1032 i2o_iop_free(c);
1074}; 1033};
1075 1034
1076/* I2O controller class */
1077static struct class *i2o_controller_class;
1078
1079/** 1035/**
1080 * i2o_iop_alloc - Allocate and initialize a i2o_controller struct 1036 * i2o_iop_alloc - Allocate and initialize a i2o_controller struct
1081 * 1037 *
1082 * Allocate the necessary memory for a i2o_controller struct and 1038 * Allocate the necessary memory for a i2o_controller struct and
1083 * initialize the lists. 1039 * initialize the lists and message mempool.
1084 * 1040 *
1085 * Returns a pointer to the I2O controller or a negative error code on 1041 * Returns a pointer to the I2O controller or a negative error code on
1086 * failure. 1042 * failure.
@@ -1089,20 +1045,29 @@ struct i2o_controller *i2o_iop_alloc(void)
1089{ 1045{
1090 static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */ 1046 static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */
1091 struct i2o_controller *c; 1047 struct i2o_controller *c;
1048 char poolname[32];
1092 1049
1093 c = kmalloc(sizeof(*c), GFP_KERNEL); 1050 c = kzalloc(sizeof(*c), GFP_KERNEL);
1094 if (!c) { 1051 if (!c) {
1095 osm_err("i2o: Insufficient memory to allocate a I2O controller." 1052 osm_err("i2o: Insufficient memory to allocate a I2O controller."
1096 "\n"); 1053 "\n");
1097 return ERR_PTR(-ENOMEM); 1054 return ERR_PTR(-ENOMEM);
1098 } 1055 }
1099 memset(c, 0, sizeof(*c)); 1056
1057 c->unit = unit++;
1058 sprintf(c->name, "iop%d", c->unit);
1059
1060 snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
1061 if (i2o_pool_alloc
1062 (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4,
1063 I2O_MSG_INPOOL_MIN)) {
1064 kfree(c);
1065 return ERR_PTR(-ENOMEM);
1066 };
1100 1067
1101 INIT_LIST_HEAD(&c->devices); 1068 INIT_LIST_HEAD(&c->devices);
1102 spin_lock_init(&c->lock); 1069 spin_lock_init(&c->lock);
1103 init_MUTEX(&c->lct_lock); 1070 init_MUTEX(&c->lct_lock);
1104 c->unit = unit++;
1105 sprintf(c->name, "iop%d", c->unit);
1106 1071
1107 device_initialize(&c->device); 1072 device_initialize(&c->device);
1108 1073
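
i2o_iop_alloc() now sets up a per-controller mempool of inbound message frames (via i2o_pool_alloc(), frame size I2O_INBOUND_MSG_FRAME_SIZE * 4 bytes, minimum I2O_MSG_INPOOL_MIN), which is what lets i2o_msg_get() hand out frames from ordinary memory. A minimal free-list sketch of the idea; the names are stand-ins, not the kernel mempool API:

#include <stdlib.h>
#include <stdio.h>

/* A minimal fixed-size frame pool: preallocate 'min' frames so one is
 * always available even under memory pressure, the idea behind the
 * per-controller message pool added in this diff. */
struct frame { struct frame *next; unsigned char data[128]; };

struct pool { struct frame *free; };

static int pool_init(struct pool *p, int min)
{
        p->free = NULL;
        while (min--) {
                struct frame *f = malloc(sizeof(*f));
                if (!f)
                        return -1;
                f->next = p->free;
                p->free = f;
        }
        return 0;
}

static struct frame *pool_get(struct pool *p)
{
        struct frame *f = p->free;
        if (f)
                p->free = f->next;
        return f;  /* NULL if exhausted; a real pool can also grow */
}

static void pool_put(struct pool *p, struct frame *f)
{
        f->next = p->free;
        p->free = f;
}

int main(void)
{
        struct pool p;
        struct frame *f;

        if (pool_init(&p, 4))
                return 1;
        f = pool_get(&p);
        printf("got frame %p\n", (void *)f);
        pool_put(&p, f);

        while ((f = pool_get(&p)))  /* tear down the pool */
                free(f);
        return 0;
}
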
@@ -1137,36 +1102,29 @@ int i2o_iop_add(struct i2o_controller *c)
1137 goto iop_reset; 1102 goto iop_reset;
1138 } 1103 }
1139 1104
1140 c->classdev = class_device_create(i2o_controller_class, NULL, MKDEV(0,0),
1141 &c->device, "iop%d", c->unit);
1142 if (IS_ERR(c->classdev)) {
1143 osm_err("%s: could not add controller class\n", c->name);
1144 goto device_del;
1145 }
1146
1147 osm_info("%s: Activating I2O controller...\n", c->name); 1105 osm_info("%s: Activating I2O controller...\n", c->name);
1148 osm_info("%s: This may take a few minutes if there are many devices\n", 1106 osm_info("%s: This may take a few minutes if there are many devices\n",
1149 c->name); 1107 c->name);
1150 1108
1151 if ((rc = i2o_iop_activate(c))) { 1109 if ((rc = i2o_iop_activate(c))) {
1152 osm_err("%s: could not activate controller\n", c->name); 1110 osm_err("%s: could not activate controller\n", c->name);
1153 goto class_del; 1111 goto device_del;
1154 } 1112 }
1155 1113
1156 osm_debug("%s: building sys table...\n", c->name); 1114 osm_debug("%s: building sys table...\n", c->name);
1157 1115
1158 if ((rc = i2o_systab_build())) 1116 if ((rc = i2o_systab_build()))
1159 goto class_del; 1117 goto device_del;
1160 1118
1161 osm_debug("%s: online controller...\n", c->name); 1119 osm_debug("%s: online controller...\n", c->name);
1162 1120
1163 if ((rc = i2o_iop_online(c))) 1121 if ((rc = i2o_iop_online(c)))
1164 goto class_del; 1122 goto device_del;
1165 1123
1166 osm_debug("%s: getting LCT...\n", c->name); 1124 osm_debug("%s: getting LCT...\n", c->name);
1167 1125
1168 if ((rc = i2o_exec_lct_get(c))) 1126 if ((rc = i2o_exec_lct_get(c)))
1169 goto class_del; 1127 goto device_del;
1170 1128
1171 list_add(&c->list, &i2o_controllers); 1129 list_add(&c->list, &i2o_controllers);
1172 1130
@@ -1176,9 +1134,6 @@ int i2o_iop_add(struct i2o_controller *c)
1176 1134
1177 return 0; 1135 return 0;
1178 1136
1179 class_del:
1180 class_device_unregister(c->classdev);
1181
1182 device_del: 1137 device_del:
1183 device_del(&c->device); 1138 device_del(&c->device);
1184 1139
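
With the class device gone, the unwind path in i2o_iop_add() shrinks from two labels to one; each failure still jumps to the label that undoes exactly the steps completed so far. The staged-unwind pattern in miniature, as a sketch:

#include <stdio.h>

static int step(const char *name, int fail)
{
        printf("%s\n", name);
        return fail ? -1 : 0;
}

/* Staged goto unwind: every failure branches to the label that
 * releases exactly what has been set up so far, the pattern
 * i2o_iop_add() keeps after losing its class_del stage. */
static int add(void)
{
        int rc;

        if ((rc = step("device_add", 0)))
                goto exit;
        if ((rc = step("activate", 0)))
                goto device_del;
        if ((rc = step("online", 1)))   /* simulate a failure */
                goto device_del;

        return 0;

device_del:
        step("device_del (undo)", 0);
exit:
        return rc;
}

int main(void)
{
        printf("add() = %d\n", add());
        return 0;
}
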
@@ -1199,28 +1154,27 @@ int i2o_iop_add(struct i2o_controller *c)
1199 * is waited for, or expected. If you do not want further notifications, 1154 * is waited for, or expected. If you do not want further notifications,
1200 * call the i2o_event_register again with a evt_mask of 0. 1155 * call the i2o_event_register again with a evt_mask of 0.
1201 * 1156 *
1202 * Returns 0 on success or -ETIMEDOUT if no message could be fetched for 1157 * Returns 0 on success or negative error code on failure.
1203 * sending the request.
1204 */ 1158 */
1205int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv, 1159int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
1206 int tcntxt, u32 evt_mask) 1160 int tcntxt, u32 evt_mask)
1207{ 1161{
1208 struct i2o_controller *c = dev->iop; 1162 struct i2o_controller *c = dev->iop;
1209 struct i2o_message __iomem *msg; 1163 struct i2o_message *msg;
1210 u32 m;
1211 1164
1212 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 1165 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
1213 if (m == I2O_QUEUE_EMPTY) 1166 if (IS_ERR(msg))
1214 return -ETIMEDOUT; 1167 return PTR_ERR(msg);
1215 1168
1216 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 1169 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
1217 writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data. 1170 msg->u.head[1] =
1218 tid, &msg->u.head[1]); 1171 cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->
1219 writel(drv->context, &msg->u.s.icntxt); 1172 lct_data.tid);
1220 writel(tcntxt, &msg->u.s.tcntxt); 1173 msg->u.s.icntxt = cpu_to_le32(drv->context);
1221 writel(evt_mask, &msg->body[0]); 1174 msg->u.s.tcntxt = cpu_to_le32(tcntxt);
1175 msg->body[0] = cpu_to_le32(evt_mask);
1222 1176
1223 i2o_msg_post(c, m); 1177 i2o_msg_post(c, msg);
1224 1178
1225 return 0; 1179 return 0;
1226}; 1180};
@@ -1239,14 +1193,8 @@ static int __init i2o_iop_init(void)
1239 1193
1240 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); 1194 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
1241 1195
1242 i2o_controller_class = class_create(THIS_MODULE, "i2o_controller");
1243 if (IS_ERR(i2o_controller_class)) {
1244 osm_err("can't register class i2o_controller\n");
1245 goto exit;
1246 }
1247
1248 if ((rc = i2o_driver_init())) 1196 if ((rc = i2o_driver_init()))
1249 goto class_exit; 1197 goto exit;
1250 1198
1251 if ((rc = i2o_exec_init())) 1199 if ((rc = i2o_exec_init()))
1252 goto driver_exit; 1200 goto driver_exit;
@@ -1262,9 +1210,6 @@ static int __init i2o_iop_init(void)
1262 driver_exit: 1210 driver_exit:
1263 i2o_driver_exit(); 1211 i2o_driver_exit();
1264 1212
1265 class_exit:
1266 class_destroy(i2o_controller_class);
1267
1268 exit: 1213 exit:
1269 return rc; 1214 return rc;
1270} 1215}
@@ -1279,7 +1224,6 @@ static void __exit i2o_iop_exit(void)
1279 i2o_pci_exit(); 1224 i2o_pci_exit();
1280 i2o_exec_exit(); 1225 i2o_exec_exit();
1281 i2o_driver_exit(); 1226 i2o_driver_exit();
1282 class_destroy(i2o_controller_class);
1283}; 1227};
1284 1228
1285module_init(i2o_iop_init); 1229module_init(i2o_iop_init);
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index ee7075fa1ec3..c5b656cdea7c 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -339,7 +339,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
339 pci_name(pdev)); 339 pci_name(pdev));
340 340
341 c->pdev = pdev; 341 c->pdev = pdev;
342 c->device.parent = get_device(&pdev->dev); 342 c->device.parent = &pdev->dev;
343 343
344 /* Cards that fall apart if you hit them with large I/O loads... */ 344 /* Cards that fall apart if you hit them with large I/O loads... */
345 if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) { 345 if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) {
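
Dropping the explicit get_device(&pdev->dev) relies on the driver core holding its own reference on the parent once the controller's device is registered, which is also why the matching put_device(c->device.parent) calls vanish below. A toy refcount showing the invariant this depends on, every get paired with exactly one put:

#include <stdio.h>

/* A toy refcount: a layer that can rely on the core taking its own
 * reference should not take another, or the count never drops to
 * zero and the object is never released. */
struct obj { int refs; };

static void get(struct obj *o) { o->refs++; }
static void put(struct obj *o)
{
        if (--o->refs == 0)
                printf("released\n");
}

int main(void)
{
        struct obj parent = { .refs = 1 }; /* creator's reference */

        get(&parent);   /* core takes one when the child is added  */
        put(&parent);   /* ...and drops it when the child goes away */

        put(&parent);   /* creator's final put actually releases it */
        return 0;
}
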
@@ -410,8 +410,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
410 if ((rc = i2o_iop_add(c))) 410 if ((rc = i2o_iop_add(c)))
411 goto uninstall; 411 goto uninstall;
412 412
413 get_device(&c->device);
414
415 if (i960) 413 if (i960)
416 pci_write_config_word(i960, 0x42, 0x03ff); 414 pci_write_config_word(i960, 0x42, 0x03ff);
417 415
@@ -424,7 +422,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
424 i2o_pci_free(c); 422 i2o_pci_free(c);
425 423
426 free_controller: 424 free_controller:
427 put_device(c->device.parent);
428 i2o_iop_free(c); 425 i2o_iop_free(c);
429 426
430 disable: 427 disable:
@@ -454,7 +451,6 @@ static void __devexit i2o_pci_remove(struct pci_dev *pdev)
454 451
455 printk(KERN_INFO "%s: Controller removed.\n", c->name); 452 printk(KERN_INFO "%s: Controller removed.\n", c->name);
456 453
457 put_device(c->device.parent);
458 put_device(&c->device); 454 put_device(&c->device);
459}; 455};
460 456
@@ -483,4 +479,5 @@ void __exit i2o_pci_exit(void)
483{ 479{
484 pci_unregister_driver(&i2o_pci_driver); 480 pci_unregister_driver(&i2o_pci_driver);
485}; 481};
482
486MODULE_DEVICE_TABLE(pci, i2o_pci_ids); 483MODULE_DEVICE_TABLE(pci, i2o_pci_ids);