author		Jeff Garzik <jeff@garzik.org>	2006-09-24 01:52:47 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-09-24 01:52:47 -0400
commit		23930fa1cebfea6f79881c588ccd1b0781e49e3f (patch)
tree		36d29e3f83661c4f5f45b6f74ac0d5f9886867a8 /drivers/scsi/aacraid/commsup.c
parent		36b35a5be0e4b406acd816e2122d153e875105be (diff)
parent		4f5537de7c1531398e84e18a24f667e49cc94208 (diff)
Merge branch 'master' into upstream
Diffstat (limited to 'drivers/scsi/aacraid/commsup.c')
-rw-r--r--	drivers/scsi/aacraid/commsup.c	279
1 file changed, 270 insertions, 9 deletions
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 3f27419c66af..8734a045558e 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -40,8 +40,10 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
 #include <asm/semaphore.h>
 
 #include "aacraid.h"
@@ -464,6 +466,8 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
 	dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
 
+	if (!dev->queues)
+		return -EBUSY;
 	q = &dev->queues->queue[AdapNormCmdQueue];
 
 	if(wait)
@@ -527,8 +531,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 				}
 				udelay(5);
 			}
-		} else
-			down(&fibptr->event_wait);
+		} else if (down_interruptible(&fibptr->event_wait)) {
+			spin_lock_irqsave(&fibptr->event_lock, flags);
+			if (fibptr->done == 0) {
+				fibptr->done = 2; /* Tell interrupt we aborted */
+				spin_unlock_irqrestore(&fibptr->event_lock, flags);
+				return -EINTR;
+			}
+			spin_unlock_irqrestore(&fibptr->event_lock, flags);
+		}
 		BUG_ON(fibptr->done == 0);
 
 		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
@@ -795,7 +806,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 
 	/* Sniff for container changes */
 
-	if (!dev)
+	if (!dev || !dev->fsa_dev)
 		return;
 	container = (u32)-1;
 
@@ -1022,13 +1033,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 	if (device) {
 		switch (device_config_needed) {
 		case DELETE:
-			scsi_remove_device(device);
-			break;
 		case CHANGE:
-			if (!dev->fsa_dev[container].valid) {
-				scsi_remove_device(device);
-				break;
-			}
 			scsi_rescan_device(&device->sdev_gendev);
 
 		default:
@@ -1045,6 +1050,262 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 
 }
 
+static int _aac_reset_adapter(struct aac_dev *aac)
+{
+	int index, quirks;
+	u32 ret;
+	int retval;
+	struct Scsi_Host *host;
+	struct scsi_device *dev;
+	struct scsi_cmnd *command;
+	struct scsi_cmnd *command_list;
+
+	/*
+	 * Assumptions:
+	 *	- host is locked.
+	 *	- in_reset is asserted, so no new i/o is getting to the
+	 *	  card.
+	 *	- The card is dead.
+	 */
+	host = aac->scsi_host_ptr;
+	scsi_block_requests(host);
+	aac_adapter_disable_int(aac);
+	spin_unlock_irq(host->host_lock);
+	kthread_stop(aac->thread);
+
+	/*
+	 * If a positive health, means in a known DEAD PANIC
+	 * state and the adapter could be reset to `try again'.
+	 */
+	retval = aac_adapter_check_health(aac);
+	if (retval == 0)
+		retval = aac_adapter_sync_cmd(aac, IOP_RESET_ALWAYS,
+		  0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
+	if (retval)
+		retval = aac_adapter_sync_cmd(aac, IOP_RESET,
+		  0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
+
+	if (retval)
+		goto out;
+	if (ret != 0x00000001) {
+		retval = -ENODEV;
+		goto out;
+	}
+
+	index = aac->cardtype;
+
+	/*
+	 * Re-initialize the adapter, first free resources, then carefully
+	 * apply the initialization sequence to come back again. Only risk
+	 * is a change in Firmware dropping cache, it is assumed the caller
+	 * will ensure that i/o is quiesced and the card is flushed in that
+	 * case.
+	 */
+	aac_fib_map_free(aac);
+	aac->hw_fib_va = NULL;
+	aac->hw_fib_pa = 0;
+	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
+	aac->comm_addr = NULL;
+	aac->comm_phys = 0;
+	kfree(aac->queues);
+	aac->queues = NULL;
+	free_irq(aac->pdev->irq, aac);
+	kfree(aac->fsa_dev);
+	aac->fsa_dev = NULL;
+	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
+		if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
+		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
+			goto out;
+	} else {
+		if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) ||
+		  ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL))))
+			goto out;
+	}
+	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
+		goto out;
+	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
+		if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
+			goto out;
+	aac->thread = kthread_run(aac_command_thread, aac, aac->name);
+	if (IS_ERR(aac->thread)) {
+		retval = PTR_ERR(aac->thread);
+		goto out;
+	}
+	(void)aac_get_adapter_info(aac);
+	quirks = aac_get_driver_ident(index)->quirks;
+	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
+		host->sg_tablesize = 34;
+		host->max_sectors = (host->sg_tablesize * 8) + 112;
+	}
+	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
+		host->sg_tablesize = 17;
+		host->max_sectors = (host->sg_tablesize * 8) + 112;
+	}
+	aac_get_config_status(aac, 1);
+	aac_get_containers(aac);
+	/*
+	 * This is where the assumption that the Adapter is quiesced
+	 * is important.
+	 */
+	command_list = NULL;
+	__shost_for_each_device(dev, host) {
+		unsigned long flags;
+		spin_lock_irqsave(&dev->list_lock, flags);
+		list_for_each_entry(command, &dev->cmd_list, list)
+			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+				command->SCp.buffer = (struct scatterlist *)command_list;
+				command_list = command;
+			}
+		spin_unlock_irqrestore(&dev->list_lock, flags);
+	}
+	while ((command = command_list)) {
+		command_list = (struct scsi_cmnd *)command->SCp.buffer;
+		command->SCp.buffer = NULL;
+		command->result = DID_OK << 16
+		  | COMMAND_COMPLETE << 8
+		  | SAM_STAT_TASK_SET_FULL;
+		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+		command->scsi_done(command);
+	}
+	retval = 0;
+
+out:
+	aac->in_reset = 0;
+	scsi_unblock_requests(host);
+	spin_lock_irq(host->host_lock);
+	return retval;
+}
+
+int aac_check_health(struct aac_dev * aac)
+{
+	int BlinkLED;
+	unsigned long time_now, flagv = 0;
+	struct list_head * entry;
+	struct Scsi_Host * host;
+
+	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
+	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
+		return 0;
+
+	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
+		spin_unlock_irqrestore(&aac->fib_lock, flagv);
+		return 0; /* OK */
+	}
+
+	aac->in_reset = 1;
+
+	/* Fake up an AIF:
+	 *	aac_aifcmd.command = AifCmdEventNotify = 1
+	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
+	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
+	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
+	 *	aac.aifcmd.data[2] = AifHighPriority = 3
+	 *	aac.aifcmd.data[3] = BlinkLED
+	 */
+
+	time_now = jiffies/HZ;
+	entry = aac->fib_list.next;
+
+	/*
+	 * For each Context that is on the
+	 * fibctxList, make a copy of the
+	 * fib, and then set the event to wake up the
+	 * thread that is waiting for it.
+	 */
+	while (entry != &aac->fib_list) {
+		/*
+		 * Extract the fibctx
+		 */
+		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
+		struct hw_fib * hw_fib;
+		struct fib * fib;
+		/*
+		 * Check if the queue is getting
+		 * backlogged
+		 */
+		if (fibctx->count > 20) {
+			/*
+			 * It's *not* jiffies folks,
+			 * but jiffies / HZ, so do not
+			 * panic ...
+			 */
+			u32 time_last = fibctx->jiffies;
+			/*
+			 * Has it been > 2 minutes
+			 * since the last read off
+			 * the queue?
+			 */
+			if ((time_now - time_last) > aif_timeout) {
+				entry = entry->next;
+				aac_close_fib_context(aac, fibctx);
+				continue;
+			}
+		}
+		/*
+		 * Warning: no sleep allowed while
+		 * holding spinlock
+		 */
+		hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
+		fib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
+		if (fib && hw_fib) {
+			struct aac_aifcmd * aif;
+
+			memset(hw_fib, 0, sizeof(struct hw_fib));
+			memset(fib, 0, sizeof(struct fib));
+			fib->hw_fib = hw_fib;
+			fib->dev = aac;
+			aac_fib_init(fib);
+			fib->type = FSAFS_NTC_FIB_CONTEXT;
+			fib->size = sizeof (struct fib);
+			fib->data = hw_fib->data;
+			aif = (struct aac_aifcmd *)hw_fib->data;
+			aif->command = cpu_to_le32(AifCmdEventNotify);
+			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
+			aif->data[0] = cpu_to_le32(AifEnExpEvent);
+			aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
+			aif->data[2] = cpu_to_le32(AifHighPriority);
+			aif->data[3] = cpu_to_le32(BlinkLED);
+
+			/*
+			 * Put the FIB onto the
+			 * fibctx's fibs
+			 */
+			list_add_tail(&fib->fiblink, &fibctx->fib_list);
+			fibctx->count++;
+			/*
+			 * Set the event to wake up the
+			 * thread that is waiting.
+			 */
+			up(&fibctx->wait_sem);
+		} else {
+			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
+			kfree(fib);
+			kfree(hw_fib);
+		}
+		entry = entry->next;
+	}
+
+	spin_unlock_irqrestore(&aac->fib_lock, flagv);
+
+	if (BlinkLED < 0) {
+		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
+		goto out;
+	}
+
+	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
+
+	host = aac->scsi_host_ptr;
+	spin_lock_irqsave(host->host_lock, flagv);
+	BlinkLED = _aac_reset_adapter(aac);
+	spin_unlock_irqrestore(host->host_lock, flagv);
+	return BlinkLED;
+
+out:
+	aac->in_reset = 0;
+	return BlinkLED;
+}
+
+
 /**
  * aac_command_thread - command processing thread
  * @dev: Adapter to monitor