author     Mahesh Rajashekhara <Mahesh.Rajashekhara@pmcs.com>  2015-03-26 10:41:30 -0400
committer  James Bottomley <JBottomley@Odin.com>               2015-04-09 19:55:21 -0400
commit     ef6162333a6910007c0ae2237e750ffd5bf25811 (patch)
tree       4c98934c82f74a61ff394aed1f16041129748e62 /drivers/scsi
parent     f9c4259678cbde854a4e94398d66ef379178fd7c (diff)
aacraid: performance improvement changes
Signed-off-by: Mahesh Rajashekhara <Mahesh.Rajashekhara@pmcs.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Murthy Bhat <Murthy.Bhat@pmcs.com>
Signed-off-by: James Bottomley <JBottomley@Odin.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/aacraid/aacraid.h   |  5
-rw-r--r--  drivers/scsi/aacraid/comminit.c  |  4
-rw-r--r--  drivers/scsi/aacraid/commsup.c   | 37
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c    |  4
-rw-r--r--  drivers/scsi/aacraid/linit.c     | 37
-rw-r--r--  drivers/scsi/aacraid/rx.c        | 14
-rw-r--r--  drivers/scsi/aacraid/src.c       | 14
7 files changed, 57 insertions, 58 deletions
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 43d80763bb7d..2ba158b50fa2 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
  *              D E F I N E S
  *----------------------------------------------------------------------------*/
 
-#define AAC_MAX_MSIX		32	/* vectors */
+#define AAC_MAX_MSIX		8	/* vectors */
 #define AAC_PCI_MSI_ENABLE	0x8000
 
 enum {
@@ -633,7 +633,8 @@ struct aac_queue {
 	spinlock_t		lockdata;	/* Actual lock (used only on one side of the lock) */
 	struct list_head	cmdq;		/* A queue of FIBs which need to be prcessed by the FS thread. This is */
 						/* only valid for command queues which receive entries from the adapter. */
-	u32			numpending;	/* Number of entries on outstanding queue. */
+	/* Number of entries on outstanding queue. */
+	atomic_t		numpending;
 	struct aac_dev * dev;	/* Back pointer to adapter structure */
 };
 
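The aacraid.h change above is the core of the locking rework: the per-queue outstanding-command counter becomes an atomic_t, so the submit and completion paths later in this patch can update it without taking the queue spinlock. A minimal sketch of that pattern, with hypothetical names rather than the driver's actual structures:

#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/spinlock.h>

/* Hypothetical queue; only the counter handling mirrors the patch. */
struct example_queue {
	spinlock_t	lock;		/* still protects the queue contents   */
	atomic_t	numpending;	/* counter no longer needs the lock     */
};

static void example_queue_init(struct example_queue *q)
{
	spin_lock_init(&q->lock);
	atomic_set(&q->numpending, 0);	/* was: q->numpending = 0;             */
}

static void example_submit(struct example_queue *q)
{
	atomic_inc(&q->numpending);	/* was: lock; q->numpending++; unlock  */
}

static void example_complete(struct example_queue *q)
{
	atomic_dec(&q->numpending);	/* was: lock; q->numpending--; unlock  */
	pr_debug("outstanding: %d\n", atomic_read(&q->numpending));
}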
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 27432b9ea235..45db84ad322f 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -53,7 +53,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 {
 	unsigned char *base;
 	unsigned long size, align;
-	const unsigned long fibsize = 4096;
+	const unsigned long fibsize = dev->max_fib_size;
 	const unsigned long printfbufsiz = 256;
 	unsigned long host_rrq_size = 0;
 	struct aac_init *init;
@@ -182,7 +182,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 
 static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
 {
-	q->numpending = 0;
+	atomic_set(&q->numpending, 0);
 	q->dev = dev;
 	init_waitqueue_head(&q->cmdready);
 	INIT_LIST_HEAD(&q->cmdq);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e615a0b34263..1a3c0e0068de 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -208,14 +208,10 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
 
 void aac_fib_free(struct fib *fibptr)
 {
-	unsigned long flags, flagsv;
+	unsigned long flags;
 
-	spin_lock_irqsave(&fibptr->event_lock, flagsv);
-	if (fibptr->done == 2) {
-		spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
+	if (fibptr->done == 2)
 		return;
-	}
-	spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
 
 	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
 	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
@@ -321,7 +317,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
 		/* Queue is full */
 		if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
 			printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
-				qid, q->numpending);
+				qid, atomic_read(&q->numpending));
 			return 0;
 		} else {
 			*entry = q->base + *index;
@@ -414,7 +410,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	struct aac_dev * dev = fibptr->dev;
 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
 	unsigned long flags = 0;
-	unsigned long qflags;
 	unsigned long mflags = 0;
 	unsigned long sflags = 0;
 
@@ -568,9 +563,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 			int blink;
 			if (time_is_before_eq_jiffies(timeout)) {
 				struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
-				spin_lock_irqsave(q->lock, qflags);
-				q->numpending--;
-				spin_unlock_irqrestore(q->lock, qflags);
+				atomic_dec(&q->numpending);
 				if (wait == -1) {
 					printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
 						"Usually a result of a PCI interrupt routing problem;\n"
@@ -775,7 +768,6 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 
 int aac_fib_complete(struct fib *fibptr)
 {
-	unsigned long flags;
 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
 
 	/*
@@ -798,12 +790,6 @@ int aac_fib_complete(struct fib *fibptr)
 	 *	command is complete that we had sent to the adapter and this
 	 *	cdb could be reused.
 	 */
-	spin_lock_irqsave(&fibptr->event_lock, flags);
-	if (fibptr->done == 2) {
-		spin_unlock_irqrestore(&fibptr->event_lock, flags);
-		return 0;
-	}
-	spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
 	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
 	(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
@@ -1257,6 +1243,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
 	struct scsi_cmnd *command;
 	struct scsi_cmnd *command_list;
 	int jafo = 0;
+	int cpu;
 
 	/*
 	 *	Assumptions:
@@ -1319,14 +1306,26 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
 	aac->comm_phys = 0;
 	kfree(aac->queues);
 	aac->queues = NULL;
+	cpu = cpumask_first(cpu_online_mask);
 	if (aac->pdev->device == PMC_DEVICE_S6 ||
 	    aac->pdev->device == PMC_DEVICE_S7 ||
 	    aac->pdev->device == PMC_DEVICE_S8 ||
 	    aac->pdev->device == PMC_DEVICE_S9) {
 		if (aac->max_msix > 1) {
-			for (i = 0; i < aac->max_msix; i++)
+			for (i = 0; i < aac->max_msix; i++) {
+				if (irq_set_affinity_hint(
+					aac->msixentry[i].vector,
+					NULL)) {
+					printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
+						aac->name,
+						aac->id,
+						cpu);
+				}
+				cpu = cpumask_next(cpu,
+					cpu_online_mask);
 				free_irq(aac->msixentry[i].vector,
 					 &(aac->aac_msix[i]));
+			}
 			pci_disable_msix(aac->pdev);
 		} else {
 			free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
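In the _aac_reset_adapter() hunk above, the MSI-X teardown loop now clears each vector's IRQ affinity hint before freeing it. A short sketch of that ordering, using hypothetical names (irq_set_affinity_hint(), free_irq() and pci_disable_msix() are the real kernel APIs the patch relies on):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical MSI-X teardown mirroring the reset/shutdown paths above. */
static void example_free_msix(struct pci_dev *pdev,
			      struct msix_entry *entries,
			      void **irq_cookies, int nvec)
{
	int i;

	for (i = 0; i < nvec; i++) {
		/* Drop the hint first; a failure is only logged, as in the patch. */
		if (irq_set_affinity_hint(entries[i].vector, NULL))
			dev_err(&pdev->dev, "failed to reset IRQ affinity\n");
		free_irq(entries[i].vector, irq_cookies[i]);
	}
	pci_disable_msix(pdev);
}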
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 2e394662d52f..da9d9936e995 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -84,7 +84,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
 		 *	continue. The caller has already been notified that
 		 *	the fib timed out.
 		 */
-		dev->queues->queue[AdapNormCmdQueue].numpending--;
+		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
 
 		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
 			spin_unlock_irqrestore(q->lock, flags);
@@ -354,7 +354,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
 		 *	continue. The caller has already been notified that
 		 *	the fib timed out.
 		 */
-		dev->queues->queue[AdapNormCmdQueue].numpending--;
+		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
 
 		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
 			aac_fib_complete(fib);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 36653a9d9ade..7bbf1b368aa8 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -251,27 +251,15 @@ static struct aac_driver_ident aac_drivers[] = {
  *	TODO: unify with aac_scsi_cmd().
  */
 
-static int aac_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int aac_queuecommand(struct Scsi_Host *shost,
+			    struct scsi_cmnd *cmd)
 {
-	struct Scsi_Host *host = cmd->device->host;
-	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
-	u32 count = 0;
-	cmd->scsi_done = done;
-	for (; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
-		struct fib * fib = &dev->fibs[count];
-		struct scsi_cmnd * command;
-		if (fib->hw_fib_va->header.XferState &&
-			((command = fib->callback_data)) &&
-			(command == cmd) &&
-			(cmd->SCp.phase == AAC_OWNER_FIRMWARE))
-			return 0; /* Already owned by Adapter */
-	}
+	int r = 0;
 	cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
-	return (aac_scsi_cmd(cmd) ? FAILED : 0);
+	r = (aac_scsi_cmd(cmd) ? FAILED : 0);
+	return r;
 }
 
-static DEF_SCSI_QCMD(aac_queuecommand)
-
 /**
  * aac_info - Returns the host adapter name
  * @shost: Scsi host to report on
@@ -1085,6 +1073,7 @@ static struct scsi_host_template aac_driver_template = {
 static void __aac_shutdown(struct aac_dev * aac)
 {
 	int i;
+	int cpu;
 
 	if (aac->aif_thread) {
 		int i;
@@ -1099,14 +1088,26 @@ static void __aac_shutdown(struct aac_dev * aac)
 	}
 	aac_send_shutdown(aac);
 	aac_adapter_disable_int(aac);
+	cpu = cpumask_first(cpu_online_mask);
 	if (aac->pdev->device == PMC_DEVICE_S6 ||
 	    aac->pdev->device == PMC_DEVICE_S7 ||
 	    aac->pdev->device == PMC_DEVICE_S8 ||
 	    aac->pdev->device == PMC_DEVICE_S9) {
 		if (aac->max_msix > 1) {
-			for (i = 0; i < aac->max_msix; i++)
+			for (i = 0; i < aac->max_msix; i++) {
+				if (irq_set_affinity_hint(
+					aac->msixentry[i].vector,
+					NULL)) {
+					printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
+						aac->name,
+						aac->id,
+						cpu);
+				}
+				cpu = cpumask_next(cpu,
+						cpu_online_mask);
 				free_irq(aac->msixentry[i].vector,
 					 &(aac->aac_msix[i]));
+			}
 		} else {
 			free_irq(aac->pdev->irq,
 				 &(aac->aac_msix[0]));
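The first linit.c hunk above drops the DEF_SCSI_QCMD() wrapper, so ->queuecommand now runs without the SCSI host_lock held and no longer scans every fib looking for a command already owned by the adapter. A sketch of the resulting lock-free entry point (aac_scsi_cmd(), AAC_OWNER_LOWLEVEL and FAILED come from the driver and SCSI headers; the example names are hypothetical and other template fields are omitted):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"

/* Called directly by the midlayer; host_lock is not taken for us. */
static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
	return aac_scsi_cmd(cmd) ? FAILED : 0;
}

static struct scsi_host_template example_template = {
	.queuecommand	= example_queuecommand,	/* no DEF_SCSI_QCMD() wrapper */
};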
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 5c6a8703f535..9570612b80ce 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -400,16 +400,13 @@ int aac_rx_deliver_producer(struct fib * fib)
 {
 	struct aac_dev *dev = fib->dev;
 	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-	unsigned long qflags;
 	u32 Index;
 	unsigned long nointr = 0;
 
-	spin_lock_irqsave(q->lock, qflags);
 	aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);
 
-	q->numpending++;
+	atomic_inc(&q->numpending);
 	*(q->headers.producer) = cpu_to_le32(Index + 1);
-	spin_unlock_irqrestore(q->lock, qflags);
 	if (!(nointr & aac_config.irq_mod))
 		aac_adapter_notify(dev, AdapNormCmdQueue);
 
@@ -426,15 +423,12 @@ static int aac_rx_deliver_message(struct fib * fib)
 {
 	struct aac_dev *dev = fib->dev;
 	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-	unsigned long qflags;
 	u32 Index;
 	u64 addr;
 	volatile void __iomem *device;
 
 	unsigned long count = 10000000L; /* 50 seconds */
-	spin_lock_irqsave(q->lock, qflags);
-	q->numpending++;
-	spin_unlock_irqrestore(q->lock, qflags);
+	atomic_inc(&q->numpending);
 	for(;;) {
 		Index = rx_readl(dev, MUnit.InboundQueue);
 		if (unlikely(Index == 0xFFFFFFFFL))
@@ -442,9 +436,7 @@ static int aac_rx_deliver_message(struct fib * fib)
 		if (likely(Index != 0xFFFFFFFFL))
 			break;
 		if (--count == 0) {
-			spin_lock_irqsave(q->lock, qflags);
-			q->numpending--;
-			spin_unlock_irqrestore(q->lock, qflags);
+			atomic_dec(&q->numpending);
 			return -ETIMEDOUT;
 		}
 		udelay(5);
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 4a963cd4a941..4596e9dd757c 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -444,15 +444,12 @@ static int aac_src_deliver_message(struct fib *fib)
 {
 	struct aac_dev *dev = fib->dev;
 	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
-	unsigned long qflags;
 	u32 fibsize;
 	dma_addr_t address;
 	struct aac_fib_xporthdr *pFibX;
 	u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
 
-	spin_lock_irqsave(q->lock, qflags);
-	q->numpending++;
-	spin_unlock_irqrestore(q->lock, qflags);
+	atomic_inc(&q->numpending);
 
 	if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
 	    dev->max_msix > 1) {
@@ -794,6 +791,7 @@ int aac_srcv_init(struct aac_dev *dev)
 	int instance = dev->id;
 	int i, j;
 	const char *name = dev->name;
+	int cpu;
 
 	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
 	dev->a_ops.adapter_comm = aac_src_select_comm;
@@ -911,6 +909,7 @@ int aac_srcv_init(struct aac_dev *dev)
 	if (dev->msi_enabled)
 		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
 	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
+		cpu = cpumask_first(cpu_online_mask);
 		for (i = 0; i < dev->max_msix; i++) {
 			dev->aac_msix[i].vector_no = i;
 			dev->aac_msix[i].dev = dev;
@@ -928,6 +927,13 @@ int aac_srcv_init(struct aac_dev *dev)
 				pci_disable_msix(dev->pdev);
 				goto error_iounmap;
 			}
+			if (irq_set_affinity_hint(
+				dev->msixentry[i].vector,
+				get_cpu_mask(cpu))) {
+				printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
+					name, instance, cpu);
+			}
+			cpu = cpumask_next(cpu, cpu_online_mask);
 		}
 	} else {
 		dev->aac_msix[0].vector_no = 0;
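The aac_srcv_init() hunk above spreads the MSI-X vectors across online CPUs by giving each vector an affinity hint for the next CPU in cpu_online_mask. A sketch of that walk with hypothetical names (the driver itself does not wrap around; the wrap below is an added safety check for the case where vectors outnumber online CPUs):

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hint one online CPU per MSI-X vector, as in aac_srcv_init() above. */
static void example_spread_vectors(struct msix_entry *entries, int nvec)
{
	int cpu = cpumask_first(cpu_online_mask);
	int i;

	for (i = 0; i < nvec; i++) {
		if (irq_set_affinity_hint(entries[i].vector, get_cpu_mask(cpu)))
			pr_err("failed to set IRQ affinity for cpu %d\n", cpu);
		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)	/* wrap if vectors outnumber CPUs */
			cpu = cpumask_first(cpu_online_mask);
	}
}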