-rw-r--r--  drivers/block/cciss.c      | 267
-rw-r--r--  drivers/block/cciss.h      | 109
-rw-r--r--  drivers/block/cciss_cmd.h  |  34
-rw-r--r--  drivers/block/cciss_scsi.c |   6
4 files changed, 353 insertions(+), 63 deletions(-)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index cd830cb64a5d..08a2e619dc36 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -206,6 +206,11 @@ static void cciss_device_release(struct device *dev);
 static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
 static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
 
+/* performant mode helper functions */
+static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
+                        int *bucket_map);
+static void cciss_put_controller_into_performant_mode(ctlr_info_t *h);
+
 #ifdef CONFIG_PROC_FS
 static void cciss_procinit(int i);
 #else
@@ -231,6 +236,16 @@ static const struct block_device_operations cciss_fops = {
         .revalidate_disk = cciss_revalidate,
 };
 
+/* set_performant_mode: Modify the tag for cciss performant
+ * set bit 0 for pull model, bits 3-1 for block fetch
+ * register number
+ */
+static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
+{
+        if (likely(h->transMethod == CFGTBL_Trans_Performant))
+                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+}
+
 /*
  * Enqueuing and dequeuing functions for cmdlists.
  */
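A minimal illustrative sketch (editorial, not part of the patch): the helper above only ORs the two low-order fields into whatever tag the driver already carries in c->busaddr, with the block fetch register chosen from the table that calc_bucket_map() builds further down. The function name and parameters here are hypothetical.

        /* base_tag:  the tag already stored in c->busaddr
         * sg_count:  number of SG entries in the command  */
        static u32 example_performant_tag(u32 base_tag,
                        const u32 *block_fetch_table, int sg_count)
        {
                u32 pull_model = 0x1;                                /* bit 0    */
                u32 fetch_reg  = block_fetch_table[sg_count] << 1;   /* bits 3-1 */

                return base_tag | pull_model | fetch_reg;
        }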
@@ -261,6 +276,7 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h,
                 CommandList_struct *c)
 {
         unsigned long flags;
+        set_performant_mode(h, c);
         spin_lock_irqsave(&h->lock, flags);
         addQ(&h->reqQ, c);
         h->Qdepth++;
@@ -350,6 +366,28 @@ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
 
 #ifdef CONFIG_PROC_FS
 
+static inline u32 next_command(ctlr_info_t *h)
+{
+        u32 a;
+
+        if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
+                return h->access.command_completed(h);
+
+        if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+                a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+                (h->reply_pool_head)++;
+                h->commands_outstanding--;
+        } else {
+                a = FIFO_EMPTY;
+        }
+        /* Check for wraparound */
+        if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+                h->reply_pool_head = h->reply_pool;
+                h->reply_pool_wraparound ^= 1;
+        }
+        return a;
+}
+
 /*
  * Report information about this controller.
  */
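The wraparound handling above is worth spelling out (editorial note, not part of the patch): reply_pool_wraparound starts at 1 and is toggled every time reply_pool_head wraps back to the start of the ring, and a slot counts as a fresh completion only while its low-order bit matches that value, so entries left over from the previous pass fail the check and the queue reads as FIFO_EMPTY. The predicate, written out on its own:

        /* True only for slots written during the current pass of the ring. */
        static inline int reply_slot_is_new(u64 slot, unsigned char wraparound)
        {
                return (slot & 1) == wraparound;
        }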
@@ -377,7 +415,7 @@ static void cciss_seq_show_header(struct seq_file *seq)
                 h->product_name,
                 (unsigned long)h->board_id,
                 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
-                h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
+                h->firm_ver[3], (unsigned int)h->intr[PERF_MODE_INT],
                 h->num_luns,
                 h->Qdepth, h->commands_outstanding,
                 h->maxQsinceinit, h->max_outstanding, h->maxSG);
@@ -3126,13 +3164,13 @@ after_error_processing:
 
 static inline u32 cciss_tag_contains_index(u32 tag)
 {
-#define DIRECT_LOOKUP_BIT 0x04
+#define DIRECT_LOOKUP_BIT 0x10
         return tag & DIRECT_LOOKUP_BIT;
 }
 
 static inline u32 cciss_tag_to_index(u32 tag)
 {
-#define DIRECT_LOOKUP_SHIFT 3
+#define DIRECT_LOOKUP_SHIFT 5
         return tag >> DIRECT_LOOKUP_SHIFT;
 }
 
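Editorial note: the direct-lookup constants move up because performant mode now claims tag bits 3-0 for the pull-model flag and the block fetch register, so the lookup flag shifts to bit 4 and the command index starts at bit 5. Assuming the allocation path (not shown in this diff) stores the pool index the same way, the round trip looks like:

        /* hypothetical encode side, matching cciss_tag_to_index() above */
        u32 tag = (index << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
        u32 idx = tag >> DIRECT_LOOKUP_SHIFT;    /* == index */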
@@ -3262,9 +3300,12 @@ static void do_cciss_request(struct request_queue *q)
                 blk_rq_sectors(creq), seg, chained);
 #endif /* CCISS_DEBUG */
 
-        c->Header.SGList = c->Header.SGTotal = seg + chained;
-        if (seg > h->max_cmd_sgentries)
+        c->Header.SGTotal = seg + chained;
+        if (seg <= h->max_cmd_sgentries)
+                c->Header.SGList = c->Header.SGTotal;
+        else
                 c->Header.SGList = h->max_cmd_sgentries;
+        set_performant_mode(h, c);
 
         if (likely(blk_fs_request(creq))) {
                 if(h->cciss_read == CCISS_READ_10) {
@@ -3370,10 +3411,10 @@ static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
 
         tag_index = cciss_tag_to_index(raw_tag);
         if (bad_tag(h, tag_index, raw_tag))
-                return get_next_completion(h);
+                return next_command(h);
         c = h->cmd_pool + tag_index;
         finish_cmd(h, c, raw_tag);
-        return get_next_completion(h);
+        return next_command(h);
 }
 
 /* process completion of a non-indexed command */
@@ -3390,11 +3431,11 @@ static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
                 tag_masked = cciss_tag_discard_error_bits(tag);
                 if (busaddr_masked == tag_masked) {
                         finish_cmd(h, c, raw_tag);
-                        return get_next_completion(h);
+                        return next_command(h);
                 }
         }
         bad_tag(h, h->nr_cmds + 1, raw_tag);
-        return get_next_completion(h);
+        return next_command(h);
 }
 
 static irqreturn_t do_cciss_intx(int irq, void *dev_id)
@@ -3700,6 +3741,155 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
         return -1;
 }
 
+/* Fill in bucket_map[], given nsgs (the max number of
+ * scatter gather elements supported) and bucket[],
+ * which is an array of 8 integers.  The bucket[] array
+ * contains 8 different DMA transfer sizes (in 16
+ * byte increments) which the controller uses to fetch
+ * commands.  This function fills in bucket_map[], which
+ * maps a given number of scatter gather elements to one of
+ * the 8 DMA transfer sizes.  The point of it is to allow the
+ * controller to only do as much DMA as needed to fetch the
+ * command, with the DMA transfer size encoded in the lower
+ * bits of the command address.
+ */
+static void calc_bucket_map(int bucket[], int num_buckets,
+        int nsgs, int *bucket_map)
+{
+        int i, j, b, size;
+
+        /* even a command with 0 SGs requires 4 blocks */
+#define MINIMUM_TRANSFER_BLOCKS 4
+#define NUM_BUCKETS 8
+        /* Note, bucket_map must have nsgs+1 entries. */
+        for (i = 0; i <= nsgs; i++) {
+                /* Compute size of a command with i SG entries */
+                size = i + MINIMUM_TRANSFER_BLOCKS;
+                b = num_buckets; /* Assume the biggest bucket */
+                /* Find the bucket that is just big enough */
+                for (j = 0; j < 8; j++) {
+                        if (bucket[j] >= size) {
+                                b = j;
+                                break;
+                        }
+                }
+                /* for a command with i SG entries, use bucket b. */
+                bucket_map[i] = b;
+        }
+}
+
+static void
+cciss_put_controller_into_performant_mode(ctlr_info_t *h)
+{
+        int l = 0;
+        __u32 trans_support;
+        __u32 trans_offset;
+        /*
+         *  5 = 1 s/g entry or 4k
+         *  6 = 2 s/g entry or 8k
+         *  8 = 4 s/g entry or 16k
+         * 10 = 6 s/g entry or 24k
+         */
+        int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
+        unsigned long register_value;
+
+        BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
+
+        /* Attempt to put controller into performant mode if supported */
+        /* Does board support performant mode? */
+        trans_support = readl(&(h->cfgtable->TransportSupport));
+        if (!(trans_support & PERFORMANT_MODE))
+                return;
+
+        printk(KERN_WARNING "cciss%d: Placing controller into "
+                                "performant mode\n", h->ctlr);
+        /* Performant mode demands commands on a 32 byte boundary
+         * pci_alloc_consistent aligns on page boundarys already.
+         * Just need to check if divisible by 32
+         */
+        if ((sizeof(CommandList_struct) % 32) != 0) {
+                printk(KERN_WARNING "%s %d %s\n",
+                        "cciss info: command size[",
+                        (int)sizeof(CommandList_struct),
+                        "] not divisible by 32, no performant mode..\n");
+                return;
+        }
+
+        /* Performant mode ring buffer and supporting data structures */
+        h->reply_pool = (__u64 *)pci_alloc_consistent(
+                        h->pdev, h->max_commands * sizeof(__u64),
+                        &(h->reply_pool_dhandle));
+
+        /* Need a block fetch table for performant mode */
+        h->blockFetchTable = kmalloc(((h->maxsgentries+1) *
+                sizeof(__u32)), GFP_KERNEL);
+
+        if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
+                goto clean_up;
+
+        h->reply_pool_wraparound = 1; /* spec: init to 1 */
+
+        /* Controller spec: zero out this buffer. */
+        memset(h->reply_pool, 0, h->max_commands * sizeof(__u64));
+        h->reply_pool_head = h->reply_pool;
+
+        trans_offset = readl(&(h->cfgtable->TransMethodOffset));
+        calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries,
+                                h->blockFetchTable);
+        writel(bft[0], &h->transtable->BlockFetch0);
+        writel(bft[1], &h->transtable->BlockFetch1);
+        writel(bft[2], &h->transtable->BlockFetch2);
+        writel(bft[3], &h->transtable->BlockFetch3);
+        writel(bft[4], &h->transtable->BlockFetch4);
+        writel(bft[5], &h->transtable->BlockFetch5);
+        writel(bft[6], &h->transtable->BlockFetch6);
+        writel(bft[7], &h->transtable->BlockFetch7);
+
+        /* size of controller ring buffer */
+        writel(h->max_commands, &h->transtable->RepQSize);
+        writel(1, &h->transtable->RepQCount);
+        writel(0, &h->transtable->RepQCtrAddrLow32);
+        writel(0, &h->transtable->RepQCtrAddrHigh32);
+        writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
+        writel(0, &h->transtable->RepQAddr0High32);
+        writel(CFGTBL_Trans_Performant,
+                        &(h->cfgtable->HostWrite.TransportRequest));
+
+        h->transMethod = CFGTBL_Trans_Performant;
+        writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+        /* under certain very rare conditions, this can take awhile.
+         * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+         * as we enter this code.) */
+        for (l = 0; l < MAX_CONFIG_WAIT; l++) {
+                register_value = readl(h->vaddr + SA5_DOORBELL);
+                if (!(register_value & CFGTBL_ChangeReq))
+                        break;
+                /* delay and try again */
+                set_current_state(TASK_INTERRUPTIBLE);
+                schedule_timeout(10);
+        }
+        register_value = readl(&(h->cfgtable->TransportActive));
+        if (!(register_value & CFGTBL_Trans_Performant)) {
+                printk(KERN_WARNING "cciss: unable to get board into"
+                                        " performant mode\n");
+                return;
+        }
+
+        /* Change the access methods to the performant access methods */
+        h->access = SA5_performant_access;
+
+        return;
+clean_up:
+        kfree(h->blockFetchTable);
+        if (h->reply_pool)
+                pci_free_consistent(h->pdev,
+                                h->max_commands * sizeof(__u64),
+                                h->reply_pool,
+                                h->reply_pool_dhandle);
+        return;
+
+} /* cciss_put_controller_into_performant_mode */
+
 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
  * controllers that are capable. If not, we use IO-APIC mode.
  */
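A quick worked example of the bucket mapping (editorial, not part of the patch), using the bft[] table that cciss_put_controller_into_performant_mode() above passes in: a command with i scatter-gather entries needs i + MINIMUM_TRANSFER_BLOCKS = i + 4 sixteen-byte blocks, and is assigned the first bucket large enough to hold it.

        int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4 };
        int map[MAXSGENTRIES + 1];

        calc_bucket_map(bft, 8, MAXSGENTRIES, map);
        /* map[0] == 0 : 0 SGs ->  4 blocks, bft[0] = 5 is enough             */
        /* map[2] == 1 : 2 SGs ->  6 blocks, bft[1] = 6                       */
        /* map[5] == 3 : 5 SGs ->  9 blocks, bft[2] = 8 is too small, bft[3]  */
        /* map[9] == 5 : 9 SGs -> 13 blocks, bft[5] = 20                      */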
@@ -3749,7 +3939,7 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
 default_int_mode:
 #endif /* CONFIG_PCI_MSI */
         /* if we get here we're going to use the default interrupt mode */
-        c->intr[SIMPLE_MODE_INT] = pdev->irq;
+        c->intr[PERF_MODE_INT] = pdev->irq;
         return;
 }
 
@@ -3761,6 +3951,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
         __u32 cfg_base_addr;
         __u64 cfg_base_addr_index;
         int i, prod_index, err;
+        __u32 trans_offset;
 
         subsystem_vendor_id = pdev->subsystem_vendor;
         subsystem_device_id = pdev->subsystem_device;
@@ -3874,11 +4065,16 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
         c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
                                 cfg_base_addr_index) +
                                 cfg_offset, sizeof(CfgTable_struct));
+        /* Find performant mode table. */
+        trans_offset = readl(&(c->cfgtable->TransMethodOffset));
+        c->transtable = remap_pci_mem(pci_resource_start(pdev,
+                cfg_base_addr_index) + cfg_offset+trans_offset,
+                sizeof(*c->transtable));
         c->board_id = board_id;
 
-#ifdef CCISS_DEBUG
+ #ifdef CCISS_DEBUG
         print_cfg_table(c->cfgtable);
-#endif /* CCISS_DEBUG */
+ #endif /* CCISS_DEBUG */
 
         /* Some controllers support Zero Memory Raid (ZMR).
          * When configured in ZMR mode the number of supported
@@ -3888,7 +4084,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
          * are supported on the controller then subtract 4 to
          * leave a little room for ioctl calls.
          */
-        c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
+        c->max_commands = readl(&(c->cfgtable->MaxPerformantModeCommands));
         c->maxsgentries = readl(&(c->cfgtable->MaxSGElements));
 
         /*
@@ -3933,7 +4129,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
          * kernels revealed a bug in the refetch if dom0 resides on a P600.
          */
         if(board_id == 0x3225103C) {
-                __u32 dma_prefetch;
-                __u32 dma_refetch;
+                __u32 dma_prefetch;
+                __u32 dma_refetch;
                 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
                 dma_prefetch |= 0x8000;
@@ -3944,38 +4140,8 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
         }
 
 #ifdef CCISS_DEBUG
-        printk("Trying to put board into Simple mode\n");
+        printk(KERN_WARNING "Trying to put board into Performant mode\n");
 #endif /* CCISS_DEBUG */
-        c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
-        /* Update the field, and then ring the doorbell */
-        writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
-        writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
-
-        /* under certain very rare conditions, this can take awhile.
-         * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
-         * as we enter this code.) */
-        for (i = 0; i < MAX_CONFIG_WAIT; i++) {
-                if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
-                        break;
-                /* delay and try again */
-                set_current_state(TASK_INTERRUPTIBLE);
-                schedule_timeout(msecs_to_jiffies(1));
-        }
-
-#ifdef CCISS_DEBUG
-        printk(KERN_DEBUG "I counter got to %d %x\n", i,
-                readl(c->vaddr + SA5_DOORBELL));
-#endif /* CCISS_DEBUG */
-#ifdef CCISS_DEBUG
-        print_cfg_table(c->cfgtable);
-#endif /* CCISS_DEBUG */
-
-        if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
-                printk(KERN_WARNING "cciss: unable to get board into"
-                        " simple mode\n");
-                err = -ENODEV;
-                goto err_out_free_res;
-        }
         return 0;
 
 err_out_free_res:
@@ -3984,6 +4150,7 @@ err_out_free_res:
          * Smart Array controllers that pci_enable_device does not undo
          */
         pci_release_regions(pdev);
+        cciss_put_controller_into_performant_mode(c);
         return err;
 }
 
@@ -4260,7 +4427,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
         i = alloc_cciss_hba();
         if (i < 0)
                 return -1;
-
         hba[i]->busy_initializing = 1;
         INIT_HLIST_HEAD(&hba[i]->cmpQ);
         INIT_HLIST_HEAD(&hba[i]->reqQ);
@@ -4327,7 +4493,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 
         printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
                 hba[i]->devname, pdev->device, pci_name(pdev),
-                hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
+                hba[i]->intr[PERF_MODE_INT], dac ? "" : " not");
 
         hba[i]->cmd_pool_bits =
                 kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
@@ -4433,7 +4599,7 @@ clean4:
                 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
                 hba[i]->errinfo_pool,
                 hba[i]->errinfo_pool_dhandle);
-        free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
+        free_irq(hba[i]->intr[PERF_MODE_INT], hba[i]);
 clean2:
         unregister_blkdev(hba[i]->major, hba[i]->devname);
 clean1:
@@ -4475,7 +4641,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
                 printk(KERN_WARNING "cciss%d: Error flushing cache\n",
                         h->ctlr);
         h->access.set_intr_mask(h, CCISS_INTR_OFF);
-        free_irq(h->intr[2], h);
+        free_irq(h->intr[PERF_MODE_INT], h);
 }
 
 static void __devexit cciss_remove_one(struct pci_dev *pdev)
@@ -4575,7 +4741,6 @@ static int __init cciss_init(void)
          * array of them, the size must be a multiple of 8 bytes.
          */
         BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
-
         printk(KERN_INFO DRIVER_NAME "\n");
 
         err = bus_register(&cciss_bus_type);
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index c527932d223a..8a9f5b58daa8 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -85,8 +85,8 @@ struct ctlr_info
         int max_cmd_sgentries;
         SGDescriptor_struct **cmd_sg_list;
 
-#        define DOORBELL_INT        0
-#        define PERF_MODE_INT        1
+#        define PERF_MODE_INT        0
+#        define DOORBELL_INT        1
 #        define SIMPLE_MODE_INT        2
 #        define MEMQ_MODE_INT        3
         unsigned int intr[4];
@@ -137,10 +137,27 @@ struct ctlr_info
         struct list_head scan_list;
         struct completion scan_wait;
         struct device dev;
+        /*
+         * Performant mode tables.
+         */
+        u32 trans_support;
+        u32 trans_offset;
+        struct TransTable_struct *transtable;
+        unsigned long transMethod;
+
+        /*
+         * Performant mode completion buffer
+         */
+        u64 *reply_pool;
+        dma_addr_t reply_pool_dhandle;
+        u64 *reply_pool_head;
+        size_t reply_pool_size;
+        unsigned char reply_pool_wraparound;
+        u32 *blockFetchTable;
 };
 
-/* Defining the diffent access_menthods */
-/*
+/* Defining the diffent access_methods
+ *
  * Memory mapped FIFO interface (SMART 53xx cards)
  */
 #define SA5_DOORBELL        0x20
@@ -159,6 +176,15 @@ struct ctlr_info
 #define SA5B_INTR_PENDING        0x04
 #define FIFO_EMPTY               0xffffffff
 #define CCISS_FIRMWARE_READY     0xffff0000 /* value in scratchpad register */
+/* Perf. mode flags */
+#define SA5_PERF_INTR_PENDING        0x04
+#define SA5_PERF_INTR_OFF            0x05
+#define SA5_OUTDB_STATUS_PERF_BIT    0x01
+#define SA5_OUTDB_CLEAR_PERF_BIT     0x01
+#define SA5_OUTDB_CLEAR              0xA0
+#define SA5_OUTDB_CLEAR_PERF_BIT     0x01
+#define SA5_OUTDB_STATUS             0x9C
+
 
 #define  CISS_ERROR_BIT          0x02
 
@@ -170,8 +196,9 @@ struct ctlr_info
 static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
 {
 #ifdef CCISS_DEBUG
-        printk("Sending %x - down to controller\n", c->busaddr );
-#endif /* CCISS_DEBUG */
+        printk(KERN_WARNING "cciss%d: Sending %08x - down to controller\n",
+                h->ctlr, c->busaddr);
+#endif /* CCISS_DEBUG */
         writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
         h->commands_outstanding++;
         if ( h->commands_outstanding > h->max_outstanding)
@@ -214,6 +241,20 @@ static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
                         h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
         }
 }
+
+/* Performant mode intr_mask */
+static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val)
+{
+        if (val) { /* turn on interrupts */
+                h->interrupts_enabled = 1;
+                writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+        } else {
+                h->interrupts_enabled = 0;
+                writel(SA5_PERF_INTR_OFF,
+                        h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+        }
+}
+
 /*
  * Returns true if fifo is full.
  *
@@ -250,6 +291,40 @@ static unsigned long SA5_completed(ctlr_info_t *h)
         return ( register_value);
 
 }
+
+/* Performant mode command completed */
+static unsigned long SA5_performant_completed(ctlr_info_t *h)
+{
+        unsigned long register_value = FIFO_EMPTY;
+
+        /* flush the controller write of the reply queue by reading
+         * outbound doorbell status register.
+         */
+        register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+        /* msi auto clears the interrupt pending bit. */
+        if (!(h->msi_vector || h->msix_vector)) {
+                writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
+                /* Do a read in order to flush the write to the controller
+                 * (as per spec.)
+                 */
+                register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+        }
+
+        if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+                register_value = *(h->reply_pool_head);
+                (h->reply_pool_head)++;
+                h->commands_outstanding--;
+        } else {
+                register_value = FIFO_EMPTY;
+        }
+        /* Check for wraparound */
+        if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+                h->reply_pool_head = h->reply_pool;
+                h->reply_pool_wraparound ^= 1;
+        }
+
+        return register_value;
+}
 /*
  * Returns true if an interrupt is pending..
  */
@@ -280,6 +355,20 @@ static bool SA5B_intr_pending(ctlr_info_t *h)
                 return 0 ;
 }
 
+static bool SA5_performant_intr_pending(ctlr_info_t *h)
+{
+        unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
+
+        if (!register_value)
+                return false;
+
+        if (h->msi_vector || h->msix_vector)
+                return true;
+
+        /* Read outbound doorbell to flush */
+        register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+        return register_value & SA5_OUTDB_STATUS_PERF_BIT;
+}
 
 static struct access_method SA5_access = {
         SA5_submit_command,
@@ -297,6 +386,14 @@ static struct access_method SA5B_access = {
         SA5_completed,
 };
 
+static struct access_method SA5_performant_access = {
+        SA5_submit_command,
+        SA5_performant_intr_mask,
+        SA5_fifo_full,
+        SA5_performant_intr_pending,
+        SA5_performant_completed,
+};
+
 struct board_type {
         __u32   board_id;
         char    *product_name;
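One pointer assignment is all it takes to switch a controller over, because every register access in the driver is routed through the per-board access_method table (editorial sketch, not part of the patch):

        h->access = SA5_performant_access;
        /* from here on: */
        h->access.set_intr_mask(h, CCISS_INTR_OFF); /* -> SA5_performant_intr_mask()  */
        tag = h->access.command_completed(h);       /* -> SA5_performant_completed()  */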
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index e624ff959cb6..eda6a8e6b600 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -54,6 +54,7 @@
 #define CFGTBL_AccCmds          0x00000001l
 
 #define CFGTBL_Trans_Simple     0x00000002l
+#define CFGTBL_Trans_Performant 0x00000004l
 
 #define CFGTBL_BusType_Ultra2   0x00000001l
 #define CFGTBL_BusType_Ultra3   0x00000002l
@@ -173,12 +174,15 @@ typedef struct _SGDescriptor_struct {
  * PAD_64 can be adjusted independently as needed for 32-bit
  * and 64-bits systems.
  */
-#define COMMANDLIST_ALIGNMENT (8)
+#define COMMANDLIST_ALIGNMENT (32)
 #define IS_64_BIT ((sizeof(long) - 4)/4)
 #define IS_32_BIT (!IS_64_BIT)
-#define PAD_32 (0)
+#define PAD_32 (32)
 #define PAD_64 (4)
 #define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
+#define DIRECT_LOOKUP_BIT 0x10
+#define DIRECT_LOOKUP_SHIFT 5
+
 typedef struct _CommandList_struct {
         CommandListHeader_struct Header;
         RequestBlock_struct Request;
@@ -195,7 +199,7 @@ typedef struct _CommandList_struct {
         struct completion *waiting;
         int retry_count;
         void * scsi_cmd;
-        char   pad[PADSIZE];
+        char pad[PADSIZE];
 } CommandList_struct;
 
 /* Configuration Table Structure */
@@ -209,12 +213,15 @@ typedef struct _HostWrite_struct {
 typedef struct _CfgTable_struct {
   BYTE             Signature[4];
   DWORD            SpecValence;
+#define SIMPLE_MODE      0x02
+#define PERFORMANT_MODE  0x04
+#define MEMQ_MODE        0x08
   DWORD            TransportSupport;
   DWORD            TransportActive;
   HostWrite_struct HostWrite;
   DWORD            CmdsOutMax;
   DWORD            BusTypes;
-  DWORD            Reserved;
+  DWORD            TransMethodOffset;
   BYTE             ServerName[16];
   DWORD            HeartBeat;
   DWORD            SCSI_Prefetch;
@@ -222,6 +229,25 @@ typedef struct _CfgTable_struct {
   DWORD            MaxLogicalUnits;
   DWORD            MaxPhysicalDrives;
   DWORD            MaxPhysicalDrivesPerLogicalUnit;
+  DWORD            MaxPerformantModeCommands;
 } CfgTable_struct;
+
+struct TransTable_struct {
+  u32 BlockFetch0;
+  u32 BlockFetch1;
+  u32 BlockFetch2;
+  u32 BlockFetch3;
+  u32 BlockFetch4;
+  u32 BlockFetch5;
+  u32 BlockFetch6;
+  u32 BlockFetch7;
+  u32 RepQSize;
+  u32 RepQCount;
+  u32 RepQCtrAddrLow32;
+  u32 RepQCtrAddrHigh32;
+  u32 RepQAddr0Low32;
+  u32 RepQAddr0High32;
+};
+
 #pragma pack()
 #endif /* CCISS_CMD_H */
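Editorial note on the alignment changes above: performant mode encodes the pull-model bit, block fetch register, and direct-lookup flag in tag bits 4-0, which is consistent with commands being required to sit on a 32-byte boundary so the low five bits of a command's bus address are free for those fields. Raising COMMANDLIST_ALIGNMENT to 32 and PAD_32 to 32 keeps sizeof(CommandList_struct) a multiple of 32 on 32-bit builds, and the driver already asserts this at compile time, e.g. in cciss_init():

        BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);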
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 72dae92f3cab..48be47853737 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -93,8 +93,8 @@ static struct scsi_host_template cciss_driver_template = {
 
 #pragma pack(1)
 
-#define SCSI_PAD_32 0
-#define SCSI_PAD_64 0
+#define SCSI_PAD_32 8
+#define SCSI_PAD_64 8
 
 struct cciss_scsi_cmd_stack_elem_t {
         CommandList_struct cmd;
@@ -213,6 +213,8 @@ scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
 
         /* Check alignment, see cciss_cmd.h near CommandList_struct def. */
         BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
+        /* printk(KERN_WARNING "cciss_scsi.c: 0x%08x 0x%08x 0x%08x\n",
+                0xdeadbeef, sizeof(*stk->pool), 0xbeefdead); */
         /* pci_alloc_consistent guarantees 32-bit DMA address will be used */
         stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
                 pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle);
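Likewise for the SCSI half (editorial note, not part of the patch): cciss_scsi_cmd_stack_elem_t embeds a CommandList_struct, so the pad bytes added via SCSI_PAD_32/SCSI_PAD_64 keep each element of the DMA pool a multiple of the new 32-byte alignment, which is exactly what the BUILD_BUG_ON above checks:

        /* equivalent to the check above; sizeof(*stk->pool) is this struct */
        BUILD_BUG_ON((sizeof(struct cciss_scsi_cmd_stack_elem_t) %
                        COMMANDLIST_ALIGNMENT) != 0);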