diff options
author | Mike Miller <mike.miller@hp.com> | 2010-06-02 15:58:06 -0400 |
---|---|---|
committer | Jens Axboe <jaxboe@fusionio.com> | 2010-08-07 12:12:51 -0400 |
commit | 5e216153c34ac21781110795284a784037f808e3 (patch) | |
tree | b42ceb1e884bdb397283ddb01562fe03d1f95cad /drivers/block/cciss.c | |
parent | 1d1414419f034702bf587accdf2a9ac53245e000 (diff) |
cciss: add performant mode support for Stars/Sirius
Add a mode of controller operation called Performant Mode. Even though
cciss has been deprecated in favor of hpsa there are new controllers due
out next year that HP must support in older vendor distros. Vendors
require all fixes/features be upstream. These new controllers support
only 16 commands in simple mode but support up to 1024 in performant mode.
This requires us to add this support at this late date.
The performant mode transport minimizes host PCI accesses by performinf
many completions per read. PCI writes are posted so the host can write
then immediately get off the bus not waiting for the write to complete to
the target. In the context of performant mode the host read out to a
controller pulls all posted writes into host memory ensuring the reply
queue is coherent.
Signed-off-by: Mike Miller <mike.miller@hp.com>
Cc: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'drivers/block/cciss.c')
-rw-r--r-- | drivers/block/cciss.c | 267 |
1 files changed, 216 insertions, 51 deletions
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index cd830cb64a5d..08a2e619dc36 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -206,6 +206,11 @@ static void cciss_device_release(struct device *dev); | |||
206 | static void cciss_free_gendisk(ctlr_info_t *h, int drv_index); | 206 | static void cciss_free_gendisk(ctlr_info_t *h, int drv_index); |
207 | static void cciss_free_drive_info(ctlr_info_t *h, int drv_index); | 207 | static void cciss_free_drive_info(ctlr_info_t *h, int drv_index); |
208 | 208 | ||
209 | /* performant mode helper functions */ | ||
210 | static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, | ||
211 | int *bucket_map); | ||
212 | static void cciss_put_controller_into_performant_mode(ctlr_info_t *h); | ||
213 | |||
209 | #ifdef CONFIG_PROC_FS | 214 | #ifdef CONFIG_PROC_FS |
210 | static void cciss_procinit(int i); | 215 | static void cciss_procinit(int i); |
211 | #else | 216 | #else |
@@ -231,6 +236,16 @@ static const struct block_device_operations cciss_fops = { | |||
231 | .revalidate_disk = cciss_revalidate, | 236 | .revalidate_disk = cciss_revalidate, |
232 | }; | 237 | }; |
233 | 238 | ||
239 | /* set_performant_mode: Modify the tag for cciss performant | ||
240 | * set bit 0 for pull model, bits 3-1 for block fetch | ||
241 | * register number | ||
242 | */ | ||
243 | static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c) | ||
244 | { | ||
245 | if (likely(h->transMethod == CFGTBL_Trans_Performant)) | ||
246 | c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); | ||
247 | } | ||
248 | |||
234 | /* | 249 | /* |
235 | * Enqueuing and dequeuing functions for cmdlists. | 250 | * Enqueuing and dequeuing functions for cmdlists. |
236 | */ | 251 | */ |
@@ -261,6 +276,7 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h, | |||
261 | CommandList_struct *c) | 276 | CommandList_struct *c) |
262 | { | 277 | { |
263 | unsigned long flags; | 278 | unsigned long flags; |
279 | set_performant_mode(h, c); | ||
264 | spin_lock_irqsave(&h->lock, flags); | 280 | spin_lock_irqsave(&h->lock, flags); |
265 | addQ(&h->reqQ, c); | 281 | addQ(&h->reqQ, c); |
266 | h->Qdepth++; | 282 | h->Qdepth++; |
@@ -350,6 +366,28 @@ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | |||
350 | 366 | ||
351 | #ifdef CONFIG_PROC_FS | 367 | #ifdef CONFIG_PROC_FS |
352 | 368 | ||
369 | static inline u32 next_command(ctlr_info_t *h) | ||
370 | { | ||
371 | u32 a; | ||
372 | |||
373 | if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) | ||
374 | return h->access.command_completed(h); | ||
375 | |||
376 | if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { | ||
377 | a = *(h->reply_pool_head); /* Next cmd in ring buffer */ | ||
378 | (h->reply_pool_head)++; | ||
379 | h->commands_outstanding--; | ||
380 | } else { | ||
381 | a = FIFO_EMPTY; | ||
382 | } | ||
383 | /* Check for wraparound */ | ||
384 | if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { | ||
385 | h->reply_pool_head = h->reply_pool; | ||
386 | h->reply_pool_wraparound ^= 1; | ||
387 | } | ||
388 | return a; | ||
389 | } | ||
390 | |||
353 | /* | 391 | /* |
354 | * Report information about this controller. | 392 | * Report information about this controller. |
355 | */ | 393 | */ |
@@ -377,7 +415,7 @@ static void cciss_seq_show_header(struct seq_file *seq) | |||
377 | h->product_name, | 415 | h->product_name, |
378 | (unsigned long)h->board_id, | 416 | (unsigned long)h->board_id, |
379 | h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], | 417 | h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], |
380 | h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], | 418 | h->firm_ver[3], (unsigned int)h->intr[PERF_MODE_INT], |
381 | h->num_luns, | 419 | h->num_luns, |
382 | h->Qdepth, h->commands_outstanding, | 420 | h->Qdepth, h->commands_outstanding, |
383 | h->maxQsinceinit, h->max_outstanding, h->maxSG); | 421 | h->maxQsinceinit, h->max_outstanding, h->maxSG); |
@@ -3126,13 +3164,13 @@ after_error_processing: | |||
3126 | 3164 | ||
3127 | static inline u32 cciss_tag_contains_index(u32 tag) | 3165 | static inline u32 cciss_tag_contains_index(u32 tag) |
3128 | { | 3166 | { |
3129 | #define DIRECT_LOOKUP_BIT 0x04 | 3167 | #define DIRECT_LOOKUP_BIT 0x10 |
3130 | return tag & DIRECT_LOOKUP_BIT; | 3168 | return tag & DIRECT_LOOKUP_BIT; |
3131 | } | 3169 | } |
3132 | 3170 | ||
3133 | static inline u32 cciss_tag_to_index(u32 tag) | 3171 | static inline u32 cciss_tag_to_index(u32 tag) |
3134 | { | 3172 | { |
3135 | #define DIRECT_LOOKUP_SHIFT 3 | 3173 | #define DIRECT_LOOKUP_SHIFT 5 |
3136 | return tag >> DIRECT_LOOKUP_SHIFT; | 3174 | return tag >> DIRECT_LOOKUP_SHIFT; |
3137 | } | 3175 | } |
3138 | 3176 | ||
@@ -3262,9 +3300,12 @@ static void do_cciss_request(struct request_queue *q) | |||
3262 | blk_rq_sectors(creq), seg, chained); | 3300 | blk_rq_sectors(creq), seg, chained); |
3263 | #endif /* CCISS_DEBUG */ | 3301 | #endif /* CCISS_DEBUG */ |
3264 | 3302 | ||
3265 | c->Header.SGList = c->Header.SGTotal = seg + chained; | 3303 | c->Header.SGTotal = seg + chained; |
3266 | if (seg > h->max_cmd_sgentries) | 3304 | if (seg <= h->max_cmd_sgentries) |
3305 | c->Header.SGList = c->Header.SGTotal; | ||
3306 | else | ||
3267 | c->Header.SGList = h->max_cmd_sgentries; | 3307 | c->Header.SGList = h->max_cmd_sgentries; |
3308 | set_performant_mode(h, c); | ||
3268 | 3309 | ||
3269 | if (likely(blk_fs_request(creq))) { | 3310 | if (likely(blk_fs_request(creq))) { |
3270 | if(h->cciss_read == CCISS_READ_10) { | 3311 | if(h->cciss_read == CCISS_READ_10) { |
@@ -3370,10 +3411,10 @@ static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag) | |||
3370 | 3411 | ||
3371 | tag_index = cciss_tag_to_index(raw_tag); | 3412 | tag_index = cciss_tag_to_index(raw_tag); |
3372 | if (bad_tag(h, tag_index, raw_tag)) | 3413 | if (bad_tag(h, tag_index, raw_tag)) |
3373 | return get_next_completion(h); | 3414 | return next_command(h); |
3374 | c = h->cmd_pool + tag_index; | 3415 | c = h->cmd_pool + tag_index; |
3375 | finish_cmd(h, c, raw_tag); | 3416 | finish_cmd(h, c, raw_tag); |
3376 | return get_next_completion(h); | 3417 | return next_command(h); |
3377 | } | 3418 | } |
3378 | 3419 | ||
3379 | /* process completion of a non-indexed command */ | 3420 | /* process completion of a non-indexed command */ |
@@ -3390,11 +3431,11 @@ static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) | |||
3390 | tag_masked = cciss_tag_discard_error_bits(tag); | 3431 | tag_masked = cciss_tag_discard_error_bits(tag); |
3391 | if (busaddr_masked == tag_masked) { | 3432 | if (busaddr_masked == tag_masked) { |
3392 | finish_cmd(h, c, raw_tag); | 3433 | finish_cmd(h, c, raw_tag); |
3393 | return get_next_completion(h); | 3434 | return next_command(h); |
3394 | } | 3435 | } |
3395 | } | 3436 | } |
3396 | bad_tag(h, h->nr_cmds + 1, raw_tag); | 3437 | bad_tag(h, h->nr_cmds + 1, raw_tag); |
3397 | return get_next_completion(h); | 3438 | return next_command(h); |
3398 | } | 3439 | } |
3399 | 3440 | ||
3400 | static irqreturn_t do_cciss_intx(int irq, void *dev_id) | 3441 | static irqreturn_t do_cciss_intx(int irq, void *dev_id) |
@@ -3700,6 +3741,155 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) | |||
3700 | return -1; | 3741 | return -1; |
3701 | } | 3742 | } |
3702 | 3743 | ||
3744 | /* Fill in bucket_map[], given nsgs (the max number of | ||
3745 | * scatter gather elements supported) and bucket[], | ||
3746 | * which is an array of 8 integers. The bucket[] array | ||
3747 | * contains 8 different DMA transfer sizes (in 16 | ||
3748 | * byte increments) which the controller uses to fetch | ||
3749 | * commands. This function fills in bucket_map[], which | ||
3750 | * maps a given number of scatter gather elements to one of | ||
3751 | * the 8 DMA transfer sizes. The point of it is to allow the | ||
3752 | * controller to only do as much DMA as needed to fetch the | ||
3753 | * command, with the DMA transfer size encoded in the lower | ||
3754 | * bits of the command address. | ||
3755 | */ | ||
3756 | static void calc_bucket_map(int bucket[], int num_buckets, | ||
3757 | int nsgs, int *bucket_map) | ||
3758 | { | ||
3759 | int i, j, b, size; | ||
3760 | |||
3761 | /* even a command with 0 SGs requires 4 blocks */ | ||
3762 | #define MINIMUM_TRANSFER_BLOCKS 4 | ||
3763 | #define NUM_BUCKETS 8 | ||
3764 | /* Note, bucket_map must have nsgs+1 entries. */ | ||
3765 | for (i = 0; i <= nsgs; i++) { | ||
3766 | /* Compute size of a command with i SG entries */ | ||
3767 | size = i + MINIMUM_TRANSFER_BLOCKS; | ||
3768 | b = num_buckets; /* Assume the biggest bucket */ | ||
3769 | /* Find the bucket that is just big enough */ | ||
3770 | for (j = 0; j < 8; j++) { | ||
3771 | if (bucket[j] >= size) { | ||
3772 | b = j; | ||
3773 | break; | ||
3774 | } | ||
3775 | } | ||
3776 | /* for a command with i SG entries, use bucket b. */ | ||
3777 | bucket_map[i] = b; | ||
3778 | } | ||
3779 | } | ||
3780 | |||
3781 | static void | ||
3782 | cciss_put_controller_into_performant_mode(ctlr_info_t *h) | ||
3783 | { | ||
3784 | int l = 0; | ||
3785 | __u32 trans_support; | ||
3786 | __u32 trans_offset; | ||
3787 | /* | ||
3788 | * 5 = 1 s/g entry or 4k | ||
3789 | * 6 = 2 s/g entry or 8k | ||
3790 | * 8 = 4 s/g entry or 16k | ||
3791 | * 10 = 6 s/g entry or 24k | ||
3792 | */ | ||
3793 | int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}; | ||
3794 | unsigned long register_value; | ||
3795 | |||
3796 | BUILD_BUG_ON(28 > MAXSGENTRIES + 4); | ||
3797 | |||
3798 | /* Attempt to put controller into performant mode if supported */ | ||
3799 | /* Does board support performant mode? */ | ||
3800 | trans_support = readl(&(h->cfgtable->TransportSupport)); | ||
3801 | if (!(trans_support & PERFORMANT_MODE)) | ||
3802 | return; | ||
3803 | |||
3804 | printk(KERN_WARNING "cciss%d: Placing controller into " | ||
3805 | "performant mode\n", h->ctlr); | ||
3806 | /* Performant mode demands commands on a 32 byte boundary | ||
3807 | * pci_alloc_consistent aligns on page boundarys already. | ||
3808 | * Just need to check if divisible by 32 | ||
3809 | */ | ||
3810 | if ((sizeof(CommandList_struct) % 32) != 0) { | ||
3811 | printk(KERN_WARNING "%s %d %s\n", | ||
3812 | "cciss info: command size[", | ||
3813 | (int)sizeof(CommandList_struct), | ||
3814 | "] not divisible by 32, no performant mode..\n"); | ||
3815 | return; | ||
3816 | } | ||
3817 | |||
3818 | /* Performant mode ring buffer and supporting data structures */ | ||
3819 | h->reply_pool = (__u64 *)pci_alloc_consistent( | ||
3820 | h->pdev, h->max_commands * sizeof(__u64), | ||
3821 | &(h->reply_pool_dhandle)); | ||
3822 | |||
3823 | /* Need a block fetch table for performant mode */ | ||
3824 | h->blockFetchTable = kmalloc(((h->maxsgentries+1) * | ||
3825 | sizeof(__u32)), GFP_KERNEL); | ||
3826 | |||
3827 | if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL)) | ||
3828 | goto clean_up; | ||
3829 | |||
3830 | h->reply_pool_wraparound = 1; /* spec: init to 1 */ | ||
3831 | |||
3832 | /* Controller spec: zero out this buffer. */ | ||
3833 | memset(h->reply_pool, 0, h->max_commands * sizeof(__u64)); | ||
3834 | h->reply_pool_head = h->reply_pool; | ||
3835 | |||
3836 | trans_offset = readl(&(h->cfgtable->TransMethodOffset)); | ||
3837 | calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries, | ||
3838 | h->blockFetchTable); | ||
3839 | writel(bft[0], &h->transtable->BlockFetch0); | ||
3840 | writel(bft[1], &h->transtable->BlockFetch1); | ||
3841 | writel(bft[2], &h->transtable->BlockFetch2); | ||
3842 | writel(bft[3], &h->transtable->BlockFetch3); | ||
3843 | writel(bft[4], &h->transtable->BlockFetch4); | ||
3844 | writel(bft[5], &h->transtable->BlockFetch5); | ||
3845 | writel(bft[6], &h->transtable->BlockFetch6); | ||
3846 | writel(bft[7], &h->transtable->BlockFetch7); | ||
3847 | |||
3848 | /* size of controller ring buffer */ | ||
3849 | writel(h->max_commands, &h->transtable->RepQSize); | ||
3850 | writel(1, &h->transtable->RepQCount); | ||
3851 | writel(0, &h->transtable->RepQCtrAddrLow32); | ||
3852 | writel(0, &h->transtable->RepQCtrAddrHigh32); | ||
3853 | writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); | ||
3854 | writel(0, &h->transtable->RepQAddr0High32); | ||
3855 | writel(CFGTBL_Trans_Performant, | ||
3856 | &(h->cfgtable->HostWrite.TransportRequest)); | ||
3857 | |||
3858 | h->transMethod = CFGTBL_Trans_Performant; | ||
3859 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | ||
3860 | /* under certain very rare conditions, this can take awhile. | ||
3861 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right | ||
3862 | * as we enter this code.) */ | ||
3863 | for (l = 0; l < MAX_CONFIG_WAIT; l++) { | ||
3864 | register_value = readl(h->vaddr + SA5_DOORBELL); | ||
3865 | if (!(register_value & CFGTBL_ChangeReq)) | ||
3866 | break; | ||
3867 | /* delay and try again */ | ||
3868 | set_current_state(TASK_INTERRUPTIBLE); | ||
3869 | schedule_timeout(10); | ||
3870 | } | ||
3871 | register_value = readl(&(h->cfgtable->TransportActive)); | ||
3872 | if (!(register_value & CFGTBL_Trans_Performant)) { | ||
3873 | printk(KERN_WARNING "cciss: unable to get board into" | ||
3874 | " performant mode\n"); | ||
3875 | return; | ||
3876 | } | ||
3877 | |||
3878 | /* Change the access methods to the performant access methods */ | ||
3879 | h->access = SA5_performant_access; | ||
3880 | |||
3881 | return; | ||
3882 | clean_up: | ||
3883 | kfree(h->blockFetchTable); | ||
3884 | if (h->reply_pool) | ||
3885 | pci_free_consistent(h->pdev, | ||
3886 | h->max_commands * sizeof(__u64), | ||
3887 | h->reply_pool, | ||
3888 | h->reply_pool_dhandle); | ||
3889 | return; | ||
3890 | |||
3891 | } /* cciss_put_controller_into_performant_mode */ | ||
3892 | |||
3703 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on | 3893 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on |
3704 | * controllers that are capable. If not, we use IO-APIC mode. | 3894 | * controllers that are capable. If not, we use IO-APIC mode. |
3705 | */ | 3895 | */ |
@@ -3749,7 +3939,7 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *c, | |||
3749 | default_int_mode: | 3939 | default_int_mode: |
3750 | #endif /* CONFIG_PCI_MSI */ | 3940 | #endif /* CONFIG_PCI_MSI */ |
3751 | /* if we get here we're going to use the default interrupt mode */ | 3941 | /* if we get here we're going to use the default interrupt mode */ |
3752 | c->intr[SIMPLE_MODE_INT] = pdev->irq; | 3942 | c->intr[PERF_MODE_INT] = pdev->irq; |
3753 | return; | 3943 | return; |
3754 | } | 3944 | } |
3755 | 3945 | ||
@@ -3761,6 +3951,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) | |||
3761 | __u32 cfg_base_addr; | 3951 | __u32 cfg_base_addr; |
3762 | __u64 cfg_base_addr_index; | 3952 | __u64 cfg_base_addr_index; |
3763 | int i, prod_index, err; | 3953 | int i, prod_index, err; |
3954 | __u32 trans_offset; | ||
3764 | 3955 | ||
3765 | subsystem_vendor_id = pdev->subsystem_vendor; | 3956 | subsystem_vendor_id = pdev->subsystem_vendor; |
3766 | subsystem_device_id = pdev->subsystem_device; | 3957 | subsystem_device_id = pdev->subsystem_device; |
@@ -3874,11 +4065,16 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) | |||
3874 | c->cfgtable = remap_pci_mem(pci_resource_start(pdev, | 4065 | c->cfgtable = remap_pci_mem(pci_resource_start(pdev, |
3875 | cfg_base_addr_index) + | 4066 | cfg_base_addr_index) + |
3876 | cfg_offset, sizeof(CfgTable_struct)); | 4067 | cfg_offset, sizeof(CfgTable_struct)); |
4068 | /* Find performant mode table. */ | ||
4069 | trans_offset = readl(&(c->cfgtable->TransMethodOffset)); | ||
4070 | c->transtable = remap_pci_mem(pci_resource_start(pdev, | ||
4071 | cfg_base_addr_index) + cfg_offset+trans_offset, | ||
4072 | sizeof(*c->transtable)); | ||
3877 | c->board_id = board_id; | 4073 | c->board_id = board_id; |
3878 | 4074 | ||
3879 | #ifdef CCISS_DEBUG | 4075 | #ifdef CCISS_DEBUG |
3880 | print_cfg_table(c->cfgtable); | 4076 | print_cfg_table(c->cfgtable); |
3881 | #endif /* CCISS_DEBUG */ | 4077 | #endif /* CCISS_DEBUG */ |
3882 | 4078 | ||
3883 | /* Some controllers support Zero Memory Raid (ZMR). | 4079 | /* Some controllers support Zero Memory Raid (ZMR). |
3884 | * When configured in ZMR mode the number of supported | 4080 | * When configured in ZMR mode the number of supported |
@@ -3888,7 +4084,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) | |||
3888 | * are supported on the controller then subtract 4 to | 4084 | * are supported on the controller then subtract 4 to |
3889 | * leave a little room for ioctl calls. | 4085 | * leave a little room for ioctl calls. |
3890 | */ | 4086 | */ |
3891 | c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); | 4087 | c->max_commands = readl(&(c->cfgtable->MaxPerformantModeCommands)); |
3892 | c->maxsgentries = readl(&(c->cfgtable->MaxSGElements)); | 4088 | c->maxsgentries = readl(&(c->cfgtable->MaxSGElements)); |
3893 | 4089 | ||
3894 | /* | 4090 | /* |
@@ -3933,7 +4129,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) | |||
3933 | * kernels revealed a bug in the refetch if dom0 resides on a P600. | 4129 | * kernels revealed a bug in the refetch if dom0 resides on a P600. |
3934 | */ | 4130 | */ |
3935 | if(board_id == 0x3225103C) { | 4131 | if(board_id == 0x3225103C) { |
3936 | __u32 dma_prefetch; | 4132 | __u32 dma_prefetch; |
3937 | __u32 dma_refetch; | 4133 | __u32 dma_refetch; |
3938 | dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG); | 4134 | dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG); |
3939 | dma_prefetch |= 0x8000; | 4135 | dma_prefetch |= 0x8000; |
@@ -3944,38 +4140,8 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) | |||
3944 | } | 4140 | } |
3945 | 4141 | ||
3946 | #ifdef CCISS_DEBUG | 4142 | #ifdef CCISS_DEBUG |
3947 | printk("Trying to put board into Simple mode\n"); | 4143 | printk(KERN_WARNING "Trying to put board into Performant mode\n"); |
3948 | #endif /* CCISS_DEBUG */ | 4144 | #endif /* CCISS_DEBUG */ |
3949 | c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); | ||
3950 | /* Update the field, and then ring the doorbell */ | ||
3951 | writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest)); | ||
3952 | writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL); | ||
3953 | |||
3954 | /* under certain very rare conditions, this can take awhile. | ||
3955 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right | ||
3956 | * as we enter this code.) */ | ||
3957 | for (i = 0; i < MAX_CONFIG_WAIT; i++) { | ||
3958 | if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) | ||
3959 | break; | ||
3960 | /* delay and try again */ | ||
3961 | set_current_state(TASK_INTERRUPTIBLE); | ||
3962 | schedule_timeout(msecs_to_jiffies(1)); | ||
3963 | } | ||
3964 | |||
3965 | #ifdef CCISS_DEBUG | ||
3966 | printk(KERN_DEBUG "I counter got to %d %x\n", i, | ||
3967 | readl(c->vaddr + SA5_DOORBELL)); | ||
3968 | #endif /* CCISS_DEBUG */ | ||
3969 | #ifdef CCISS_DEBUG | ||
3970 | print_cfg_table(c->cfgtable); | ||
3971 | #endif /* CCISS_DEBUG */ | ||
3972 | |||
3973 | if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { | ||
3974 | printk(KERN_WARNING "cciss: unable to get board into" | ||
3975 | " simple mode\n"); | ||
3976 | err = -ENODEV; | ||
3977 | goto err_out_free_res; | ||
3978 | } | ||
3979 | return 0; | 4145 | return 0; |
3980 | 4146 | ||
3981 | err_out_free_res: | 4147 | err_out_free_res: |
@@ -3984,6 +4150,7 @@ err_out_free_res: | |||
3984 | * Smart Array controllers that pci_enable_device does not undo | 4150 | * Smart Array controllers that pci_enable_device does not undo |
3985 | */ | 4151 | */ |
3986 | pci_release_regions(pdev); | 4152 | pci_release_regions(pdev); |
4153 | cciss_put_controller_into_performant_mode(c); | ||
3987 | return err; | 4154 | return err; |
3988 | } | 4155 | } |
3989 | 4156 | ||
@@ -4260,7 +4427,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4260 | i = alloc_cciss_hba(); | 4427 | i = alloc_cciss_hba(); |
4261 | if (i < 0) | 4428 | if (i < 0) |
4262 | return -1; | 4429 | return -1; |
4263 | |||
4264 | hba[i]->busy_initializing = 1; | 4430 | hba[i]->busy_initializing = 1; |
4265 | INIT_HLIST_HEAD(&hba[i]->cmpQ); | 4431 | INIT_HLIST_HEAD(&hba[i]->cmpQ); |
4266 | INIT_HLIST_HEAD(&hba[i]->reqQ); | 4432 | INIT_HLIST_HEAD(&hba[i]->reqQ); |
@@ -4327,7 +4493,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4327 | 4493 | ||
4328 | printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", | 4494 | printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", |
4329 | hba[i]->devname, pdev->device, pci_name(pdev), | 4495 | hba[i]->devname, pdev->device, pci_name(pdev), |
4330 | hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not"); | 4496 | hba[i]->intr[PERF_MODE_INT], dac ? "" : " not"); |
4331 | 4497 | ||
4332 | hba[i]->cmd_pool_bits = | 4498 | hba[i]->cmd_pool_bits = |
4333 | kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG) | 4499 | kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG) |
@@ -4433,7 +4599,7 @@ clean4: | |||
4433 | hba[i]->nr_cmds * sizeof(ErrorInfo_struct), | 4599 | hba[i]->nr_cmds * sizeof(ErrorInfo_struct), |
4434 | hba[i]->errinfo_pool, | 4600 | hba[i]->errinfo_pool, |
4435 | hba[i]->errinfo_pool_dhandle); | 4601 | hba[i]->errinfo_pool_dhandle); |
4436 | free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]); | 4602 | free_irq(hba[i]->intr[PERF_MODE_INT], hba[i]); |
4437 | clean2: | 4603 | clean2: |
4438 | unregister_blkdev(hba[i]->major, hba[i]->devname); | 4604 | unregister_blkdev(hba[i]->major, hba[i]->devname); |
4439 | clean1: | 4605 | clean1: |
@@ -4475,7 +4641,7 @@ static void cciss_shutdown(struct pci_dev *pdev) | |||
4475 | printk(KERN_WARNING "cciss%d: Error flushing cache\n", | 4641 | printk(KERN_WARNING "cciss%d: Error flushing cache\n", |
4476 | h->ctlr); | 4642 | h->ctlr); |
4477 | h->access.set_intr_mask(h, CCISS_INTR_OFF); | 4643 | h->access.set_intr_mask(h, CCISS_INTR_OFF); |
4478 | free_irq(h->intr[2], h); | 4644 | free_irq(h->intr[PERF_MODE_INT], h); |
4479 | } | 4645 | } |
4480 | 4646 | ||
4481 | static void __devexit cciss_remove_one(struct pci_dev *pdev) | 4647 | static void __devexit cciss_remove_one(struct pci_dev *pdev) |
@@ -4575,7 +4741,6 @@ static int __init cciss_init(void) | |||
4575 | * array of them, the size must be a multiple of 8 bytes. | 4741 | * array of them, the size must be a multiple of 8 bytes. |
4576 | */ | 4742 | */ |
4577 | BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT); | 4743 | BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT); |
4578 | |||
4579 | printk(KERN_INFO DRIVER_NAME "\n"); | 4744 | printk(KERN_INFO DRIVER_NAME "\n"); |
4580 | 4745 | ||
4581 | err = bus_register(&cciss_bus_type); | 4746 | err = bus_register(&cciss_bus_type); |