about summary refs log tree commit diff stats
path: root/drivers/scsi/hpsa.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi/hpsa.c')
-rw-r--r--  drivers/scsi/hpsa.c  330
1 files changed, 152 insertions, 178 deletions
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 03697ba94251..183d3a43c280 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -43,6 +43,7 @@
43#include <scsi/scsi_cmnd.h> 43#include <scsi/scsi_cmnd.h>
44#include <scsi/scsi_device.h> 44#include <scsi/scsi_device.h>
45#include <scsi/scsi_host.h> 45#include <scsi/scsi_host.h>
46#include <scsi/scsi_tcq.h>
46#include <linux/cciss_ioctl.h> 47#include <linux/cciss_ioctl.h>
47#include <linux/string.h> 48#include <linux/string.h>
48#include <linux/bitmap.h> 49#include <linux/bitmap.h>
@@ -52,7 +53,7 @@
52#include "hpsa.h" 53#include "hpsa.h"
53 54
54/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ 55/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
55#define HPSA_DRIVER_VERSION "2.0.1-3" 56#define HPSA_DRIVER_VERSION "2.0.2-1"
56#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" 57#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
57 58
58/* How long to wait (in milliseconds) for board to go into simple mode */ 59/* How long to wait (in milliseconds) for board to go into simple mode */
@@ -134,6 +135,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
134static void hpsa_scan_start(struct Scsi_Host *); 135static void hpsa_scan_start(struct Scsi_Host *);
135static int hpsa_scan_finished(struct Scsi_Host *sh, 136static int hpsa_scan_finished(struct Scsi_Host *sh,
136 unsigned long elapsed_time); 137 unsigned long elapsed_time);
138static int hpsa_change_queue_depth(struct scsi_device *sdev,
139 int qdepth, int reason);
137 140
138static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); 141static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
139static int hpsa_slave_alloc(struct scsi_device *sdev); 142static int hpsa_slave_alloc(struct scsi_device *sdev);
@@ -182,8 +185,8 @@ static struct scsi_host_template hpsa_driver_template = {
182 .queuecommand = hpsa_scsi_queue_command, 185 .queuecommand = hpsa_scsi_queue_command,
183 .scan_start = hpsa_scan_start, 186 .scan_start = hpsa_scan_start,
184 .scan_finished = hpsa_scan_finished, 187 .scan_finished = hpsa_scan_finished,
188 .change_queue_depth = hpsa_change_queue_depth,
185 .this_id = -1, 189 .this_id = -1,
186 .sg_tablesize = MAXSGENTRIES,
187 .use_clustering = ENABLE_CLUSTERING, 190 .use_clustering = ENABLE_CLUSTERING,
188 .eh_device_reset_handler = hpsa_eh_device_reset_handler, 191 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
189 .ioctl = hpsa_ioctl, 192 .ioctl = hpsa_ioctl,
@@ -208,133 +211,6 @@ static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
208 return (struct ctlr_info *) *priv; 211 return (struct ctlr_info *) *priv;
209} 212}
210 213
211static struct task_struct *hpsa_scan_thread;
212static DEFINE_MUTEX(hpsa_scan_mutex);
213static LIST_HEAD(hpsa_scan_q);
214static int hpsa_scan_func(void *data);
215
216/**
217 * add_to_scan_list() - add controller to rescan queue
218 * @h: Pointer to the controller.
219 *
220 * Adds the controller to the rescan queue if not already on the queue.
221 *
222 * returns 1 if added to the queue, 0 if skipped (could be on the
223 * queue already, or the controller could be initializing or shutting
224 * down).
225 **/
226static int add_to_scan_list(struct ctlr_info *h)
227{
228 struct ctlr_info *test_h;
229 int found = 0;
230 int ret = 0;
231
232 if (h->busy_initializing)
233 return 0;
234
235 /*
236 * If we don't get the lock, it means the driver is unloading
237 * and there's no point in scheduling a new scan.
238 */
239 if (!mutex_trylock(&h->busy_shutting_down))
240 return 0;
241
242 mutex_lock(&hpsa_scan_mutex);
243 list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
244 if (test_h == h) {
245 found = 1;
246 break;
247 }
248 }
249 if (!found && !h->busy_scanning) {
250 INIT_COMPLETION(h->scan_wait);
251 list_add_tail(&h->scan_list, &hpsa_scan_q);
252 ret = 1;
253 }
254 mutex_unlock(&hpsa_scan_mutex);
255 mutex_unlock(&h->busy_shutting_down);
256
257 return ret;
258}
259
260/**
261 * remove_from_scan_list() - remove controller from rescan queue
262 * @h: Pointer to the controller.
263 *
264 * Removes the controller from the rescan queue if present. Blocks if
265 * the controller is currently conducting a rescan. The controller
266 * can be in one of three states:
267 * 1. Doesn't need a scan
268 * 2. On the scan list, but not scanning yet (we remove it)
269 * 3. Busy scanning (and not on the list). In this case we want to wait for
270 * the scan to complete to make sure the scanning thread for this
271 * controller is completely idle.
272 **/
273static void remove_from_scan_list(struct ctlr_info *h)
274{
275 struct ctlr_info *test_h, *tmp_h;
276
277 mutex_lock(&hpsa_scan_mutex);
278 list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
279 if (test_h == h) { /* state 2. */
280 list_del(&h->scan_list);
281 complete_all(&h->scan_wait);
282 mutex_unlock(&hpsa_scan_mutex);
283 return;
284 }
285 }
286 if (h->busy_scanning) { /* state 3. */
287 mutex_unlock(&hpsa_scan_mutex);
288 wait_for_completion(&h->scan_wait);
289 } else { /* state 1, nothing to do. */
290 mutex_unlock(&hpsa_scan_mutex);
291 }
292}
293
294/* hpsa_scan_func() - kernel thread used to rescan controllers
295 * @data: Ignored.
296 *
297 * A kernel thread used scan for drive topology changes on
298 * controllers. The thread processes only one controller at a time
299 * using a queue. Controllers are added to the queue using
300 * add_to_scan_list() and removed from the queue either after done
301 * processing or using remove_from_scan_list().
302 *
303 * returns 0.
304 **/
305static int hpsa_scan_func(__attribute__((unused)) void *data)
306{
307 struct ctlr_info *h;
308 int host_no;
309
310 while (1) {
311 set_current_state(TASK_INTERRUPTIBLE);
312 schedule();
313 if (kthread_should_stop())
314 break;
315
316 while (1) {
317 mutex_lock(&hpsa_scan_mutex);
318 if (list_empty(&hpsa_scan_q)) {
319 mutex_unlock(&hpsa_scan_mutex);
320 break;
321 }
322 h = list_entry(hpsa_scan_q.next, struct ctlr_info,
323 scan_list);
324 list_del(&h->scan_list);
325 h->busy_scanning = 1;
326 mutex_unlock(&hpsa_scan_mutex);
327 host_no = h->scsi_host ? h->scsi_host->host_no : -1;
328 hpsa_scan_start(h->scsi_host);
329 complete_all(&h->scan_wait);
330 mutex_lock(&hpsa_scan_mutex);
331 h->busy_scanning = 0;
332 mutex_unlock(&hpsa_scan_mutex);
333 }
334 }
335 return 0;
336}
337
338static int check_for_unit_attention(struct ctlr_info *h, 214static int check_for_unit_attention(struct ctlr_info *h,
339 struct CommandList *c) 215 struct CommandList *c)
340{ 216{
@@ -352,21 +228,8 @@ static int check_for_unit_attention(struct ctlr_info *h,
352 break; 228 break;
353 case REPORT_LUNS_CHANGED: 229 case REPORT_LUNS_CHANGED:
354 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data " 230 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
355 "changed\n", h->ctlr); 231 "changed, action required\n", h->ctlr);
356 /* 232 /*
357 * Here, we could call add_to_scan_list and wake up the scan thread,
358 * except that it's quite likely that we will get more than one
359 * REPORT_LUNS_CHANGED condition in quick succession, which means
360 * that those which occur after the first one will likely happen
361 * *during* the hpsa_scan_thread's rescan. And the rescan code is not
362 * robust enough to restart in the middle, undoing what it has already
363 * done, and it's not clear that it's even possible to do this, since
364 * part of what it does is notify the SCSI mid layer, which starts
365 * doing it's own i/o to read partition tables and so on, and the
366 * driver doesn't have visibility to know what might need undoing.
367 * In any event, if possible, it is horribly complicated to get right
368 * so we just don't do it for now.
369 *
370 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012. 233 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
371 */ 234 */
372 break; 235 break;
@@ -393,10 +256,7 @@ static ssize_t host_store_rescan(struct device *dev,
393 struct ctlr_info *h; 256 struct ctlr_info *h;
394 struct Scsi_Host *shost = class_to_shost(dev); 257 struct Scsi_Host *shost = class_to_shost(dev);
395 h = shost_to_hba(shost); 258 h = shost_to_hba(shost);
396 if (add_to_scan_list(h)) { 259 hpsa_scan_start(h->scsi_host);
397 wake_up_process(hpsa_scan_thread);
398 wait_for_completion_interruptible(&h->scan_wait);
399 }
400 return count; 260 return count;
401} 261}
402 262
@@ -983,6 +843,76 @@ static void hpsa_scsi_setup(struct ctlr_info *h)
983 spin_lock_init(&h->devlock); 843 spin_lock_init(&h->devlock);
984} 844}
985 845
846static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
847{
848 int i;
849
850 if (!h->cmd_sg_list)
851 return;
852 for (i = 0; i < h->nr_cmds; i++) {
853 kfree(h->cmd_sg_list[i]);
854 h->cmd_sg_list[i] = NULL;
855 }
856 kfree(h->cmd_sg_list);
857 h->cmd_sg_list = NULL;
858}
859
860static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
861{
862 int i;
863
864 if (h->chainsize <= 0)
865 return 0;
866
867 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
868 GFP_KERNEL);
869 if (!h->cmd_sg_list)
870 return -ENOMEM;
871 for (i = 0; i < h->nr_cmds; i++) {
872 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
873 h->chainsize, GFP_KERNEL);
874 if (!h->cmd_sg_list[i])
875 goto clean;
876 }
877 return 0;
878
879clean:
880 hpsa_free_sg_chain_blocks(h);
881 return -ENOMEM;
882}
883
884static void hpsa_map_sg_chain_block(struct ctlr_info *h,
885 struct CommandList *c)
886{
887 struct SGDescriptor *chain_sg, *chain_block;
888 u64 temp64;
889
890 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
891 chain_block = h->cmd_sg_list[c->cmdindex];
892 chain_sg->Ext = HPSA_SG_CHAIN;
893 chain_sg->Len = sizeof(*chain_sg) *
894 (c->Header.SGTotal - h->max_cmd_sg_entries);
895 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
896 PCI_DMA_TODEVICE);
897 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
898 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
899}
900
901static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
902 struct CommandList *c)
903{
904 struct SGDescriptor *chain_sg;
905 union u64bit temp64;
906
907 if (c->Header.SGTotal <= h->max_cmd_sg_entries)
908 return;
909
910 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
911 temp64.val32.lower = chain_sg->Addr.lower;
912 temp64.val32.upper = chain_sg->Addr.upper;
913 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
914}
915
986static void complete_scsi_command(struct CommandList *cp, 916static void complete_scsi_command(struct CommandList *cp,
987 int timeout, u32 tag) 917 int timeout, u32 tag)
988{ 918{
@@ -999,10 +929,12 @@ static void complete_scsi_command(struct CommandList *cp,
999 h = cp->h; 929 h = cp->h;
1000 930
1001 scsi_dma_unmap(cmd); /* undo the DMA mappings */ 931 scsi_dma_unmap(cmd); /* undo the DMA mappings */
932 if (cp->Header.SGTotal > h->max_cmd_sg_entries)
933 hpsa_unmap_sg_chain_block(h, cp);
1002 934
1003 cmd->result = (DID_OK << 16); /* host byte */ 935 cmd->result = (DID_OK << 16); /* host byte */
1004 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ 936 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1005 cmd->result |= (ei->ScsiStatus << 1); 937 cmd->result |= ei->ScsiStatus;
1006 938
1007 /* copy the sense data whether we need to or not. */ 939 /* copy the sense data whether we need to or not. */
1008 memcpy(cmd->sense_buffer, ei->SenseInfo, 940 memcpy(cmd->sense_buffer, ei->SenseInfo,
@@ -1203,6 +1135,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
1203 sh->max_id = HPSA_MAX_LUN; 1135 sh->max_id = HPSA_MAX_LUN;
1204 sh->can_queue = h->nr_cmds; 1136 sh->can_queue = h->nr_cmds;
1205 sh->cmd_per_lun = h->nr_cmds; 1137 sh->cmd_per_lun = h->nr_cmds;
1138 sh->sg_tablesize = h->maxsgentries;
1206 h->scsi_host = sh; 1139 h->scsi_host = sh;
1207 sh->hostdata[0] = (unsigned long) h; 1140 sh->hostdata[0] = (unsigned long) h;
1208 sh->irq = h->intr[PERF_MODE_INT]; 1141 sh->irq = h->intr[PERF_MODE_INT];
@@ -1382,7 +1315,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1382 1315
1383 if (c == NULL) { /* trouble... */ 1316 if (c == NULL) { /* trouble... */
1384 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 1317 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1385 return -1; 1318 return -ENOMEM;
1386 } 1319 }
1387 1320
1388 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG); 1321 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
@@ -1904,16 +1837,17 @@ out:
1904 * dma mapping and fills in the scatter gather entries of the 1837 * dma mapping and fills in the scatter gather entries of the
1905 * hpsa command, cp. 1838 * hpsa command, cp.
1906 */ 1839 */
1907static int hpsa_scatter_gather(struct pci_dev *pdev, 1840static int hpsa_scatter_gather(struct ctlr_info *h,
1908 struct CommandList *cp, 1841 struct CommandList *cp,
1909 struct scsi_cmnd *cmd) 1842 struct scsi_cmnd *cmd)
1910{ 1843{
1911 unsigned int len; 1844 unsigned int len;
1912 struct scatterlist *sg; 1845 struct scatterlist *sg;
1913 u64 addr64; 1846 u64 addr64;
1914 int use_sg, i; 1847 int use_sg, i, sg_index, chained;
1848 struct SGDescriptor *curr_sg;
1915 1849
1916 BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES); 1850 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
1917 1851
1918 use_sg = scsi_dma_map(cmd); 1852 use_sg = scsi_dma_map(cmd);
1919 if (use_sg < 0) 1853 if (use_sg < 0)
@@ -1922,15 +1856,33 @@ static int hpsa_scatter_gather(struct pci_dev *pdev,
1922 if (!use_sg) 1856 if (!use_sg)
1923 goto sglist_finished; 1857 goto sglist_finished;
1924 1858
1859 curr_sg = cp->SG;
1860 chained = 0;
1861 sg_index = 0;
1925 scsi_for_each_sg(cmd, sg, use_sg, i) { 1862 scsi_for_each_sg(cmd, sg, use_sg, i) {
1863 if (i == h->max_cmd_sg_entries - 1 &&
1864 use_sg > h->max_cmd_sg_entries) {
1865 chained = 1;
1866 curr_sg = h->cmd_sg_list[cp->cmdindex];
1867 sg_index = 0;
1868 }
1926 addr64 = (u64) sg_dma_address(sg); 1869 addr64 = (u64) sg_dma_address(sg);
1927 len = sg_dma_len(sg); 1870 len = sg_dma_len(sg);
1928 cp->SG[i].Addr.lower = 1871 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
1929 (u32) (addr64 & (u64) 0x00000000FFFFFFFF); 1872 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
1930 cp->SG[i].Addr.upper = 1873 curr_sg->Len = len;
1931 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); 1874 curr_sg->Ext = 0; /* we are not chaining */
1932 cp->SG[i].Len = len; 1875 curr_sg++;
1933 cp->SG[i].Ext = 0; /* we are not chaining */ 1876 }
1877
1878 if (use_sg + chained > h->maxSG)
1879 h->maxSG = use_sg + chained;
1880
1881 if (chained) {
1882 cp->Header.SGList = h->max_cmd_sg_entries;
1883 cp->Header.SGTotal = (u16) (use_sg + 1);
1884 hpsa_map_sg_chain_block(h, cp);
1885 return 0;
1934 } 1886 }
1935 1887
1936sglist_finished: 1888sglist_finished:
@@ -2026,7 +1978,7 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
2026 break; 1978 break;
2027 } 1979 }
2028 1980
2029 if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */ 1981 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
2030 cmd_free(h, c); 1982 cmd_free(h, c);
2031 return SCSI_MLQUEUE_HOST_BUSY; 1983 return SCSI_MLQUEUE_HOST_BUSY;
2032 } 1984 }
@@ -2077,6 +2029,23 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
2077 return finished; 2029 return finished;
2078} 2030}
2079 2031
2032static int hpsa_change_queue_depth(struct scsi_device *sdev,
2033 int qdepth, int reason)
2034{
2035 struct ctlr_info *h = sdev_to_hba(sdev);
2036
2037 if (reason != SCSI_QDEPTH_DEFAULT)
2038 return -ENOTSUPP;
2039
2040 if (qdepth < 1)
2041 qdepth = 1;
2042 else
2043 if (qdepth > h->nr_cmds)
2044 qdepth = h->nr_cmds;
2045 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2046 return sdev->queue_depth;
2047}
2048
2080static void hpsa_unregister_scsi(struct ctlr_info *h) 2049static void hpsa_unregister_scsi(struct ctlr_info *h)
2081{ 2050{
2082 /* we are being forcibly unloaded, and may not refuse. */ 2051 /* we are being forcibly unloaded, and may not refuse. */
@@ -2961,7 +2930,7 @@ static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
2961 return IRQ_HANDLED; 2930 return IRQ_HANDLED;
2962} 2931}
2963 2932
2964/* Send a message CDB to the firmwart. */ 2933/* Send a message CDB to the firmware. */
2965static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 2934static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
2966 unsigned char type) 2935 unsigned char type)
2967{ 2936{
@@ -3296,7 +3265,7 @@ default_int_mode:
3296 h->intr[PERF_MODE_INT] = pdev->irq; 3265 h->intr[PERF_MODE_INT] = pdev->irq;
3297} 3266}
3298 3267
3299static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) 3268static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3300{ 3269{
3301 ushort subsystem_vendor_id, subsystem_device_id, command; 3270 ushort subsystem_vendor_id, subsystem_device_id, command;
3302 u32 board_id, scratchpad = 0; 3271 u32 board_id, scratchpad = 0;
@@ -3405,6 +3374,23 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3405 3374
3406 h->board_id = board_id; 3375 h->board_id = board_id;
3407 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 3376 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3377 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
3378
3379 /*
3380 * Limit in-command s/g elements to 32 save dma'able memory.
3381 * Howvever spec says if 0, use 31
3382 */
3383
3384 h->max_cmd_sg_entries = 31;
3385 if (h->maxsgentries > 512) {
3386 h->max_cmd_sg_entries = 32;
3387 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
3388 h->maxsgentries--; /* save one for chain pointer */
3389 } else {
3390 h->maxsgentries = 31; /* default to traditional values */
3391 h->chainsize = 0;
3392 }
3393
3408 h->product_name = products[prod_index].product_name; 3394 h->product_name = products[prod_index].product_name;
3409 h->access = *(products[prod_index].access); 3395 h->access = *(products[prod_index].access);
3410 /* Allow room for some ioctls */ 3396 /* Allow room for some ioctls */
@@ -3532,8 +3518,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3532 h->busy_initializing = 1; 3518 h->busy_initializing = 1;
3533 INIT_HLIST_HEAD(&h->cmpQ); 3519 INIT_HLIST_HEAD(&h->cmpQ);
3534 INIT_HLIST_HEAD(&h->reqQ); 3520 INIT_HLIST_HEAD(&h->reqQ);
3535 mutex_init(&h->busy_shutting_down);
3536 init_completion(&h->scan_wait);
3537 rc = hpsa_pci_init(h, pdev); 3521 rc = hpsa_pci_init(h, pdev);
3538 if (rc != 0) 3522 if (rc != 0)
3539 goto clean1; 3523 goto clean1;
@@ -3587,6 +3571,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3587 rc = -ENOMEM; 3571 rc = -ENOMEM;
3588 goto clean4; 3572 goto clean4;
3589 } 3573 }
3574 if (hpsa_allocate_sg_chain_blocks(h))
3575 goto clean4;
3590 spin_lock_init(&h->lock); 3576 spin_lock_init(&h->lock);
3591 spin_lock_init(&h->scan_lock); 3577 spin_lock_init(&h->scan_lock);
3592 init_waitqueue_head(&h->scan_wait_queue); 3578 init_waitqueue_head(&h->scan_wait_queue);
@@ -3609,6 +3595,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3609 return 1; 3595 return 1;
3610 3596
3611clean4: 3597clean4:
3598 hpsa_free_sg_chain_blocks(h);
3612 kfree(h->cmd_pool_bits); 3599 kfree(h->cmd_pool_bits);
3613 if (h->cmd_pool) 3600 if (h->cmd_pool)
3614 pci_free_consistent(h->pdev, 3601 pci_free_consistent(h->pdev,
@@ -3681,11 +3668,10 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3681 return; 3668 return;
3682 } 3669 }
3683 h = pci_get_drvdata(pdev); 3670 h = pci_get_drvdata(pdev);
3684 mutex_lock(&h->busy_shutting_down);
3685 remove_from_scan_list(h);
3686 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 3671 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
3687 hpsa_shutdown(pdev); 3672 hpsa_shutdown(pdev);
3688 iounmap(h->vaddr); 3673 iounmap(h->vaddr);
3674 hpsa_free_sg_chain_blocks(h);
3689 pci_free_consistent(h->pdev, 3675 pci_free_consistent(h->pdev,
3690 h->nr_cmds * sizeof(struct CommandList), 3676 h->nr_cmds * sizeof(struct CommandList),
3691 h->cmd_pool, h->cmd_pool_dhandle); 3677 h->cmd_pool, h->cmd_pool_dhandle);
@@ -3703,7 +3689,6 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3703 */ 3689 */
3704 pci_release_regions(pdev); 3690 pci_release_regions(pdev);
3705 pci_set_drvdata(pdev, NULL); 3691 pci_set_drvdata(pdev, NULL);
3706 mutex_unlock(&h->busy_shutting_down);
3707 kfree(h); 3692 kfree(h);
3708} 3693}
3709 3694
@@ -3857,23 +3842,12 @@ clean_up:
3857 */ 3842 */
3858static int __init hpsa_init(void) 3843static int __init hpsa_init(void)
3859{ 3844{
3860 int err; 3845 return pci_register_driver(&hpsa_pci_driver);
3861 /* Start the scan thread */
3862 hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
3863 if (IS_ERR(hpsa_scan_thread)) {
3864 err = PTR_ERR(hpsa_scan_thread);
3865 return -ENODEV;
3866 }
3867 err = pci_register_driver(&hpsa_pci_driver);
3868 if (err)
3869 kthread_stop(hpsa_scan_thread);
3870 return err;
3871} 3846}
3872 3847
3873static void __exit hpsa_cleanup(void) 3848static void __exit hpsa_cleanup(void)
3874{ 3849{
3875 pci_unregister_driver(&hpsa_pci_driver); 3850 pci_unregister_driver(&hpsa_pci_driver);
3876 kthread_stop(hpsa_scan_thread);
3877} 3851}
3878 3852
3879module_init(hpsa_init); 3853module_init(hpsa_init);