Diffstat (limited to 'drivers/message')
-rw-r--r--  drivers/message/fusion/mptctl.c   |   7
-rw-r--r--  drivers/message/fusion/mptlan.c   | 108
-rw-r--r--  drivers/message/fusion/mptscsih.c |   3
-rw-r--r--  drivers/message/i2o/Makefile      |   2
-rw-r--r--  drivers/message/i2o/device.c      |   2
-rw-r--r--  drivers/message/i2o/exec-osm.c    |   4
-rw-r--r--  drivers/message/i2o/i2o_block.c   |  25
-rw-r--r--  drivers/message/i2o/i2o_config.c  |  52
-rw-r--r--  drivers/message/i2o/iop.c         |   2
-rw-r--r--  drivers/message/i2o/memory.c      | 313
-rw-r--r--  drivers/message/i2o/pci.c         |  16
11 files changed, 357 insertions(+), 177 deletions(-)
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index f5233f3d9eff..b89f476cd0a9 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -559,12 +559,6 @@ mptctl_fasync(int fd, struct file *filep, int mode)
 	return ret;
 }
 
-static int
-mptctl_release(struct inode *inode, struct file *filep)
-{
-	return fasync_helper(-1, filep, 0, &async_queue);
-}
-
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
  *	MPT ioctl handler
@@ -2706,7 +2700,6 @@ mptctl_hp_targetinfo(unsigned long arg)
 static const struct file_operations mptctl_fops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
-	.release = mptctl_release,
 	.fasync = mptctl_fasync,
 	.unlocked_ioctl = mptctl_ioctl,
 #ifdef CONFIG_COMPAT
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index a1abf95cf751..603ffd008c73 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -77,12 +77,6 @@ MODULE_VERSION(my_VERSION);
  * Fusion MPT LAN private structures
  */
 
-struct NAA_Hosed {
-	u16 NAA;
-	u8 ieee[FC_ALEN];
-	struct NAA_Hosed *next;
-};
-
 struct BufferControl {
 	struct sk_buff *skb;
 	dma_addr_t dma;
@@ -159,11 +153,6 @@ static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
 static u32 max_buckets_out = 127;
 static u32 tx_max_out_p = 127 - 16;
 
-#ifdef QLOGIC_NAA_WORKAROUND
-static struct NAA_Hosed *mpt_bad_naa = NULL;
-DEFINE_RWLOCK(bad_naa_lock);
-#endif
-
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	lan_reply - Handle all data sent from the hardware.
@@ -780,30 +769,6 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
 //			ctx, skb, skb->data));
 
 	mac = skb_mac_header(skb);
-#ifdef QLOGIC_NAA_WORKAROUND
-{
-	struct NAA_Hosed *nh;
-
-	/* Munge the NAA for Tx packets to QLogic boards, which don't follow
-	   RFC 2625. The longer I look at this, the more my opinion of Qlogic
-	   drops. */
-	read_lock_irq(&bad_naa_lock);
-	for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
-		if ((nh->ieee[0] == mac[0]) &&
-		    (nh->ieee[1] == mac[1]) &&
-		    (nh->ieee[2] == mac[2]) &&
-		    (nh->ieee[3] == mac[3]) &&
-		    (nh->ieee[4] == mac[4]) &&
-		    (nh->ieee[5] == mac[5])) {
-			cur_naa = nh->NAA;
-			dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
-				  "= %04x.\n", cur_naa));
-			break;
-		}
-	}
-	read_unlock_irq(&bad_naa_lock);
-}
-#endif
 
 	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
 						    (mac[0] << 8) |
@@ -1572,79 +1537,6 @@ mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
 
 	fcllc = (struct fcllc *)skb->data;
 
-#ifdef QLOGIC_NAA_WORKAROUND
-{
-	u16 source_naa = fch->stype, found = 0;
-
-	/* Workaround for QLogic not following RFC 2625 in regards to the NAA
-	   value. */
-
-	if ((source_naa & 0xF000) == 0)
-		source_naa = swab16(source_naa);
-
-	if (fcllc->ethertype == htons(ETH_P_ARP))
-	    dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
-		      "%04x.\n", source_naa));
-
-	if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
-	   ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){
-		struct NAA_Hosed *nh, *prevnh;
-		int i;
-
-		dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
-			  "system with non-RFC 2625 NAA value (%04x).\n",
-			  source_naa));
-
-		write_lock_irq(&bad_naa_lock);
-		for (prevnh = nh = mpt_bad_naa; nh != NULL;
-		     prevnh=nh, nh=nh->next) {
-			if ((nh->ieee[0] == fch->saddr[0]) &&
-			    (nh->ieee[1] == fch->saddr[1]) &&
-			    (nh->ieee[2] == fch->saddr[2]) &&
-			    (nh->ieee[3] == fch->saddr[3]) &&
-			    (nh->ieee[4] == fch->saddr[4]) &&
-			    (nh->ieee[5] == fch->saddr[5])) {
-				found = 1;
-				dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
-					 "q/Rep w/ bad NAA from system already"
-					 " in DB.\n"));
-				break;
-			}
-		}
-
-		if ((!found) && (nh == NULL)) {
-
-			nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
-			dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
-				 " bad NAA from system not yet in DB.\n"));
-
-			if (nh != NULL) {
-				nh->next = NULL;
-				if (!mpt_bad_naa)
-					mpt_bad_naa = nh;
-				if (prevnh)
-					prevnh->next = nh;
-
-				nh->NAA = source_naa; /* Set the S_NAA value. */
-				for (i = 0; i < FC_ALEN; i++)
-					nh->ieee[i] = fch->saddr[i];
-				dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
-					  "%02x:%02x with non-compliant S_NAA value.\n",
-					  fch->saddr[0], fch->saddr[1], fch->saddr[2],
-					  fch->saddr[3], fch->saddr[4],fch->saddr[5]));
-			} else {
-				printk (KERN_ERR "mptlan/type_trans: Unable to"
-					" kmalloc a NAA_Hosed struct.\n");
-			}
-		} else if (!found) {
-			printk (KERN_ERR "mptlan/type_trans: found not"
-				" set, but nh isn't null. Evil "
-				"funkiness abounds.\n");
-		}
-		write_unlock_irq(&bad_naa_lock);
-	}
-}
-#endif
 
 	/* Strip the SNAP header from ARP packets since we don't
 	 * pass them through to the 802.2/SNAP layers.
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 9f9354fd3516..d62fd4f6b52e 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1760,10 +1760,9 @@ mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
 	case FC:
 		return 40;
 	case SAS:
-		return 10;
 	case SPI:
 	default:
-		return 2;
+		return 10;
 	}
 }
 
diff --git a/drivers/message/i2o/Makefile b/drivers/message/i2o/Makefile
index 2c2e39aa1efa..b0982dacfd0a 100644
--- a/drivers/message/i2o/Makefile
+++ b/drivers/message/i2o/Makefile
@@ -5,7 +5,7 @@
 # In the future, some of these should be built conditionally.
 #
 
-i2o_core-y		+= iop.o driver.o device.o debug.o pci.o exec-osm.o
+i2o_core-y		+= iop.o driver.o device.o debug.o pci.o exec-osm.o memory.o
 i2o_bus-y		+= bus-osm.o
 i2o_config-y		+= config-osm.o
 obj-$(CONFIG_I2O)	+= i2o_core.o
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 8774c670e668..54c2e9ae23e5 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -467,7 +467,7 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
 
 	res.virt = NULL;
 
-	if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
+	if (i2o_dma_alloc(dev, &res, reslen))
 		return -ENOMEM;
 
 	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 6cbcc21de518..56faef1a1d55 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -388,8 +388,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
 
 	dev = &c->pdev->dev;
 
-	if (i2o_dma_realloc
-	    (dev, &c->dlct, le32_to_cpu(sb->expected_lct_size), GFP_KERNEL))
+	if (i2o_dma_realloc(dev, &c->dlct,
+			    le32_to_cpu(sb->expected_lct_size)))
 		return -ENOMEM;
 
 	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 81483de8c0fd..84bdc2ee69e6 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -567,17 +567,17 @@ static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
 
 /**
  *	i2o_block_open - Open the block device
- *	@inode: inode for block device being opened
- *	@file: file to open
+ *	@bdev: block device being opened
+ *	@mode: file open mode
  *
  *	Power up the device, mount and lock the media. This function is called,
  *	if the block device is opened for access.
 *
 *	Returns 0 on success or negative error code on failure.
 */
-static int i2o_block_open(struct inode *inode, struct file *file)
+static int i2o_block_open(struct block_device *bdev, fmode_t mode)
 {
-	struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data;
+	struct i2o_block_device *dev = bdev->bd_disk->private_data;
 
 	if (!dev->i2o_dev)
 		return -ENODEV;
@@ -596,17 +596,16 @@ static int i2o_block_open(struct inode *inode, struct file *file)
 
 /**
  *	i2o_block_release - Release the I2O block device
- *	@inode: inode for block device being released
- *	@file: file to close
+ *	@disk: gendisk device being released
+ *	@mode: file open mode
 *
 *	Unlock and unmount the media, and power down the device. Gets called if
 *	the block device is closed.
 *
 *	Returns 0 on success or negative error code on failure.
 */
-static int i2o_block_release(struct inode *inode, struct file *file)
+static int i2o_block_release(struct gendisk *disk, fmode_t mode)
 {
-	struct gendisk *disk = inode->i_bdev->bd_disk;
 	struct i2o_block_device *dev = disk->private_data;
 	u8 operation;
 
@@ -644,8 +643,8 @@ static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 
 /**
  *	i2o_block_ioctl - Issue device specific ioctl calls.
- *	@inode: inode for block device ioctl
- *	@file: file for ioctl
+ *	@bdev: block device being opened
+ *	@mode: file open mode
 *	@cmd: ioctl command
 *	@arg: arg
 *
@@ -653,10 +652,10 @@ static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 *
 *	Return 0 on success or negative error on failure.
 */
-static int i2o_block_ioctl(struct inode *inode, struct file *file,
+static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
 			   unsigned int cmd, unsigned long arg)
 {
-	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct gendisk *disk = bdev->bd_disk;
 	struct i2o_block_device *dev = disk->private_data;
 
 	/* Anyone capable of this syscall can do *real bad* things */
@@ -933,7 +932,7 @@ static struct block_device_operations i2o_block_fops = {
 	.owner = THIS_MODULE,
 	.open = i2o_block_open,
 	.release = i2o_block_release,
-	.ioctl = i2o_block_ioctl,
+	.locked_ioctl = i2o_block_ioctl,
 	.getgeo = i2o_block_getgeo,
 	.media_changed = i2o_block_media_changed
 };
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 4238de98d4a6..f3384c32b9a1 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -260,7 +260,7 @@ static int i2o_cfg_swdl(unsigned long arg)
 	if (IS_ERR(msg))
 		return PTR_ERR(msg);
 
-	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
+	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
 		i2o_msg_nop(c, msg);
 		return -ENOMEM;
 	}
@@ -339,7 +339,7 @@ static int i2o_cfg_swul(unsigned long arg)
 	if (IS_ERR(msg))
 		return PTR_ERR(msg);
 
-	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
+	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
 		i2o_msg_nop(c, msg);
 		return -ENOMEM;
 	}
@@ -634,9 +634,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 			sg_size = sg[i].flag_count & 0xffffff;
 			p = &(sg_list[sg_index]);
 			/* Allocate memory for the transfer */
-			if (i2o_dma_alloc
-			    (&c->pdev->dev, p, sg_size,
-			     PCI_DMA_BIDIRECTIONAL)) {
+			if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
 				printk(KERN_DEBUG
 				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 				       c->name, sg_size, i, sg_count);
@@ -780,12 +778,11 @@ static int i2o_cfg_passthru(unsigned long arg)
 	u32 size = 0;
 	u32 reply_size = 0;
 	u32 rcode = 0;
-	void *sg_list[SG_TABLESIZE];
+	struct i2o_dma sg_list[SG_TABLESIZE];
 	u32 sg_offset = 0;
 	u32 sg_count = 0;
 	int sg_index = 0;
 	u32 i = 0;
-	void *p = NULL;
 	i2o_status_block *sb;
 	struct i2o_message *msg;
 	unsigned int iop;
@@ -842,6 +839,7 @@ static int i2o_cfg_passthru(unsigned long arg)
 	memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
 	if (sg_offset) {
 		struct sg_simple_element *sg;
+		struct i2o_dma *p;
 
 		if (sg_offset * 4 >= size) {
 			rcode = -EFAULT;
@@ -871,22 +869,22 @@ static int i2o_cfg_passthru(unsigned long arg)
 				goto sg_list_cleanup;
 			}
 			sg_size = sg[i].flag_count & 0xffffff;
+			p = &(sg_list[sg_index]);
+			if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
 			/* Allocate memory for the transfer */
-			p = kmalloc(sg_size, GFP_KERNEL);
-			if (!p) {
 				printk(KERN_DEBUG
 				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 				       c->name, sg_size, i, sg_count);
 				rcode = -ENOMEM;
 				goto sg_list_cleanup;
 			}
-			sg_list[sg_index++] = p;	// sglist indexed with input frame, not our internal frame.
+			sg_index++;
 			/* Copy in the user's SG buffer if necessary */
 			if (sg[i].
 			    flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
 				// TODO 64bit fix
 				if (copy_from_user
-				    (p, (void __user *)sg[i].addr_bus,
+				    (p->virt, (void __user *)sg[i].addr_bus,
 				     sg_size)) {
 					printk(KERN_DEBUG
 					       "%s: Could not copy SG buf %d FROM user\n",
@@ -895,8 +893,7 @@ static int i2o_cfg_passthru(unsigned long arg)
 					goto sg_list_cleanup;
 				}
 			}
-			//TODO 64bit fix
-			sg[i].addr_bus = virt_to_bus(p);
+			sg[i].addr_bus = p->phys;
 		}
 	}
 
@@ -908,7 +905,7 @@ static int i2o_cfg_passthru(unsigned long arg)
 	}
 
 	if (sg_offset) {
-		u32 rmsg[128];
+		u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
 		/* Copy back the Scatter Gather buffers back to user space */
 		u32 j;
 		// TODO 64bit fix
@@ -942,11 +939,11 @@ static int i2o_cfg_passthru(unsigned long arg)
 			sg_size = sg[j].flag_count & 0xffffff;
 			// TODO 64bit fix
 			if (copy_to_user
-			    ((void __user *)sg[j].addr_bus, sg_list[j],
+			    ((void __user *)sg[j].addr_bus, sg_list[j].virt,
 			     sg_size)) {
 				printk(KERN_WARNING
 				       "%s: Could not copy %p TO user %x\n",
-				       c->name, sg_list[j],
+				       c->name, sg_list[j].virt,
 				       sg[j].addr_bus);
 				rcode = -EFAULT;
 				goto sg_list_cleanup;
@@ -973,7 +970,7 @@ sg_list_cleanup:
 	}
 
 	for (i = 0; i < sg_index; i++)
-		kfree(sg_list[i]);
+		i2o_dma_free(&c->pdev->dev, &sg_list[i]);
 
 cleanup:
 	kfree(reply);
@@ -1100,28 +1097,17 @@ static int cfg_fasync(int fd, struct file *fp, int on)
 static int cfg_release(struct inode *inode, struct file *file)
 {
 	ulong id = (ulong) file->private_data;
-	struct i2o_cfg_info *p1, *p2;
+	struct i2o_cfg_info *p, **q;
 	unsigned long flags;
 
 	lock_kernel();
-	p1 = p2 = NULL;
-
 	spin_lock_irqsave(&i2o_config_lock, flags);
-	for (p1 = open_files; p1;) {
-		if (p1->q_id == id) {
-
-			if (p1->fasync)
-				cfg_fasync(-1, file, 0);
-			if (p2)
-				p2->next = p1->next;
-			else
-				open_files = p1->next;
-
-			kfree(p1);
+	for (q = &open_files; (p = *q) != NULL; q = &p->next) {
+		if (p->q_id == id) {
+			*q = p->next;
+			kfree(p);
 			break;
 		}
-		p2 = p1;
-		p1 = p1->next;
 	}
 	spin_unlock_irqrestore(&i2o_config_lock, flags);
 	unlock_kernel();
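Note: the pass-through path above switches its scatter-gather bookkeeping from bare kernel pointers (kmalloc()/virt_to_bus()) to struct i2o_dma buffers. A minimal sketch of the resulting per-element pattern, assuming the virt/phys members shown in the diff; user_ptr stands in for the userspace address taken from the SG element:

	/* Sketch only: per-SG-element handling with the i2o_dma-based API. */
	struct i2o_dma *p = &sg_list[sg_index];

	if (i2o_dma_alloc(&c->pdev->dev, p, sg_size))
		return -ENOMEM;				/* coherent, zeroed buffer */

	if (copy_from_user(p->virt, user_ptr, sg_size))	/* fill for host-to-IOP data */
		return -EFAULT;
	sg[i].addr_bus = p->phys;			/* bus address goes into the SGL */

	/* ... message is posted; replies are copied back to userspace via p->virt ... */

	i2o_dma_free(&c->pdev->dev, p);			/* replaces kfree() in the cleanup path */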
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index da715e11c1b2..be2b5926d26c 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -1004,7 +1004,7 @@ static int i2o_hrt_get(struct i2o_controller *c)
 
 		size = hrt->num_entries * hrt->entry_len << 2;
 		if (size > c->hrt.len) {
-			if (i2o_dma_realloc(dev, &c->hrt, size, GFP_KERNEL))
+			if (i2o_dma_realloc(dev, &c->hrt, size))
 				return -ENOMEM;
 			else
 				hrt = c->hrt.virt;
diff --git a/drivers/message/i2o/memory.c b/drivers/message/i2o/memory.c
new file mode 100644
index 000000000000..f5cc95c564e2
--- /dev/null
+++ b/drivers/message/i2o/memory.c
@@ -0,0 +1,313 @@
+/*
+ * Functions to handle I2O memory
+ *
+ * Pulled from the inlines in i2o headers and uninlined
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/i2o.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include "core.h"
+
+/* Protects our 32/64bit mask switching */
+static DEFINE_MUTEX(mem_lock);
+
+/**
+ *	i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
+ *	@c: I2O controller for which the calculation should be done
+ *	@body_size: maximum body size used for message in 32-bit words.
+ *
+ *	Return the maximum number of SG elements in a SG list.
+ */
+u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
+{
+	i2o_status_block *sb = c->status_block.virt;
+	u16 sg_count =
+	    (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
+	    body_size;
+
+	if (c->pae_support) {
+		/*
+		 * for 64-bit a SG attribute element must be added and each
+		 * SG element needs 12 bytes instead of 8.
+		 */
+		sg_count -= 2;
+		sg_count /= 3;
+	} else
+		sg_count /= 2;
+
+	if (c->short_req && (sg_count > 8))
+		sg_count = 8;
+
+	return sg_count;
+}
+EXPORT_SYMBOL_GPL(i2o_sg_tablesize);
+
+
+/**
+ *	i2o_dma_map_single - Map pointer to controller and fill in I2O message.
+ *	@c: I2O controller
+ *	@ptr: pointer to the data which should be mapped
+ *	@size: size of data in bytes
+ *	@direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
+ *	@sg_ptr: pointer to the SG list inside the I2O message
+ *
+ *	This function does all necessary DMA handling and also writes the I2O
+ *	SGL elements into the I2O message. For details on DMA handling see also
+ *	dma_map_single(). The pointer sg_ptr will only be set to the end of the
+ *	SG list if the allocation was successful.
+ *
+ *	Returns DMA address which must be checked for failures using
+ *	dma_mapping_error().
+ */
+dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
+			      size_t size,
+			      enum dma_data_direction direction,
+			      u32 ** sg_ptr)
+{
+	u32 sg_flags;
+	u32 *mptr = *sg_ptr;
+	dma_addr_t dma_addr;
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		sg_flags = 0xd4000000;
+		break;
+	case DMA_FROM_DEVICE:
+		sg_flags = 0xd0000000;
+		break;
+	default:
+		return 0;
+	}
+
+	dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
+	if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+		if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
+			*mptr++ = cpu_to_le32(0x7C020002);
+			*mptr++ = cpu_to_le32(PAGE_SIZE);
+		}
+#endif
+
+		*mptr++ = cpu_to_le32(sg_flags | size);
+		*mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
+			*mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
+#endif
+		*sg_ptr = mptr;
+	}
+	return dma_addr;
+}
+EXPORT_SYMBOL_GPL(i2o_dma_map_single);
+
+/**
+ *	i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
+ *	@c: I2O controller
+ *	@sg: SG list to be mapped
+ *	@sg_count: number of elements in the SG list
+ *	@direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
+ *	@sg_ptr: pointer to the SG list inside the I2O message
+ *
+ *	This function does all necessary DMA handling and also writes the I2O
+ *	SGL elements into the I2O message. For details on DMA handling see also
+ *	dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
+ *	list if the allocation was successful.
+ *
+ *	Returns 0 on failure or 1 on success.
+ */
+int i2o_dma_map_sg(struct i2o_controller *c, struct scatterlist *sg,
+		   int sg_count, enum dma_data_direction direction, u32 ** sg_ptr)
+{
+	u32 sg_flags;
+	u32 *mptr = *sg_ptr;
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		sg_flags = 0x14000000;
+		break;
+	case DMA_FROM_DEVICE:
+		sg_flags = 0x10000000;
+		break;
+	default:
+		return 0;
+	}
+
+	sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
+	if (!sg_count)
+		return 0;
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+	if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
+		*mptr++ = cpu_to_le32(0x7C020002);
+		*mptr++ = cpu_to_le32(PAGE_SIZE);
+	}
+#endif
+
+	while (sg_count-- > 0) {
+		if (!sg_count)
+			sg_flags |= 0xC0000000;
+		*mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
+		*mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
+			*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
+#endif
+		sg = sg_next(sg);
+	}
+	*sg_ptr = mptr;
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(i2o_dma_map_sg);
+
+/**
+ *	i2o_dma_alloc - Allocate DMA memory
+ *	@dev: struct device pointer to the PCI device of the I2O controller
+ *	@addr: i2o_dma struct which should get the DMA buffer
+ *	@len: length of the new DMA memory
+ *
+ *	Allocate a coherent DMA memory and write the pointers into addr.
+ *
+ *	Returns 0 on success or -ENOMEM on failure.
+ */
+int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int dma_64 = 0;
+
+	mutex_lock(&mem_lock);
+	if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
+		dma_64 = 1;
+		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+			mutex_unlock(&mem_lock);
+			return -ENOMEM;
+		}
+	}
+
+	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL);
+
+	if ((sizeof(dma_addr_t) > 4) && dma_64)
+		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+			printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
+	mutex_unlock(&mem_lock);
+
+	if (!addr->virt)
+		return -ENOMEM;
+
+	memset(addr->virt, 0, len);
+	addr->len = len;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(i2o_dma_alloc);
+
+
+/**
+ *	i2o_dma_free - Free DMA memory
+ *	@dev: struct device pointer to the PCI device of the I2O controller
+ *	@addr: i2o_dma struct which contains the DMA buffer
+ *
+ *	Free a coherent DMA memory and set virtual address of addr to NULL.
+ */
+void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
+{
+	if (addr->virt) {
+		if (addr->phys)
+			dma_free_coherent(dev, addr->len, addr->virt,
+					  addr->phys);
+		else
+			kfree(addr->virt);
+		addr->virt = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(i2o_dma_free);
+
+
+/**
+ *	i2o_dma_realloc - Realloc DMA memory
+ *	@dev: struct device pointer to the PCI device of the I2O controller
+ *	@addr: pointer to a i2o_dma struct DMA buffer
+ *	@len: new length of memory
+ *
+ *	If there was something allocated in the addr, free it first. If len > 0
+ *	than try to allocate it and write the addresses back to the addr
+ *	structure. If len == 0 set the virtual address to NULL.
+ *
+ *	Returns the 0 on success or negative error code on failure.
+ */
+int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len)
+{
+	i2o_dma_free(dev, addr);
+
+	if (len)
+		return i2o_dma_alloc(dev, addr, len);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(i2o_dma_realloc);
+
+/*
+ *	i2o_pool_alloc - Allocate an slab cache and mempool
+ *	@mempool: pointer to struct i2o_pool to write data into.
+ *	@name: name which is used to identify cache
+ *	@size: size of each object
+ *	@min_nr: minimum number of objects
+ *
+ *	First allocates a slab cache with name and size. Then allocates a
+ *	mempool which uses the slab cache for allocation and freeing.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
+		   size_t size, int min_nr)
+{
+	pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+	if (!pool->name)
+		goto exit;
+	strcpy(pool->name, name);
+
+	pool->slab =
+	    kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!pool->slab)
+		goto free_name;
+
+	pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
+	if (!pool->mempool)
+		goto free_slab;
+
+	return 0;
+
+free_slab:
+	kmem_cache_destroy(pool->slab);
+
+free_name:
+	kfree(pool->name);
+
+exit:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(i2o_pool_alloc);
+
+/*
+ *	i2o_pool_free - Free slab cache and mempool again
+ *	@mempool: pointer to struct i2o_pool which should be freed
+ *
+ *	Note that you have to return all objects to the mempool again before
+ *	calling i2o_pool_free().
+ */
+void i2o_pool_free(struct i2o_pool *pool)
+{
+	mempool_destroy(pool->mempool);
+	kmem_cache_destroy(pool->slab);
+	kfree(pool->name);
+};
+EXPORT_SYMBOL_GPL(i2o_pool_free);
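Note: the helpers above were previously inlined in the i2o headers; for callers in this patch the visible change is that i2o_dma_alloc()/i2o_dma_realloc() no longer take a gfp_t argument (the coherent allocation is always GFP_KERNEL). A usage sketch against the new prototypes, with arbitrary sizes and "c" being an i2o_controller as elsewhere in the driver:

	/* Sketch only: allocate, grow and release a DMA buffer for controller c. */
	struct i2o_dma tbl;

	tbl.virt = NULL;
	if (i2o_dma_alloc(&c->pdev->dev, &tbl, 4096))	/* zeroed, coherent buffer */
		return -ENOMEM;

	/* The IOP reports it needs a larger table: free + reallocate in one call. */
	if (i2o_dma_realloc(&c->pdev->dev, &tbl, 8192))
		return -ENOMEM;

	i2o_dma_free(&c->pdev->dev, &tbl);		/* sets tbl.virt back to NULL */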
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index 685a89547a51..610ef1204e68 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -186,31 +186,29 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c)
 		}
 	}
 
-	if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) {
+	if (i2o_dma_alloc(dev, &c->status, 8)) {
 		i2o_pci_free(c);
 		return -ENOMEM;
 	}
 
-	if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt), GFP_KERNEL)) {
+	if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt))) {
 		i2o_pci_free(c);
 		return -ENOMEM;
 	}
 
-	if (i2o_dma_alloc(dev, &c->dlct, 8192, GFP_KERNEL)) {
+	if (i2o_dma_alloc(dev, &c->dlct, 8192)) {
 		i2o_pci_free(c);
 		return -ENOMEM;
 	}
 
-	if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block),
-			  GFP_KERNEL)) {
+	if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block))) {
 		i2o_pci_free(c);
 		return -ENOMEM;
 	}
 
-	if (i2o_dma_alloc
-	    (dev, &c->out_queue,
-	     I2O_MAX_OUTBOUND_MSG_FRAMES * I2O_OUTBOUND_MSG_FRAME_SIZE *
-	     sizeof(u32), GFP_KERNEL)) {
+	if (i2o_dma_alloc(dev, &c->out_queue,
+			  I2O_MAX_OUTBOUND_MSG_FRAMES * I2O_OUTBOUND_MSG_FRAME_SIZE *
+			  sizeof(u32))) {
 		i2o_pci_free(c);
 		return -ENOMEM;
 	}