author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2008-07-26 18:04:59 -0400
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2008-07-26 18:04:59 -0400
commit | d9ecdb282c91952796b7542c4f57fd6de6948d7b (patch)
tree | fd4de7923968afa7d2981fb037e2255fc2cfa1e1 /drivers
parent | 4ef584ba84125b67c17b5aded38e7783cd8cdef0 (diff)
parent | 1d1f8b377c48e5aeddaea52eba74cc0539f088cd (diff)
Merge branch 'for_rmk_13' of git://git.mnementh.co.uk/linux-2.6-im
Diffstat (limited to 'drivers')
69 files changed, 620 insertions, 459 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d592dbb1d12a..b7f2963693a7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -272,6 +272,8 @@ static atomic_t c3_cpu_count; | |||
272 | /* Common C-state entry for C2, C3, .. */ | 272 | /* Common C-state entry for C2, C3, .. */ |
273 | static void acpi_cstate_enter(struct acpi_processor_cx *cstate) | 273 | static void acpi_cstate_enter(struct acpi_processor_cx *cstate) |
274 | { | 274 | { |
275 | /* Don't trace irqs off for idle */ | ||
276 | stop_critical_timings(); | ||
275 | if (cstate->entry_method == ACPI_CSTATE_FFH) { | 277 | if (cstate->entry_method == ACPI_CSTATE_FFH) { |
276 | /* Call into architectural FFH based C-state */ | 278 | /* Call into architectural FFH based C-state */ |
277 | acpi_processor_ffh_cstate_enter(cstate); | 279 | acpi_processor_ffh_cstate_enter(cstate); |
@@ -284,6 +286,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate) | |||
284 | gets asserted in time to freeze execution properly. */ | 286 | gets asserted in time to freeze execution properly. */ |
285 | unused = inl(acpi_gbl_FADT.xpm_timer_block.address); | 287 | unused = inl(acpi_gbl_FADT.xpm_timer_block.address); |
286 | } | 288 | } |
289 | start_critical_timings(); | ||
287 | } | 290 | } |
288 | #endif /* !CONFIG_CPU_IDLE */ | 291 | #endif /* !CONFIG_CPU_IDLE */ |
289 | 292 | ||
@@ -1418,6 +1421,8 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr, | |||
1418 | */ | 1421 | */ |
1419 | static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) | 1422 | static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) |
1420 | { | 1423 | { |
1424 | /* Don't trace irqs off for idle */ | ||
1425 | stop_critical_timings(); | ||
1421 | if (cx->entry_method == ACPI_CSTATE_FFH) { | 1426 | if (cx->entry_method == ACPI_CSTATE_FFH) { |
1422 | /* Call into architectural FFH based C-state */ | 1427 | /* Call into architectural FFH based C-state */ |
1423 | acpi_processor_ffh_cstate_enter(cx); | 1428 | acpi_processor_ffh_cstate_enter(cx); |
@@ -1432,6 +1437,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) | |||
1432 | gets asserted in time to freeze execution properly. */ | 1437 | gets asserted in time to freeze execution properly. */ |
1433 | unused = inl(acpi_gbl_FADT.xpm_timer_block.address); | 1438 | unused = inl(acpi_gbl_FADT.xpm_timer_block.address); |
1434 | } | 1439 | } |
1440 | start_critical_timings(); | ||
1435 | } | 1441 | } |
1436 | 1442 | ||
1437 | /** | 1443 | /** |
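Editor's note: the two processor_idle.c hunks above bracket ACPI C-state entry with stop_critical_timings()/start_critical_timings() so that time spent idle is not charged to the irqs-off latency tracer. Below is a minimal sketch of that pattern; my_idle_enter() and my_enter_low_power() are hypothetical names, not part of this merge, and the header declaring the two tracing primitives has moved between kernel versions.

```c
/*
 * Sketch only: suspend irqs-off latency tracing while the CPU sits
 * in a low-power state, mirroring the hunks above.
 */
static void my_idle_enter(void)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	my_enter_low_power();		/* hypothetical platform idle hook */
	start_critical_timings();
}
```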
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 7d5c63c81a59..068aa1c9538c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -116,12 +116,10 @@ static void device_release(struct kobject *kobj) | |||
116 | dev->type->release(dev); | 116 | dev->type->release(dev); |
117 | else if (dev->class && dev->class->dev_release) | 117 | else if (dev->class && dev->class->dev_release) |
118 | dev->class->dev_release(dev); | 118 | dev->class->dev_release(dev); |
119 | else { | 119 | else |
120 | printk(KERN_ERR "Device '%s' does not have a release() " | 120 | WARN(1, KERN_ERR "Device '%s' does not have a release() " |
121 | "function, it is broken and must be fixed.\n", | 121 | "function, it is broken and must be fixed.\n", |
122 | dev->bus_id); | 122 | dev->bus_id); |
123 | WARN_ON(1); | ||
124 | } | ||
125 | } | 123 | } |
126 | 124 | ||
127 | static struct kobj_type device_ktype = { | 125 | static struct kobj_type device_ktype = { |
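Editor's note: this hunk, like the memory.c and sys.c hunks below, folds a printk() plus WARN_ON(1) pair into a single WARN(1, ...) call, so the formatted message and the backtrace are emitted as one report. A small before/after sketch of the idiom; the `name` variable is a hypothetical stand-in:

```c
/* Before: message and backtrace come from two separate calls */
printk(KERN_ERR "device '%s' is broken\n", name);
WARN_ON(1);

/* After: WARN() prints the formatted message together with the
 * backtrace, so the two can no longer be split apart in the log */
WARN(1, KERN_ERR "device '%s' is broken\n", name);
```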
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index d2222397a401..efd577574948 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/slab.h> | 7 | #include <linux/slab.h> |
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/init.h> | 9 | #include <linux/init.h> |
10 | #include <linux/dma-mapping.h> | ||
10 | #include <linux/isa.h> | 11 | #include <linux/isa.h> |
11 | 12 | ||
12 | static struct device isa_bus = { | 13 | static struct device isa_bus = { |
@@ -141,6 +142,9 @@ int isa_register_driver(struct isa_driver *isa_driver, unsigned int ndev) | |||
141 | isa_dev->dev.release = isa_dev_release; | 142 | isa_dev->dev.release = isa_dev_release; |
142 | isa_dev->id = id; | 143 | isa_dev->id = id; |
143 | 144 | ||
145 | isa_dev->dev.coherent_dma_mask = DMA_24BIT_MASK; | ||
146 | isa_dev->dev.dma_mask = &isa_dev->dev.coherent_dma_mask; | ||
147 | |||
144 | error = device_register(&isa_dev->dev); | 148 | error = device_register(&isa_dev->dev); |
145 | if (error) { | 149 | if (error) { |
146 | put_device(&isa_dev->dev); | 150 | put_device(&isa_dev->dev); |
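Editor's note: the isa.c hunk gives every registered ISA device a 24-bit coherent DMA mask and points dev.dma_mask at it, so ISA drivers can use the generic DMA API. The sketch below shows what a driver might then do; the function name and buffer size are illustrative assumptions, not code from this merge.

```c
#include <linux/dma-mapping.h>

/*
 * Sketch (hypothetical driver code): with the 24-bit masks that
 * isa_register_driver() now installs, a coherent allocation against
 * the ISA device is confined to the 16 MB ISA-reachable range.
 */
static int my_isa_alloc_dma(struct device *dev, dma_addr_t *bus_addr)
{
	void *buf = dma_alloc_coherent(dev, 4096, bus_addr, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... hand *bus_addr to the ISA hardware ... */
	return 0;
}
```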
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 855ed1a9f97b..3ad49a00029f 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -204,9 +204,8 @@ memory_block_action(struct memory_block *mem, unsigned long action) | |||
204 | } | 204 | } |
205 | break; | 205 | break; |
206 | default: | 206 | default: |
207 | printk(KERN_WARNING "%s(%p, %ld) unknown action: %ld\n", | 207 | WARN(1, KERN_WARNING "%s(%p, %ld) unknown action: %ld\n", |
208 | __func__, mem, action, action); | 208 | __func__, mem, action, action); |
209 | WARN_ON(1); | ||
210 | ret = -EINVAL; | 209 | ret = -EINVAL; |
211 | } | 210 | } |
212 | 211 | ||
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 40fc14f03540..75dd6e22faff 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -168,19 +168,16 @@ int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv) | |||
168 | int err = 0; | 168 | int err = 0; |
169 | 169 | ||
170 | if (!cls) { | 170 | if (!cls) { |
171 | printk(KERN_WARNING "sysdev: invalid class passed to " | 171 | WARN(1, KERN_WARNING "sysdev: invalid class passed to " |
172 | "sysdev_driver_register!\n"); | 172 | "sysdev_driver_register!\n"); |
173 | WARN_ON(1); | ||
174 | return -EINVAL; | 173 | return -EINVAL; |
175 | } | 174 | } |
176 | 175 | ||
177 | /* Check whether this driver has already been added to a class. */ | 176 | /* Check whether this driver has already been added to a class. */ |
178 | if (drv->entry.next && !list_empty(&drv->entry)) { | 177 | if (drv->entry.next && !list_empty(&drv->entry)) |
179 | printk(KERN_WARNING "sysdev: class %s: driver (%p) has already" | 178 | WARN(1, KERN_WARNING "sysdev: class %s: driver (%p) has already" |
180 | " been registered to a class, something is wrong, but " | 179 | " been registered to a class, something is wrong, but " |
181 | "will forge on!\n", cls->name, drv); | 180 | "will forge on!\n", cls->name, drv); |
182 | WARN_ON(1); | ||
183 | } | ||
184 | 181 | ||
185 | mutex_lock(&sysdev_drivers_lock); | 182 | mutex_lock(&sysdev_drivers_lock); |
186 | if (cls && kset_get(&cls->kset)) { | 183 | if (cls && kset_get(&cls->kset)) { |
@@ -194,8 +191,7 @@ int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv) | |||
194 | } | 191 | } |
195 | } else { | 192 | } else { |
196 | err = -EINVAL; | 193 | err = -EINVAL; |
197 | printk(KERN_ERR "%s: invalid device class\n", __func__); | 194 | WARN(1, KERN_ERR "%s: invalid device class\n", __func__); |
198 | WARN_ON(1); | ||
199 | } | 195 | } |
200 | mutex_unlock(&sysdev_drivers_lock); | 196 | mutex_unlock(&sysdev_drivers_lock); |
201 | return err; | 197 | return err; |
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 19b88504e960..ca7c72a486b2 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -304,9 +304,9 @@ static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t co | |||
304 | } | 304 | } |
305 | 305 | ||
306 | static long dsp56k_ioctl(struct file *file, unsigned int cmd, | 306 | static long dsp56k_ioctl(struct file *file, unsigned int cmd, |
307 | unsigned long arg) | 307 | unsigned long arg) |
308 | { | 308 | { |
309 | int dev = iminor(inode) & 0x0f; | 309 | int dev = iminor(file->f_path.dentry->d_inode) & 0x0f; |
310 | void __user *argp = (void __user *)arg; | 310 | void __user *argp = (void __user *)arg; |
311 | 311 | ||
312 | switch(dev) | 312 | switch(dev) |
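Editor's note: the dsp56k hunk is part of the unlocked_ioctl conversion; the handler no longer receives an inode argument, so the minor number is recovered through the struct file. A sketch of the pattern as written in this era (current kernels would use the file_inode() accessor instead); the handler name is hypothetical:

```c
static long my_unlocked_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	/* unlocked_ioctl has no inode parameter; fetch it via the file */
	struct inode *inode = file->f_path.dentry->d_inode;
	int minor = iminor(inode);

	/* ... dispatch on cmd and minor ... */
	return 0;
}
```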
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index dbefbb30ed44..d9799e2bcfbf 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -144,6 +144,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf, | |||
144 | size_t count, loff_t *ppos); | 144 | size_t count, loff_t *ppos); |
145 | 145 | ||
146 | static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg); | 146 | static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg); |
147 | static void rtc_get_rtc_time(struct rtc_time *rtc_tm); | ||
147 | 148 | ||
148 | #ifdef RTC_IRQ | 149 | #ifdef RTC_IRQ |
149 | static unsigned int rtc_poll(struct file *file, poll_table *wait); | 150 | static unsigned int rtc_poll(struct file *file, poll_table *wait); |
@@ -235,7 +236,7 @@ static inline unsigned char rtc_is_updating(void) | |||
235 | * (See ./arch/XXXX/kernel/time.c for the set_rtc_mmss() function.) | 236 | * (See ./arch/XXXX/kernel/time.c for the set_rtc_mmss() function.) |
236 | */ | 237 | */ |
237 | 238 | ||
238 | irqreturn_t rtc_interrupt(int irq, void *dev_id) | 239 | static irqreturn_t rtc_interrupt(int irq, void *dev_id) |
239 | { | 240 | { |
240 | /* | 241 | /* |
241 | * Can be an alarm interrupt, update complete interrupt, | 242 | * Can be an alarm interrupt, update complete interrupt, |
@@ -1303,7 +1304,7 @@ static int rtc_proc_open(struct inode *inode, struct file *file) | |||
1303 | } | 1304 | } |
1304 | #endif | 1305 | #endif |
1305 | 1306 | ||
1306 | void rtc_get_rtc_time(struct rtc_time *rtc_tm) | 1307 | static void rtc_get_rtc_time(struct rtc_time *rtc_tm) |
1307 | { | 1308 | { |
1308 | unsigned long uip_watchdog = jiffies, flags; | 1309 | unsigned long uip_watchdog = jiffies, flags; |
1309 | unsigned char ctrl; | 1310 | unsigned char ctrl; |
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index e1fc193d9396..ae766d868454 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -580,91 +580,133 @@ void tpm_continue_selftest(struct tpm_chip *chip) | |||
580 | } | 580 | } |
581 | EXPORT_SYMBOL_GPL(tpm_continue_selftest); | 581 | EXPORT_SYMBOL_GPL(tpm_continue_selftest); |
582 | 582 | ||
583 | #define TPM_INTERNAL_RESULT_SIZE 200 | ||
584 | |||
583 | ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr, | 585 | ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr, |
584 | char *buf) | 586 | char *buf) |
585 | { | 587 | { |
586 | u8 data[max_t(int, ARRAY_SIZE(tpm_cap), 35)]; | 588 | u8 *data; |
587 | ssize_t rc; | 589 | ssize_t rc; |
588 | 590 | ||
589 | struct tpm_chip *chip = dev_get_drvdata(dev); | 591 | struct tpm_chip *chip = dev_get_drvdata(dev); |
590 | if (chip == NULL) | 592 | if (chip == NULL) |
591 | return -ENODEV; | 593 | return -ENODEV; |
592 | 594 | ||
595 | data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL); | ||
596 | if (!data) | ||
597 | return -ENOMEM; | ||
598 | |||
593 | memcpy(data, tpm_cap, sizeof(tpm_cap)); | 599 | memcpy(data, tpm_cap, sizeof(tpm_cap)); |
594 | data[TPM_CAP_IDX] = TPM_CAP_FLAG; | 600 | data[TPM_CAP_IDX] = TPM_CAP_FLAG; |
595 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_PERM; | 601 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_PERM; |
596 | 602 | ||
597 | rc = transmit_cmd(chip, data, sizeof(data), | 603 | rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE, |
598 | "attemtping to determine the permanent state"); | 604 | "attemtping to determine the permanent enabled state"); |
599 | if (rc) | 605 | if (rc) { |
606 | kfree(data); | ||
600 | return 0; | 607 | return 0; |
601 | return sprintf(buf, "%d\n", !data[TPM_GET_CAP_PERM_DISABLE_IDX]); | 608 | } |
609 | |||
610 | rc = sprintf(buf, "%d\n", !data[TPM_GET_CAP_PERM_DISABLE_IDX]); | ||
611 | |||
612 | kfree(data); | ||
613 | return rc; | ||
602 | } | 614 | } |
603 | EXPORT_SYMBOL_GPL(tpm_show_enabled); | 615 | EXPORT_SYMBOL_GPL(tpm_show_enabled); |
604 | 616 | ||
605 | ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr, | 617 | ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr, |
606 | char *buf) | 618 | char *buf) |
607 | { | 619 | { |
608 | u8 data[max_t(int, ARRAY_SIZE(tpm_cap), 35)]; | 620 | u8 *data; |
609 | ssize_t rc; | 621 | ssize_t rc; |
610 | 622 | ||
611 | struct tpm_chip *chip = dev_get_drvdata(dev); | 623 | struct tpm_chip *chip = dev_get_drvdata(dev); |
612 | if (chip == NULL) | 624 | if (chip == NULL) |
613 | return -ENODEV; | 625 | return -ENODEV; |
614 | 626 | ||
627 | data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL); | ||
628 | if (!data) | ||
629 | return -ENOMEM; | ||
630 | |||
615 | memcpy(data, tpm_cap, sizeof(tpm_cap)); | 631 | memcpy(data, tpm_cap, sizeof(tpm_cap)); |
616 | data[TPM_CAP_IDX] = TPM_CAP_FLAG; | 632 | data[TPM_CAP_IDX] = TPM_CAP_FLAG; |
617 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_PERM; | 633 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_PERM; |
618 | 634 | ||
619 | rc = transmit_cmd(chip, data, sizeof(data), | 635 | rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE, |
620 | "attemtping to determine the permanent state"); | 636 | "attemtping to determine the permanent active state"); |
621 | if (rc) | 637 | if (rc) { |
638 | kfree(data); | ||
622 | return 0; | 639 | return 0; |
623 | return sprintf(buf, "%d\n", !data[TPM_GET_CAP_PERM_INACTIVE_IDX]); | 640 | } |
641 | |||
642 | rc = sprintf(buf, "%d\n", !data[TPM_GET_CAP_PERM_INACTIVE_IDX]); | ||
643 | |||
644 | kfree(data); | ||
645 | return rc; | ||
624 | } | 646 | } |
625 | EXPORT_SYMBOL_GPL(tpm_show_active); | 647 | EXPORT_SYMBOL_GPL(tpm_show_active); |
626 | 648 | ||
627 | ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr, | 649 | ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr, |
628 | char *buf) | 650 | char *buf) |
629 | { | 651 | { |
630 | u8 data[sizeof(tpm_cap)]; | 652 | u8 *data; |
631 | ssize_t rc; | 653 | ssize_t rc; |
632 | 654 | ||
633 | struct tpm_chip *chip = dev_get_drvdata(dev); | 655 | struct tpm_chip *chip = dev_get_drvdata(dev); |
634 | if (chip == NULL) | 656 | if (chip == NULL) |
635 | return -ENODEV; | 657 | return -ENODEV; |
636 | 658 | ||
659 | data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL); | ||
660 | if (!data) | ||
661 | return -ENOMEM; | ||
662 | |||
637 | memcpy(data, tpm_cap, sizeof(tpm_cap)); | 663 | memcpy(data, tpm_cap, sizeof(tpm_cap)); |
638 | data[TPM_CAP_IDX] = TPM_CAP_PROP; | 664 | data[TPM_CAP_IDX] = TPM_CAP_PROP; |
639 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_OWNER; | 665 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_OWNER; |
640 | 666 | ||
641 | rc = transmit_cmd(chip, data, sizeof(data), | 667 | rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE, |
642 | "attempting to determine the owner state"); | 668 | "attempting to determine the owner state"); |
643 | if (rc) | 669 | if (rc) { |
670 | kfree(data); | ||
644 | return 0; | 671 | return 0; |
645 | return sprintf(buf, "%d\n", data[TPM_GET_CAP_RET_BOOL_1_IDX]); | 672 | } |
673 | |||
674 | rc = sprintf(buf, "%d\n", data[TPM_GET_CAP_RET_BOOL_1_IDX]); | ||
675 | |||
676 | kfree(data); | ||
677 | return rc; | ||
646 | } | 678 | } |
647 | EXPORT_SYMBOL_GPL(tpm_show_owned); | 679 | EXPORT_SYMBOL_GPL(tpm_show_owned); |
648 | 680 | ||
649 | ssize_t tpm_show_temp_deactivated(struct device * dev, | 681 | ssize_t tpm_show_temp_deactivated(struct device * dev, |
650 | struct device_attribute * attr, char *buf) | 682 | struct device_attribute * attr, char *buf) |
651 | { | 683 | { |
652 | u8 data[sizeof(tpm_cap)]; | 684 | u8 *data; |
653 | ssize_t rc; | 685 | ssize_t rc; |
654 | 686 | ||
655 | struct tpm_chip *chip = dev_get_drvdata(dev); | 687 | struct tpm_chip *chip = dev_get_drvdata(dev); |
656 | if (chip == NULL) | 688 | if (chip == NULL) |
657 | return -ENODEV; | 689 | return -ENODEV; |
658 | 690 | ||
691 | data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL); | ||
692 | if (!data) | ||
693 | return -ENOMEM; | ||
694 | |||
659 | memcpy(data, tpm_cap, sizeof(tpm_cap)); | 695 | memcpy(data, tpm_cap, sizeof(tpm_cap)); |
660 | data[TPM_CAP_IDX] = TPM_CAP_FLAG; | 696 | data[TPM_CAP_IDX] = TPM_CAP_FLAG; |
661 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_VOL; | 697 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_VOL; |
662 | 698 | ||
663 | rc = transmit_cmd(chip, data, sizeof(data), | 699 | rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE, |
664 | "attempting to determine the temporary state"); | 700 | "attempting to determine the temporary state"); |
665 | if (rc) | 701 | if (rc) { |
702 | kfree(data); | ||
666 | return 0; | 703 | return 0; |
667 | return sprintf(buf, "%d\n", data[TPM_GET_CAP_TEMP_INACTIVE_IDX]); | 704 | } |
705 | |||
706 | rc = sprintf(buf, "%d\n", data[TPM_GET_CAP_TEMP_INACTIVE_IDX]); | ||
707 | |||
708 | kfree(data); | ||
709 | return rc; | ||
668 | } | 710 | } |
669 | EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated); | 711 | EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated); |
670 | 712 | ||
@@ -678,7 +720,7 @@ static const u8 pcrread[] = { | |||
678 | ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr, | 720 | ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr, |
679 | char *buf) | 721 | char *buf) |
680 | { | 722 | { |
681 | u8 data[max_t(int, max(ARRAY_SIZE(tpm_cap), ARRAY_SIZE(pcrread)), 30)]; | 723 | u8 *data; |
682 | ssize_t rc; | 724 | ssize_t rc; |
683 | int i, j, num_pcrs; | 725 | int i, j, num_pcrs; |
684 | __be32 index; | 726 | __be32 index; |
@@ -688,21 +730,27 @@ ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr, | |||
688 | if (chip == NULL) | 730 | if (chip == NULL) |
689 | return -ENODEV; | 731 | return -ENODEV; |
690 | 732 | ||
733 | data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL); | ||
734 | if (!data) | ||
735 | return -ENOMEM; | ||
736 | |||
691 | memcpy(data, tpm_cap, sizeof(tpm_cap)); | 737 | memcpy(data, tpm_cap, sizeof(tpm_cap)); |
692 | data[TPM_CAP_IDX] = TPM_CAP_PROP; | 738 | data[TPM_CAP_IDX] = TPM_CAP_PROP; |
693 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_PCR; | 739 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_PCR; |
694 | 740 | ||
695 | rc = transmit_cmd(chip, data, sizeof(data), | 741 | rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE, |
696 | "attempting to determine the number of PCRS"); | 742 | "attempting to determine the number of PCRS"); |
697 | if (rc) | 743 | if (rc) { |
744 | kfree(data); | ||
698 | return 0; | 745 | return 0; |
746 | } | ||
699 | 747 | ||
700 | num_pcrs = be32_to_cpu(*((__be32 *) (data + 14))); | 748 | num_pcrs = be32_to_cpu(*((__be32 *) (data + 14))); |
701 | for (i = 0; i < num_pcrs; i++) { | 749 | for (i = 0; i < num_pcrs; i++) { |
702 | memcpy(data, pcrread, sizeof(pcrread)); | 750 | memcpy(data, pcrread, sizeof(pcrread)); |
703 | index = cpu_to_be32(i); | 751 | index = cpu_to_be32(i); |
704 | memcpy(data + 10, &index, 4); | 752 | memcpy(data + 10, &index, 4); |
705 | rc = transmit_cmd(chip, data, sizeof(data), | 753 | rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE, |
706 | "attempting to read a PCR"); | 754 | "attempting to read a PCR"); |
707 | if (rc) | 755 | if (rc) |
708 | goto out; | 756 | goto out; |
@@ -712,6 +760,7 @@ ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr, | |||
712 | str += sprintf(str, "\n"); | 760 | str += sprintf(str, "\n"); |
713 | } | 761 | } |
714 | out: | 762 | out: |
763 | kfree(data); | ||
715 | return str - buf; | 764 | return str - buf; |
716 | } | 765 | } |
717 | EXPORT_SYMBOL_GPL(tpm_show_pcrs); | 766 | EXPORT_SYMBOL_GPL(tpm_show_pcrs); |
@@ -795,7 +844,7 @@ static const u8 cap_version[] = { | |||
795 | ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr, | 844 | ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr, |
796 | char *buf) | 845 | char *buf) |
797 | { | 846 | { |
798 | u8 data[max_t(int, max(ARRAY_SIZE(tpm_cap), ARRAY_SIZE(cap_version)), 30)]; | 847 | u8 *data; |
799 | ssize_t rc; | 848 | ssize_t rc; |
800 | char *str = buf; | 849 | char *str = buf; |
801 | 850 | ||
@@ -803,21 +852,27 @@ ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr, | |||
803 | if (chip == NULL) | 852 | if (chip == NULL) |
804 | return -ENODEV; | 853 | return -ENODEV; |
805 | 854 | ||
855 | data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL); | ||
856 | if (!data) | ||
857 | return -ENOMEM; | ||
858 | |||
806 | memcpy(data, tpm_cap, sizeof(tpm_cap)); | 859 | memcpy(data, tpm_cap, sizeof(tpm_cap)); |
807 | data[TPM_CAP_IDX] = TPM_CAP_PROP; | 860 | data[TPM_CAP_IDX] = TPM_CAP_PROP; |
808 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_MANUFACTURER; | 861 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_MANUFACTURER; |
809 | 862 | ||
810 | rc = transmit_cmd(chip, data, sizeof(data), | 863 | rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE, |
811 | "attempting to determine the manufacturer"); | 864 | "attempting to determine the manufacturer"); |
812 | if (rc) | 865 | if (rc) { |
866 | kfree(data); | ||
813 | return 0; | 867 | return 0; |
868 | } | ||
814 | 869 | ||
815 | str += sprintf(str, "Manufacturer: 0x%x\n", | 870 | str += sprintf(str, "Manufacturer: 0x%x\n", |
816 | be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_1_IDX)))); | 871 | be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_1_IDX)))); |
817 | 872 | ||
818 | memcpy(data, cap_version, sizeof(cap_version)); | 873 | memcpy(data, cap_version, sizeof(cap_version)); |
819 | data[CAP_VERSION_IDX] = CAP_VERSION_1_1; | 874 | data[CAP_VERSION_IDX] = CAP_VERSION_1_1; |
820 | rc = transmit_cmd(chip, data, sizeof(data), | 875 | rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE, |
821 | "attempting to determine the 1.1 version"); | 876 | "attempting to determine the 1.1 version"); |
822 | if (rc) | 877 | if (rc) |
823 | goto out; | 878 | goto out; |
@@ -828,6 +883,7 @@ ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr, | |||
828 | (int) data[17]); | 883 | (int) data[17]); |
829 | 884 | ||
830 | out: | 885 | out: |
886 | kfree(data); | ||
831 | return str - buf; | 887 | return str - buf; |
832 | } | 888 | } |
833 | EXPORT_SYMBOL_GPL(tpm_show_caps); | 889 | EXPORT_SYMBOL_GPL(tpm_show_caps); |
@@ -835,7 +891,7 @@ EXPORT_SYMBOL_GPL(tpm_show_caps); | |||
835 | ssize_t tpm_show_caps_1_2(struct device * dev, | 891 | ssize_t tpm_show_caps_1_2(struct device * dev, |
836 | struct device_attribute * attr, char *buf) | 892 | struct device_attribute * attr, char *buf) |
837 | { | 893 | { |
838 | u8 data[max_t(int, max(ARRAY_SIZE(tpm_cap), ARRAY_SIZE(cap_version)), 30)]; | 894 | u8 *data; |
839 | ssize_t len; | 895 | ssize_t len; |
840 | char *str = buf; | 896 | char *str = buf; |
841 | 897 | ||
@@ -843,15 +899,20 @@ ssize_t tpm_show_caps_1_2(struct device * dev, | |||
843 | if (chip == NULL) | 899 | if (chip == NULL) |
844 | return -ENODEV; | 900 | return -ENODEV; |
845 | 901 | ||
902 | data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL); | ||
903 | if (!data) | ||
904 | return -ENOMEM; | ||
905 | |||
846 | memcpy(data, tpm_cap, sizeof(tpm_cap)); | 906 | memcpy(data, tpm_cap, sizeof(tpm_cap)); |
847 | data[TPM_CAP_IDX] = TPM_CAP_PROP; | 907 | data[TPM_CAP_IDX] = TPM_CAP_PROP; |
848 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_MANUFACTURER; | 908 | data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_MANUFACTURER; |
849 | 909 | ||
850 | if ((len = tpm_transmit(chip, data, sizeof(data))) <= | 910 | len = tpm_transmit(chip, data, TPM_INTERNAL_RESULT_SIZE); |
851 | TPM_ERROR_SIZE) { | 911 | if (len <= TPM_ERROR_SIZE) { |
852 | dev_dbg(chip->dev, "A TPM error (%d) occurred " | 912 | dev_dbg(chip->dev, "A TPM error (%d) occurred " |
853 | "attempting to determine the manufacturer\n", | 913 | "attempting to determine the manufacturer\n", |
854 | be32_to_cpu(*((__be32 *) (data + TPM_RET_CODE_IDX)))); | 914 | be32_to_cpu(*((__be32 *) (data + TPM_RET_CODE_IDX)))); |
915 | kfree(data); | ||
855 | return 0; | 916 | return 0; |
856 | } | 917 | } |
857 | 918 | ||
@@ -861,8 +922,8 @@ ssize_t tpm_show_caps_1_2(struct device * dev, | |||
861 | memcpy(data, cap_version, sizeof(cap_version)); | 922 | memcpy(data, cap_version, sizeof(cap_version)); |
862 | data[CAP_VERSION_IDX] = CAP_VERSION_1_2; | 923 | data[CAP_VERSION_IDX] = CAP_VERSION_1_2; |
863 | 924 | ||
864 | if ((len = tpm_transmit(chip, data, sizeof(data))) <= | 925 | len = tpm_transmit(chip, data, TPM_INTERNAL_RESULT_SIZE); |
865 | TPM_ERROR_SIZE) { | 926 | if (len <= TPM_ERROR_SIZE) { |
866 | dev_err(chip->dev, "A TPM error (%d) occurred " | 927 | dev_err(chip->dev, "A TPM error (%d) occurred " |
867 | "attempting to determine the 1.2 version\n", | 928 | "attempting to determine the 1.2 version\n", |
868 | be32_to_cpu(*((__be32 *) (data + TPM_RET_CODE_IDX)))); | 929 | be32_to_cpu(*((__be32 *) (data + TPM_RET_CODE_IDX)))); |
@@ -874,6 +935,7 @@ ssize_t tpm_show_caps_1_2(struct device * dev, | |||
874 | (int) data[19]); | 935 | (int) data[19]); |
875 | 936 | ||
876 | out: | 937 | out: |
938 | kfree(data); | ||
877 | return str - buf; | 939 | return str - buf; |
878 | } | 940 | } |
879 | EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); | 941 | EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); |
@@ -966,7 +1028,7 @@ ssize_t tpm_write(struct file *file, const char __user *buf, | |||
966 | size_t size, loff_t *off) | 1028 | size_t size, loff_t *off) |
967 | { | 1029 | { |
968 | struct tpm_chip *chip = file->private_data; | 1030 | struct tpm_chip *chip = file->private_data; |
969 | int in_size = size, out_size; | 1031 | size_t in_size = size, out_size; |
970 | 1032 | ||
971 | /* cannot perform a write until the read has cleared | 1033 | /* cannot perform a write until the read has cleared |
972 | either via tpm_read or a user_read_timer timeout */ | 1034 | either via tpm_read or a user_read_timer timeout */ |
@@ -1001,7 +1063,7 @@ ssize_t tpm_read(struct file *file, char __user *buf, | |||
1001 | size_t size, loff_t *off) | 1063 | size_t size, loff_t *off) |
1002 | { | 1064 | { |
1003 | struct tpm_chip *chip = file->private_data; | 1065 | struct tpm_chip *chip = file->private_data; |
1004 | int ret_size; | 1066 | ssize_t ret_size; |
1005 | 1067 | ||
1006 | del_singleshot_timer_sync(&chip->user_read_timer); | 1068 | del_singleshot_timer_sync(&chip->user_read_timer); |
1007 | flush_scheduled_work(); | 1069 | flush_scheduled_work(); |
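Editor's note: the tpm.c changes above replace variable-length on-stack command buffers in the sysfs show handlers with a fixed-size kzalloc() buffer that is freed on every exit path. A stripped-down sketch of that handler shape, with hypothetical names and without the TPM command details:

```c
#include <linux/device.h>
#include <linux/slab.h>

#define MY_RESULT_SIZE 200	/* stand-in for TPM_INTERNAL_RESULT_SIZE */

static ssize_t my_show_attr(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	u8 *data;
	ssize_t rc;

	data = kzalloc(MY_RESULT_SIZE, GFP_KERNEL);	/* heap, not stack */
	if (!data)
		return -ENOMEM;

	/* ... build the command in data[] and transmit it ... */

	rc = sprintf(buf, "%d\n", data[0]);	/* format the result for sysfs */
	kfree(data);				/* freed on every return path */
	return rc;
}
```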
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index 60a2d2630e36..68f052b42ed7 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -448,7 +448,7 @@ out_free: | |||
448 | goto out; | 448 | goto out; |
449 | } | 449 | } |
450 | 450 | ||
451 | const struct file_operations tpm_ascii_bios_measurements_ops = { | 451 | static const struct file_operations tpm_ascii_bios_measurements_ops = { |
452 | .open = tpm_ascii_bios_measurements_open, | 452 | .open = tpm_ascii_bios_measurements_open, |
453 | .read = seq_read, | 453 | .read = seq_read, |
454 | .llseek = seq_lseek, | 454 | .llseek = seq_lseek, |
@@ -486,7 +486,7 @@ out_free: | |||
486 | goto out; | 486 | goto out; |
487 | } | 487 | } |
488 | 488 | ||
489 | const struct file_operations tpm_binary_bios_measurements_ops = { | 489 | static const struct file_operations tpm_binary_bios_measurements_ops = { |
490 | .open = tpm_binary_bios_measurements_open, | 490 | .open = tpm_binary_bios_measurements_open, |
491 | .read = seq_read, | 491 | .read = seq_read, |
492 | .llseek = seq_lseek, | 492 | .llseek = seq_lseek, |
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index c7a977bc03e8..ed1879c0dd8d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -622,6 +622,7 @@ static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = { | |||
622 | {"ATM1200", 0}, /* Atmel */ | 622 | {"ATM1200", 0}, /* Atmel */ |
623 | {"IFX0102", 0}, /* Infineon */ | 623 | {"IFX0102", 0}, /* Infineon */ |
624 | {"BCM0101", 0}, /* Broadcom */ | 624 | {"BCM0101", 0}, /* Broadcom */ |
625 | {"BCM0102", 0}, /* Broadcom */ | ||
625 | {"NSC1200", 0}, /* National */ | 626 | {"NSC1200", 0}, /* National */ |
626 | {"ICO0102", 0}, /* Intel */ | 627 | {"ICO0102", 0}, /* Intel */ |
627 | /* Add new here */ | 628 | /* Add new here */ |
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index bcbe794a3ea5..e14c03dc0065 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -50,7 +50,7 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, | |||
50 | 50 | ||
51 | address = dma_map_page(card->device, buffer->pages[i], | 51 | address = dma_map_page(card->device, buffer->pages[i], |
52 | 0, PAGE_SIZE, direction); | 52 | 0, PAGE_SIZE, direction); |
53 | if (dma_mapping_error(address)) { | 53 | if (dma_mapping_error(card->device, address)) { |
54 | __free_page(buffer->pages[i]); | 54 | __free_page(buffer->pages[i]); |
55 | goto out_pages; | 55 | goto out_pages; |
56 | } | 56 | } |
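Editor's note: starting with this fw-iso.c hunk, the merge adapts every dma_mapping_error() and pci_dma_mapping_error() caller to the new signatures that take the mapping device as their first argument (see the firewire, infiniband, and DVB hunks that follow). A minimal sketch of the updated calling convention, with hypothetical names:

```c
#include <linux/dma-mapping.h>

/*
 * Sketch of the post-change convention: the struct device used for
 * the mapping is now passed to dma_mapping_error() as well.
 */
static int my_map_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))	/* device pointer now required */
		return -EIO;

	*out = addr;
	return 0;
}
```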
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 333b12544dd1..566672e0bcff 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -953,7 +953,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet) | |||
953 | payload_bus = | 953 | payload_bus = |
954 | dma_map_single(ohci->card.device, packet->payload, | 954 | dma_map_single(ohci->card.device, packet->payload, |
955 | packet->payload_length, DMA_TO_DEVICE); | 955 | packet->payload_length, DMA_TO_DEVICE); |
956 | if (dma_mapping_error(payload_bus)) { | 956 | if (dma_mapping_error(ohci->card.device, payload_bus)) { |
957 | packet->ack = RCODE_SEND_ERROR; | 957 | packet->ack = RCODE_SEND_ERROR; |
958 | return -1; | 958 | return -1; |
959 | } | 959 | } |
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 53fc5a641e6d..aaff50ebba1d 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -543,7 +543,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
543 | orb->response_bus = | 543 | orb->response_bus = |
544 | dma_map_single(device->card->device, &orb->response, | 544 | dma_map_single(device->card->device, &orb->response, |
545 | sizeof(orb->response), DMA_FROM_DEVICE); | 545 | sizeof(orb->response), DMA_FROM_DEVICE); |
546 | if (dma_mapping_error(orb->response_bus)) | 546 | if (dma_mapping_error(device->card->device, orb->response_bus)) |
547 | goto fail_mapping_response; | 547 | goto fail_mapping_response; |
548 | 548 | ||
549 | orb->request.response.high = 0; | 549 | orb->request.response.high = 0; |
@@ -577,7 +577,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
577 | orb->base.request_bus = | 577 | orb->base.request_bus = |
578 | dma_map_single(device->card->device, &orb->request, | 578 | dma_map_single(device->card->device, &orb->request, |
579 | sizeof(orb->request), DMA_TO_DEVICE); | 579 | sizeof(orb->request), DMA_TO_DEVICE); |
580 | if (dma_mapping_error(orb->base.request_bus)) | 580 | if (dma_mapping_error(device->card->device, orb->base.request_bus)) |
581 | goto fail_mapping_request; | 581 | goto fail_mapping_request; |
582 | 582 | ||
583 | sbp2_send_orb(&orb->base, lu, node_id, generation, | 583 | sbp2_send_orb(&orb->base, lu, node_id, generation, |
@@ -1424,7 +1424,7 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device, | |||
1424 | orb->page_table_bus = | 1424 | orb->page_table_bus = |
1425 | dma_map_single(device->card->device, orb->page_table, | 1425 | dma_map_single(device->card->device, orb->page_table, |
1426 | sizeof(orb->page_table), DMA_TO_DEVICE); | 1426 | sizeof(orb->page_table), DMA_TO_DEVICE); |
1427 | if (dma_mapping_error(orb->page_table_bus)) | 1427 | if (dma_mapping_error(device->card->device, orb->page_table_bus)) |
1428 | goto fail_page_table; | 1428 | goto fail_page_table; |
1429 | 1429 | ||
1430 | /* | 1430 | /* |
@@ -1509,7 +1509,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | |||
1509 | orb->base.request_bus = | 1509 | orb->base.request_bus = |
1510 | dma_map_single(device->card->device, &orb->request, | 1510 | dma_map_single(device->card->device, &orb->request, |
1511 | sizeof(orb->request), DMA_TO_DEVICE); | 1511 | sizeof(orb->request), DMA_TO_DEVICE); |
1512 | if (dma_mapping_error(orb->base.request_bus)) | 1512 | if (dma_mapping_error(device->card->device, orb->base.request_bus)) |
1513 | goto out; | 1513 | goto out; |
1514 | 1514 | ||
1515 | sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation, | 1515 | sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation, |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 564138714bb5..452c2d866ec5 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -318,7 +318,7 @@ static void drm_cleanup(struct drm_device * dev) | |||
318 | DRM_ERROR("Cannot unload module\n"); | 318 | DRM_ERROR("Cannot unload module\n"); |
319 | } | 319 | } |
320 | 320 | ||
321 | int drm_minors_cleanup(int id, void *ptr, void *data) | 321 | static int drm_minors_cleanup(int id, void *ptr, void *data) |
322 | { | 322 | { |
323 | struct drm_minor *minor = ptr; | 323 | struct drm_minor *minor = ptr; |
324 | struct drm_device *dev; | 324 | struct drm_device *dev; |
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index eaba03273e4f..284c9bca517e 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -698,7 +698,7 @@ retry: | |||
698 | 698 | ||
699 | addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr, | 699 | addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr, |
700 | tx->map_len, DMA_TO_DEVICE); | 700 | tx->map_len, DMA_TO_DEVICE); |
701 | if (dma_mapping_error(addr)) { | 701 | if (dma_mapping_error(&dd->pcidev->dev, addr)) { |
702 | ret = -EIO; | 702 | ret = -EIO; |
703 | goto unlock; | 703 | goto unlock; |
704 | } | 704 | } |
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index 86e016916cd1..82d9a0b5ca2f 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd, | |||
206 | 206 | ||
207 | dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len, | 207 | dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len, |
208 | DMA_TO_DEVICE); | 208 | DMA_TO_DEVICE); |
209 | if (dma_mapping_error(dma_addr)) { | 209 | if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { |
210 | ret = -ENOMEM; | 210 | ret = -ENOMEM; |
211 | goto free_unmap; | 211 | goto free_unmap; |
212 | } | 212 | } |
@@ -301,7 +301,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd, | |||
301 | pages[j], 0, flen, DMA_TO_DEVICE); | 301 | pages[j], 0, flen, DMA_TO_DEVICE); |
302 | unsigned long fofs = addr & ~PAGE_MASK; | 302 | unsigned long fofs = addr & ~PAGE_MASK; |
303 | 303 | ||
304 | if (dma_mapping_error(dma_addr)) { | 304 | if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { |
305 | ret = -ENOMEM; | 305 | ret = -ENOMEM; |
306 | goto done; | 306 | goto done; |
307 | } | 307 | } |
@@ -508,7 +508,7 @@ static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd, | |||
508 | if (page) { | 508 | if (page) { |
509 | dma_addr = dma_map_page(&dd->pcidev->dev, | 509 | dma_addr = dma_map_page(&dd->pcidev->dev, |
510 | page, 0, len, DMA_TO_DEVICE); | 510 | page, 0, len, DMA_TO_DEVICE); |
511 | if (dma_mapping_error(dma_addr)) { | 511 | if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { |
512 | ret = -ENOMEM; | 512 | ret = -ENOMEM; |
513 | goto free_pbc; | 513 | goto free_pbc; |
514 | } | 514 | } |
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 4e36aa7cb3d2..cc6858f0b65b 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -780,7 +780,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt) | |||
780 | return -ENOMEM; | 780 | return -ENOMEM; |
781 | dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0, | 781 | dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0, |
782 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 782 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
783 | if (pci_dma_mapping_error(dev->eq_table.icm_dma)) { | 783 | if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) { |
784 | __free_page(dev->eq_table.icm_page); | 784 | __free_page(dev->eq_table.icm_page); |
785 | return -ENOMEM; | 785 | return -ENOMEM; |
786 | } | 786 | } |
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index c0b4db2f8364..1925118122f8 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -974,8 +974,6 @@ static struct pnp_driver fcpnp_driver = { | |||
974 | .remove = __devexit_p(fcpnp_remove), | 974 | .remove = __devexit_p(fcpnp_remove), |
975 | .id_table = fcpnp_ids, | 975 | .id_table = fcpnp_ids, |
976 | }; | 976 | }; |
977 | #else | ||
978 | static struct pnp_driver fcpnp_driver; | ||
979 | #endif | 977 | #endif |
980 | 978 | ||
981 | static void __devexit fcpci_remove(struct pci_dev *pdev) | 979 | static void __devexit fcpci_remove(struct pci_dev *pdev) |
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 1360403b88b6..a9653c63f4db 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -242,7 +242,7 @@ static int __devinit pluto_dma_map(struct pluto *pluto) | |||
242 | pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf, | 242 | pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf, |
243 | TS_DMA_BYTES, PCI_DMA_FROMDEVICE); | 243 | TS_DMA_BYTES, PCI_DMA_FROMDEVICE); |
244 | 244 | ||
245 | return pci_dma_mapping_error(pluto->dma_addr); | 245 | return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr); |
246 | } | 246 | } |
247 | 247 | ||
248 | static void pluto_dma_unmap(struct pluto *pluto) | 248 | static void pluto_dma_unmap(struct pluto *pluto) |
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 61b98c333cb0..a38005008a20 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -249,8 +249,11 @@ EXPORT_SYMBOL(memstick_next_req); | |||
249 | */ | 249 | */ |
250 | void memstick_new_req(struct memstick_host *host) | 250 | void memstick_new_req(struct memstick_host *host) |
251 | { | 251 | { |
252 | host->retries = cmd_retries; | 252 | if (host->card) { |
253 | host->request(host); | 253 | host->retries = cmd_retries; |
254 | INIT_COMPLETION(host->card->mrq_complete); | ||
255 | host->request(host); | ||
256 | } | ||
254 | } | 257 | } |
255 | EXPORT_SYMBOL(memstick_new_req); | 258 | EXPORT_SYMBOL(memstick_new_req); |
256 | 259 | ||
@@ -415,10 +418,14 @@ err_out: | |||
415 | return NULL; | 418 | return NULL; |
416 | } | 419 | } |
417 | 420 | ||
418 | static void memstick_power_on(struct memstick_host *host) | 421 | static int memstick_power_on(struct memstick_host *host) |
419 | { | 422 | { |
420 | host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON); | 423 | int rc = host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON); |
421 | host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL); | 424 | |
425 | if (!rc) | ||
426 | rc = host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL); | ||
427 | |||
428 | return rc; | ||
422 | } | 429 | } |
423 | 430 | ||
424 | static void memstick_check(struct work_struct *work) | 431 | static void memstick_check(struct work_struct *work) |
@@ -429,8 +436,11 @@ static void memstick_check(struct work_struct *work) | |||
429 | 436 | ||
430 | dev_dbg(&host->dev, "memstick_check started\n"); | 437 | dev_dbg(&host->dev, "memstick_check started\n"); |
431 | mutex_lock(&host->lock); | 438 | mutex_lock(&host->lock); |
432 | if (!host->card) | 439 | if (!host->card) { |
433 | memstick_power_on(host); | 440 | if (memstick_power_on(host)) |
441 | goto out_power_off; | ||
442 | } else | ||
443 | host->card->stop(host->card); | ||
434 | 444 | ||
435 | card = memstick_alloc_card(host); | 445 | card = memstick_alloc_card(host); |
436 | 446 | ||
@@ -448,7 +458,8 @@ static void memstick_check(struct work_struct *work) | |||
448 | || !(host->card->check(host->card))) { | 458 | || !(host->card->check(host->card))) { |
449 | device_unregister(&host->card->dev); | 459 | device_unregister(&host->card->dev); |
450 | host->card = NULL; | 460 | host->card = NULL; |
451 | } | 461 | } else |
462 | host->card->start(host->card); | ||
452 | } | 463 | } |
453 | 464 | ||
454 | if (!host->card) { | 465 | if (!host->card) { |
@@ -461,6 +472,7 @@ static void memstick_check(struct work_struct *work) | |||
461 | kfree(card); | 472 | kfree(card); |
462 | } | 473 | } |
463 | 474 | ||
475 | out_power_off: | ||
464 | if (!host->card) | 476 | if (!host->card) |
465 | host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); | 477 | host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); |
466 | 478 | ||
@@ -573,11 +585,15 @@ EXPORT_SYMBOL(memstick_suspend_host); | |||
573 | */ | 585 | */ |
574 | void memstick_resume_host(struct memstick_host *host) | 586 | void memstick_resume_host(struct memstick_host *host) |
575 | { | 587 | { |
588 | int rc = 0; | ||
589 | |||
576 | mutex_lock(&host->lock); | 590 | mutex_lock(&host->lock); |
577 | if (host->card) | 591 | if (host->card) |
578 | memstick_power_on(host); | 592 | rc = memstick_power_on(host); |
579 | mutex_unlock(&host->lock); | 593 | mutex_unlock(&host->lock); |
580 | memstick_detect_change(host); | 594 | |
595 | if (!rc) | ||
596 | memstick_detect_change(host); | ||
581 | } | 597 | } |
582 | EXPORT_SYMBOL(memstick_resume_host); | 598 | EXPORT_SYMBOL(memstick_resume_host); |
583 | 599 | ||
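Editor's note: the memstick core changes make memstick_power_on() return the host's set_param() status, and its callers now bail out (and power the host back off) on failure instead of continuing blindly. The sketch below illustrates only that error-propagation shape; every type and constant in it is a hypothetical stand-in:

```c
/* Hypothetical host abstraction used only for this sketch. */
struct my_host {
	int (*set_param)(struct my_host *host, int param, int value);
};

enum { MY_POWER, MY_INTERFACE };
enum { MY_POWER_ON = 1, MY_SERIAL = 2 };

/* Chain two configuration steps and report the first failure. */
static int my_power_on(struct my_host *host)
{
	int rc = host->set_param(host, MY_POWER, MY_POWER_ON);

	if (!rc)
		rc = host->set_param(host, MY_INTERFACE, MY_SERIAL);

	return rc;	/* caller powers the host off again on error */
}
```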
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 477d0fb6e588..44b1817f2f2f 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -136,9 +136,8 @@ struct mspro_block_data { | |||
136 | unsigned int caps; | 136 | unsigned int caps; |
137 | struct gendisk *disk; | 137 | struct gendisk *disk; |
138 | struct request_queue *queue; | 138 | struct request_queue *queue; |
139 | struct request *block_req; | ||
139 | spinlock_t q_lock; | 140 | spinlock_t q_lock; |
140 | wait_queue_head_t q_wait; | ||
141 | struct task_struct *q_thread; | ||
142 | 141 | ||
143 | unsigned short page_size; | 142 | unsigned short page_size; |
144 | unsigned short cylinders; | 143 | unsigned short cylinders; |
@@ -147,9 +146,10 @@ struct mspro_block_data { | |||
147 | 146 | ||
148 | unsigned char system; | 147 | unsigned char system; |
149 | unsigned char read_only:1, | 148 | unsigned char read_only:1, |
150 | active:1, | 149 | eject:1, |
151 | has_request:1, | 150 | has_request:1, |
152 | data_dir:1; | 151 | data_dir:1, |
152 | active:1; | ||
153 | unsigned char transfer_cmd; | 153 | unsigned char transfer_cmd; |
154 | 154 | ||
155 | int (*mrq_handler)(struct memstick_dev *card, | 155 | int (*mrq_handler)(struct memstick_dev *card, |
@@ -160,12 +160,14 @@ struct mspro_block_data { | |||
160 | struct scatterlist req_sg[MSPRO_BLOCK_MAX_SEGS]; | 160 | struct scatterlist req_sg[MSPRO_BLOCK_MAX_SEGS]; |
161 | unsigned int seg_count; | 161 | unsigned int seg_count; |
162 | unsigned int current_seg; | 162 | unsigned int current_seg; |
163 | unsigned short current_page; | 163 | unsigned int current_page; |
164 | }; | 164 | }; |
165 | 165 | ||
166 | static DEFINE_IDR(mspro_block_disk_idr); | 166 | static DEFINE_IDR(mspro_block_disk_idr); |
167 | static DEFINE_MUTEX(mspro_block_disk_lock); | 167 | static DEFINE_MUTEX(mspro_block_disk_lock); |
168 | 168 | ||
169 | static int mspro_block_complete_req(struct memstick_dev *card, int error); | ||
170 | |||
169 | /*** Block device ***/ | 171 | /*** Block device ***/ |
170 | 172 | ||
171 | static int mspro_block_bd_open(struct inode *inode, struct file *filp) | 173 | static int mspro_block_bd_open(struct inode *inode, struct file *filp) |
@@ -197,8 +199,10 @@ static int mspro_block_disk_release(struct gendisk *disk) | |||
197 | 199 | ||
198 | mutex_lock(&mspro_block_disk_lock); | 200 | mutex_lock(&mspro_block_disk_lock); |
199 | 201 | ||
200 | if (msb->usage_count) { | 202 | if (msb) { |
201 | msb->usage_count--; | 203 | if (msb->usage_count) |
204 | msb->usage_count--; | ||
205 | |||
202 | if (!msb->usage_count) { | 206 | if (!msb->usage_count) { |
203 | kfree(msb); | 207 | kfree(msb); |
204 | disk->private_data = NULL; | 208 | disk->private_data = NULL; |
@@ -523,11 +527,13 @@ static int h_mspro_block_req_init(struct memstick_dev *card, | |||
523 | static int h_mspro_block_default(struct memstick_dev *card, | 527 | static int h_mspro_block_default(struct memstick_dev *card, |
524 | struct memstick_request **mrq) | 528 | struct memstick_request **mrq) |
525 | { | 529 | { |
526 | complete(&card->mrq_complete); | 530 | return mspro_block_complete_req(card, (*mrq)->error); |
527 | if (!(*mrq)->error) | 531 | } |
528 | return -EAGAIN; | 532 | |
529 | else | 533 | static int h_mspro_block_default_bad(struct memstick_dev *card, |
530 | return (*mrq)->error; | 534 | struct memstick_request **mrq) |
535 | { | ||
536 | return -ENXIO; | ||
531 | } | 537 | } |
532 | 538 | ||
533 | static int h_mspro_block_get_ro(struct memstick_dev *card, | 539 | static int h_mspro_block_get_ro(struct memstick_dev *card, |
@@ -535,44 +541,30 @@ static int h_mspro_block_get_ro(struct memstick_dev *card, | |||
535 | { | 541 | { |
536 | struct mspro_block_data *msb = memstick_get_drvdata(card); | 542 | struct mspro_block_data *msb = memstick_get_drvdata(card); |
537 | 543 | ||
538 | if ((*mrq)->error) { | 544 | if (!(*mrq)->error) { |
539 | complete(&card->mrq_complete); | 545 | if ((*mrq)->data[offsetof(struct ms_status_register, status0)] |
540 | return (*mrq)->error; | 546 | & MEMSTICK_STATUS0_WP) |
547 | msb->read_only = 1; | ||
548 | else | ||
549 | msb->read_only = 0; | ||
541 | } | 550 | } |
542 | 551 | ||
543 | if ((*mrq)->data[offsetof(struct ms_status_register, status0)] | 552 | return mspro_block_complete_req(card, (*mrq)->error); |
544 | & MEMSTICK_STATUS0_WP) | ||
545 | msb->read_only = 1; | ||
546 | else | ||
547 | msb->read_only = 0; | ||
548 | |||
549 | complete(&card->mrq_complete); | ||
550 | return -EAGAIN; | ||
551 | } | 553 | } |
552 | 554 | ||
553 | static int h_mspro_block_wait_for_ced(struct memstick_dev *card, | 555 | static int h_mspro_block_wait_for_ced(struct memstick_dev *card, |
554 | struct memstick_request **mrq) | 556 | struct memstick_request **mrq) |
555 | { | 557 | { |
556 | if ((*mrq)->error) { | ||
557 | complete(&card->mrq_complete); | ||
558 | return (*mrq)->error; | ||
559 | } | ||
560 | |||
561 | dev_dbg(&card->dev, "wait for ced: value %x\n", (*mrq)->data[0]); | 558 | dev_dbg(&card->dev, "wait for ced: value %x\n", (*mrq)->data[0]); |
562 | 559 | ||
563 | if ((*mrq)->data[0] & (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)) { | 560 | if (!(*mrq)->error) { |
564 | card->current_mrq.error = -EFAULT; | 561 | if ((*mrq)->data[0] & (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)) |
565 | complete(&card->mrq_complete); | 562 | (*mrq)->error = -EFAULT; |
566 | return card->current_mrq.error; | 563 | else if (!((*mrq)->data[0] & MEMSTICK_INT_CED)) |
564 | return 0; | ||
567 | } | 565 | } |
568 | 566 | ||
569 | if (!((*mrq)->data[0] & MEMSTICK_INT_CED)) | 567 | return mspro_block_complete_req(card, (*mrq)->error); |
570 | return 0; | ||
571 | else { | ||
572 | card->current_mrq.error = 0; | ||
573 | complete(&card->mrq_complete); | ||
574 | return -EAGAIN; | ||
575 | } | ||
576 | } | 568 | } |
577 | 569 | ||
578 | static int h_mspro_block_transfer_data(struct memstick_dev *card, | 570 | static int h_mspro_block_transfer_data(struct memstick_dev *card, |
@@ -583,10 +575,8 @@ static int h_mspro_block_transfer_data(struct memstick_dev *card, | |||
583 | struct scatterlist t_sg = { 0 }; | 575 | struct scatterlist t_sg = { 0 }; |
584 | size_t t_offset; | 576 | size_t t_offset; |
585 | 577 | ||
586 | if ((*mrq)->error) { | 578 | if ((*mrq)->error) |
587 | complete(&card->mrq_complete); | 579 | return mspro_block_complete_req(card, (*mrq)->error); |
588 | return (*mrq)->error; | ||
589 | } | ||
590 | 580 | ||
591 | switch ((*mrq)->tpc) { | 581 | switch ((*mrq)->tpc) { |
592 | case MS_TPC_WRITE_REG: | 582 | case MS_TPC_WRITE_REG: |
@@ -617,8 +607,8 @@ has_int_reg: | |||
617 | 607 | ||
618 | if (msb->current_seg == msb->seg_count) { | 608 | if (msb->current_seg == msb->seg_count) { |
619 | if (t_val & MEMSTICK_INT_CED) { | 609 | if (t_val & MEMSTICK_INT_CED) { |
620 | complete(&card->mrq_complete); | 610 | return mspro_block_complete_req(card, |
621 | return -EAGAIN; | 611 | 0); |
622 | } else { | 612 | } else { |
623 | card->next_request | 613 | card->next_request |
624 | = h_mspro_block_wait_for_ced; | 614 | = h_mspro_block_wait_for_ced; |
@@ -666,140 +656,184 @@ has_int_reg: | |||
666 | 656 | ||
667 | /*** Data transfer ***/ | 657 | /*** Data transfer ***/ |
668 | 658 | ||
669 | static void mspro_block_process_request(struct memstick_dev *card, | 659 | static int mspro_block_issue_req(struct memstick_dev *card, int chunk) |
670 | struct request *req) | ||
671 | { | 660 | { |
672 | struct mspro_block_data *msb = memstick_get_drvdata(card); | 661 | struct mspro_block_data *msb = memstick_get_drvdata(card); |
673 | struct mspro_param_register param; | ||
674 | int rc, chunk, cnt; | ||
675 | unsigned short page_count; | ||
676 | sector_t t_sec; | 662 | sector_t t_sec; |
677 | unsigned long flags; | 663 | unsigned int count; |
664 | struct mspro_param_register param; | ||
678 | 665 | ||
679 | do { | 666 | try_again: |
680 | page_count = 0; | 667 | while (chunk) { |
668 | msb->current_page = 0; | ||
681 | msb->current_seg = 0; | 669 | msb->current_seg = 0; |
682 | msb->seg_count = blk_rq_map_sg(req->q, req, msb->req_sg); | 670 | msb->seg_count = blk_rq_map_sg(msb->block_req->q, |
671 | msb->block_req, | ||
672 | msb->req_sg); | ||
683 | 673 | ||
684 | if (msb->seg_count) { | 674 | if (!msb->seg_count) { |
685 | msb->current_page = 0; | 675 | chunk = __blk_end_request(msb->block_req, -ENOMEM, |
686 | for (rc = 0; rc < msb->seg_count; rc++) | 676 | blk_rq_cur_bytes(msb->block_req)); |
687 | page_count += msb->req_sg[rc].length | 677 | continue; |
688 | / msb->page_size; | 678 | } |
689 | |||
690 | t_sec = req->sector; | ||
691 | sector_div(t_sec, msb->page_size >> 9); | ||
692 | param.system = msb->system; | ||
693 | param.data_count = cpu_to_be16(page_count); | ||
694 | param.data_address = cpu_to_be32((uint32_t)t_sec); | ||
695 | param.tpc_param = 0; | ||
696 | |||
697 | msb->data_dir = rq_data_dir(req); | ||
698 | msb->transfer_cmd = msb->data_dir == READ | ||
699 | ? MSPRO_CMD_READ_DATA | ||
700 | : MSPRO_CMD_WRITE_DATA; | ||
701 | |||
702 | dev_dbg(&card->dev, "data transfer: cmd %x, " | ||
703 | "lba %x, count %x\n", msb->transfer_cmd, | ||
704 | be32_to_cpu(param.data_address), | ||
705 | page_count); | ||
706 | |||
707 | card->next_request = h_mspro_block_req_init; | ||
708 | msb->mrq_handler = h_mspro_block_transfer_data; | ||
709 | memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, | ||
710 | ¶m, sizeof(param)); | ||
711 | memstick_new_req(card->host); | ||
712 | wait_for_completion(&card->mrq_complete); | ||
713 | rc = card->current_mrq.error; | ||
714 | 679 | ||
715 | if (rc || (card->current_mrq.tpc == MSPRO_CMD_STOP)) { | 680 | t_sec = msb->block_req->sector << 9; |
716 | for (cnt = 0; cnt < msb->current_seg; cnt++) | 681 | sector_div(t_sec, msb->page_size); |
717 | page_count += msb->req_sg[cnt].length | ||
718 | / msb->page_size; | ||
719 | |||
720 | if (msb->current_page) | ||
721 | page_count += msb->current_page - 1; | ||
722 | |||
723 | if (page_count && (msb->data_dir == READ)) | ||
724 | rc = msb->page_size * page_count; | ||
725 | else | ||
726 | rc = -EIO; | ||
727 | } else | ||
728 | rc = msb->page_size * page_count; | ||
729 | } else | ||
730 | rc = -EFAULT; | ||
731 | 682 | ||
732 | spin_lock_irqsave(&msb->q_lock, flags); | 683 | count = msb->block_req->nr_sectors << 9; |
733 | if (rc >= 0) | 684 | count /= msb->page_size; |
734 | chunk = __blk_end_request(req, 0, rc); | ||
735 | else | ||
736 | chunk = __blk_end_request(req, rc, 0); | ||
737 | 685 | ||
738 | dev_dbg(&card->dev, "end chunk %d, %d\n", rc, chunk); | 686 | param.system = msb->system; |
739 | spin_unlock_irqrestore(&msb->q_lock, flags); | 687 | param.data_count = cpu_to_be16(count); |
740 | } while (chunk); | 688 | param.data_address = cpu_to_be32((uint32_t)t_sec); |
689 | param.tpc_param = 0; | ||
690 | |||
691 | msb->data_dir = rq_data_dir(msb->block_req); | ||
692 | msb->transfer_cmd = msb->data_dir == READ | ||
693 | ? MSPRO_CMD_READ_DATA | ||
694 | : MSPRO_CMD_WRITE_DATA; | ||
695 | |||
696 | dev_dbg(&card->dev, "data transfer: cmd %x, " | ||
697 | "lba %x, count %x\n", msb->transfer_cmd, | ||
698 | be32_to_cpu(param.data_address), count); | ||
699 | |||
700 | card->next_request = h_mspro_block_req_init; | ||
701 | msb->mrq_handler = h_mspro_block_transfer_data; | ||
702 | memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, | ||
703 | ¶m, sizeof(param)); | ||
704 | memstick_new_req(card->host); | ||
705 | return 0; | ||
706 | } | ||
707 | |||
708 | dev_dbg(&card->dev, "elv_next\n"); | ||
709 | msb->block_req = elv_next_request(msb->queue); | ||
710 | if (!msb->block_req) { | ||
711 | dev_dbg(&card->dev, "issue end\n"); | ||
712 | return -EAGAIN; | ||
713 | } | ||
714 | |||
715 | dev_dbg(&card->dev, "trying again\n"); | ||
716 | chunk = 1; | ||
717 | goto try_again; | ||
741 | } | 718 | } |
742 | 719 | ||
743 | static int mspro_block_has_request(struct mspro_block_data *msb) | 720 | static int mspro_block_complete_req(struct memstick_dev *card, int error) |
744 | { | 721 | { |
745 | int rc = 0; | 722 | struct mspro_block_data *msb = memstick_get_drvdata(card); |
723 | int chunk, cnt; | ||
724 | unsigned int t_len = 0; | ||
746 | unsigned long flags; | 725 | unsigned long flags; |
747 | 726 | ||
748 | spin_lock_irqsave(&msb->q_lock, flags); | 727 | spin_lock_irqsave(&msb->q_lock, flags); |
749 | if (kthread_should_stop() || msb->has_request) | 728 | dev_dbg(&card->dev, "complete %d, %d\n", msb->has_request ? 1 : 0, |
750 | rc = 1; | 729 | error); |
730 | |||
731 | if (msb->has_request) { | ||
732 | /* Nothing to do - not really an error */ | ||
733 | if (error == -EAGAIN) | ||
734 | error = 0; | ||
735 | |||
736 | if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) { | ||
737 | if (msb->data_dir == READ) { | ||
738 | for (cnt = 0; cnt < msb->current_seg; cnt++) | ||
739 | t_len += msb->req_sg[cnt].length | ||
740 | / msb->page_size; | ||
741 | |||
742 | if (msb->current_page) | ||
743 | t_len += msb->current_page - 1; | ||
744 | |||
745 | t_len *= msb->page_size; | ||
746 | } | ||
747 | } else | ||
748 | t_len = msb->block_req->nr_sectors << 9; | ||
749 | |||
750 | dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error); | ||
751 | |||
752 | if (error && !t_len) | ||
753 | t_len = blk_rq_cur_bytes(msb->block_req); | ||
754 | |||
755 | chunk = __blk_end_request(msb->block_req, error, t_len); | ||
756 | |||
757 | error = mspro_block_issue_req(card, chunk); | ||
758 | |||
759 | if (!error) | ||
760 | goto out; | ||
761 | else | ||
762 | msb->has_request = 0; | ||
763 | } else { | ||
764 | if (!error) | ||
765 | error = -EAGAIN; | ||
766 | } | ||
767 | |||
768 | card->next_request = h_mspro_block_default_bad; | ||
769 | complete_all(&card->mrq_complete); | ||
770 | out: | ||
751 | spin_unlock_irqrestore(&msb->q_lock, flags); | 771 | spin_unlock_irqrestore(&msb->q_lock, flags); |
752 | return rc; | 772 | return error; |
753 | } | 773 | } |
754 | 774 | ||
755 | static int mspro_block_queue_thread(void *data) | 775 | static void mspro_block_stop(struct memstick_dev *card) |
756 | { | 776 | { |
757 | struct memstick_dev *card = data; | ||
758 | struct memstick_host *host = card->host; | ||
759 | struct mspro_block_data *msb = memstick_get_drvdata(card); | 777 | struct mspro_block_data *msb = memstick_get_drvdata(card); |
760 | struct request *req; | 778 | int rc = 0; |
761 | unsigned long flags; | 779 | unsigned long flags; |
762 | 780 | ||
763 | while (1) { | 781 | while (1) { |
764 | wait_event(msb->q_wait, mspro_block_has_request(msb)); | ||
765 | dev_dbg(&card->dev, "thread iter\n"); | ||
766 | |||
767 | spin_lock_irqsave(&msb->q_lock, flags); | 782 | spin_lock_irqsave(&msb->q_lock, flags); |
768 | req = elv_next_request(msb->queue); | 783 | if (!msb->has_request) { |
769 | dev_dbg(&card->dev, "next req %p\n", req); | 784 | blk_stop_queue(msb->queue); |
770 | if (!req) { | 785 | rc = 1; |
771 | msb->has_request = 0; | 786 | } |
772 | if (kthread_should_stop()) { | ||
773 | spin_unlock_irqrestore(&msb->q_lock, flags); | ||
774 | break; | ||
775 | } | ||
776 | } else | ||
777 | msb->has_request = 1; | ||
778 | spin_unlock_irqrestore(&msb->q_lock, flags); | 787 | spin_unlock_irqrestore(&msb->q_lock, flags); |
779 | 788 | ||
780 | if (req) { | 789 | if (rc) |
781 | mutex_lock(&host->lock); | 790 | break; |
782 | mspro_block_process_request(card, req); | 791 | |
783 | mutex_unlock(&host->lock); | 792 | wait_for_completion(&card->mrq_complete); |
784 | } | ||
785 | } | 793 | } |
786 | dev_dbg(&card->dev, "thread finished\n"); | ||
787 | return 0; | ||
788 | } | 794 | } |
789 | 795 | ||
790 | static void mspro_block_request(struct request_queue *q) | 796 | static void mspro_block_start(struct memstick_dev *card) |
797 | { | ||
798 | struct mspro_block_data *msb = memstick_get_drvdata(card); | ||
799 | unsigned long flags; | ||
800 | |||
801 | spin_lock_irqsave(&msb->q_lock, flags); | ||
802 | blk_start_queue(msb->queue); | ||
803 | spin_unlock_irqrestore(&msb->q_lock, flags); | ||
804 | } | ||
805 | |||
806 | static int mspro_block_prepare_req(struct request_queue *q, struct request *req) | ||
807 | { | ||
808 | if (!blk_fs_request(req) && !blk_pc_request(req)) { | ||
809 | blk_dump_rq_flags(req, "MSPro unsupported request"); | ||
810 | return BLKPREP_KILL; | ||
811 | } | ||
812 | |||
813 | req->cmd_flags |= REQ_DONTPREP; | ||
814 | |||
815 | return BLKPREP_OK; | ||
816 | } | ||
817 | |||
818 | static void mspro_block_submit_req(struct request_queue *q) | ||
791 | { | 819 | { |
792 | struct memstick_dev *card = q->queuedata; | 820 | struct memstick_dev *card = q->queuedata; |
793 | struct mspro_block_data *msb = memstick_get_drvdata(card); | 821 | struct mspro_block_data *msb = memstick_get_drvdata(card); |
794 | struct request *req = NULL; | 822 | struct request *req = NULL; |
795 | 823 | ||
796 | if (msb->q_thread) { | 824 | if (msb->has_request) |
797 | msb->has_request = 1; | 825 | return; |
798 | wake_up_all(&msb->q_wait); | 826 | |
799 | } else { | 827 | if (msb->eject) { |
800 | while ((req = elv_next_request(q)) != NULL) | 828 | while ((req = elv_next_request(q)) != NULL) |
801 | end_queued_request(req, -ENODEV); | 829 | end_queued_request(req, -ENODEV); |
830 | |||
831 | return; | ||
802 | } | 832 | } |
833 | |||
834 | msb->has_request = 1; | ||
835 | if (mspro_block_issue_req(card, 0)) | ||
836 | msb->has_request = 0; | ||
803 | } | 837 | } |
804 | 838 | ||
805 | /*** Initialization ***/ | 839 | /*** Initialization ***/ |
@@ -1169,16 +1203,14 @@ static int mspro_block_init_disk(struct memstick_dev *card) | |||
1169 | goto out_release_id; | 1203 | goto out_release_id; |
1170 | } | 1204 | } |
1171 | 1205 | ||
1172 | spin_lock_init(&msb->q_lock); | 1206 | msb->queue = blk_init_queue(mspro_block_submit_req, &msb->q_lock); |
1173 | init_waitqueue_head(&msb->q_wait); | ||
1174 | |||
1175 | msb->queue = blk_init_queue(mspro_block_request, &msb->q_lock); | ||
1176 | if (!msb->queue) { | 1207 | if (!msb->queue) { |
1177 | rc = -ENOMEM; | 1208 | rc = -ENOMEM; |
1178 | goto out_put_disk; | 1209 | goto out_put_disk; |
1179 | } | 1210 | } |
1180 | 1211 | ||
1181 | msb->queue->queuedata = card; | 1212 | msb->queue->queuedata = card; |
1213 | blk_queue_prep_rq(msb->queue, mspro_block_prepare_req); | ||
1182 | 1214 | ||
1183 | blk_queue_bounce_limit(msb->queue, limit); | 1215 | blk_queue_bounce_limit(msb->queue, limit); |
1184 | blk_queue_max_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES); | 1216 | blk_queue_max_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES); |
@@ -1204,14 +1236,8 @@ static int mspro_block_init_disk(struct memstick_dev *card) | |||
1204 | capacity *= msb->page_size >> 9; | 1236 | capacity *= msb->page_size >> 9; |
1205 | set_capacity(msb->disk, capacity); | 1237 | set_capacity(msb->disk, capacity); |
1206 | dev_dbg(&card->dev, "capacity set %ld\n", capacity); | 1238 | dev_dbg(&card->dev, "capacity set %ld\n", capacity); |
1207 | msb->q_thread = kthread_run(mspro_block_queue_thread, card, | ||
1208 | DRIVER_NAME"d"); | ||
1209 | if (IS_ERR(msb->q_thread)) | ||
1210 | goto out_put_disk; | ||
1211 | 1239 | ||
1212 | mutex_unlock(&host->lock); | ||
1213 | add_disk(msb->disk); | 1240 | add_disk(msb->disk); |
1214 | mutex_lock(&host->lock); | ||
1215 | msb->active = 1; | 1241 | msb->active = 1; |
1216 | return 0; | 1242 | return 0; |
1217 | 1243 | ||
@@ -1259,6 +1285,7 @@ static int mspro_block_probe(struct memstick_dev *card) | |||
1259 | return -ENOMEM; | 1285 | return -ENOMEM; |
1260 | memstick_set_drvdata(card, msb); | 1286 | memstick_set_drvdata(card, msb); |
1261 | msb->card = card; | 1287 | msb->card = card; |
1288 | spin_lock_init(&msb->q_lock); | ||
1262 | 1289 | ||
1263 | rc = mspro_block_init_card(card); | 1290 | rc = mspro_block_init_card(card); |
1264 | 1291 | ||
@@ -1272,6 +1299,8 @@ static int mspro_block_probe(struct memstick_dev *card) | |||
1272 | rc = mspro_block_init_disk(card); | 1299 | rc = mspro_block_init_disk(card); |
1273 | if (!rc) { | 1300 | if (!rc) { |
1274 | card->check = mspro_block_check_card; | 1301 | card->check = mspro_block_check_card; |
1302 | card->stop = mspro_block_stop; | ||
1303 | card->start = mspro_block_start; | ||
1275 | return 0; | 1304 | return 0; |
1276 | } | 1305 | } |
1277 | 1306 | ||
@@ -1286,26 +1315,17 @@ out_free: | |||
1286 | static void mspro_block_remove(struct memstick_dev *card) | 1315 | static void mspro_block_remove(struct memstick_dev *card) |
1287 | { | 1316 | { |
1288 | struct mspro_block_data *msb = memstick_get_drvdata(card); | 1317 | struct mspro_block_data *msb = memstick_get_drvdata(card); |
1289 | struct task_struct *q_thread = NULL; | ||
1290 | unsigned long flags; | 1318 | unsigned long flags; |
1291 | 1319 | ||
1292 | del_gendisk(msb->disk); | 1320 | del_gendisk(msb->disk); |
1293 | dev_dbg(&card->dev, "mspro block remove\n"); | 1321 | dev_dbg(&card->dev, "mspro block remove\n"); |
1294 | spin_lock_irqsave(&msb->q_lock, flags); | 1322 | spin_lock_irqsave(&msb->q_lock, flags); |
1295 | q_thread = msb->q_thread; | 1323 | msb->eject = 1; |
1296 | msb->q_thread = NULL; | 1324 | blk_start_queue(msb->queue); |
1297 | msb->active = 0; | ||
1298 | spin_unlock_irqrestore(&msb->q_lock, flags); | 1325 | spin_unlock_irqrestore(&msb->q_lock, flags); |
1299 | 1326 | ||
1300 | if (q_thread) { | ||
1301 | mutex_unlock(&card->host->lock); | ||
1302 | kthread_stop(q_thread); | ||
1303 | mutex_lock(&card->host->lock); | ||
1304 | } | ||
1305 | |||
1306 | dev_dbg(&card->dev, "queue thread stopped\n"); | ||
1307 | |||
1308 | blk_cleanup_queue(msb->queue); | 1327 | blk_cleanup_queue(msb->queue); |
1328 | msb->queue = NULL; | ||
1309 | 1329 | ||
1310 | sysfs_remove_group(&card->dev.kobj, &msb->attr_group); | 1330 | sysfs_remove_group(&card->dev.kobj, &msb->attr_group); |
1311 | 1331 | ||
@@ -1322,19 +1342,13 @@ static void mspro_block_remove(struct memstick_dev *card) | |||
1322 | static int mspro_block_suspend(struct memstick_dev *card, pm_message_t state) | 1342 | static int mspro_block_suspend(struct memstick_dev *card, pm_message_t state) |
1323 | { | 1343 | { |
1324 | struct mspro_block_data *msb = memstick_get_drvdata(card); | 1344 | struct mspro_block_data *msb = memstick_get_drvdata(card); |
1325 | struct task_struct *q_thread = NULL; | ||
1326 | unsigned long flags; | 1345 | unsigned long flags; |
1327 | 1346 | ||
1328 | spin_lock_irqsave(&msb->q_lock, flags); | 1347 | spin_lock_irqsave(&msb->q_lock, flags); |
1329 | q_thread = msb->q_thread; | ||
1330 | msb->q_thread = NULL; | ||
1331 | msb->active = 0; | ||
1332 | blk_stop_queue(msb->queue); | 1348 | blk_stop_queue(msb->queue); |
1349 | msb->active = 0; | ||
1333 | spin_unlock_irqrestore(&msb->q_lock, flags); | 1350 | spin_unlock_irqrestore(&msb->q_lock, flags); |
1334 | 1351 | ||
1335 | if (q_thread) | ||
1336 | kthread_stop(q_thread); | ||
1337 | |||
1338 | return 0; | 1352 | return 0; |
1339 | } | 1353 | } |
1340 | 1354 | ||
@@ -1373,14 +1387,7 @@ static int mspro_block_resume(struct memstick_dev *card) | |||
1373 | if (memcmp(s_attr->data, r_attr->data, s_attr->size)) | 1387 | if (memcmp(s_attr->data, r_attr->data, s_attr->size)) |
1374 | break; | 1388 | break; |
1375 | 1389 | ||
1376 | memstick_set_drvdata(card, msb); | 1390 | msb->active = 1; |
1377 | msb->q_thread = kthread_run(mspro_block_queue_thread, | ||
1378 | card, DRIVER_NAME"d"); | ||
1379 | if (IS_ERR(msb->q_thread)) | ||
1380 | msb->q_thread = NULL; | ||
1381 | else | ||
1382 | msb->active = 1; | ||
1383 | |||
1384 | break; | 1391 | break; |
1385 | } | 1392 | } |
1386 | } | 1393 | } |
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index 4e3bfbcdf155..3485c63d20b0 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c | |||
@@ -50,6 +50,7 @@ struct jmb38x_ms_host { | |||
50 | struct jmb38x_ms *chip; | 50 | struct jmb38x_ms *chip; |
51 | void __iomem *addr; | 51 | void __iomem *addr; |
52 | spinlock_t lock; | 52 | spinlock_t lock; |
53 | struct tasklet_struct notify; | ||
53 | int id; | 54 | int id; |
54 | char host_id[32]; | 55 | char host_id[32]; |
55 | int irq; | 56 | int irq; |
@@ -590,55 +591,97 @@ static void jmb38x_ms_abort(unsigned long data) | |||
590 | spin_unlock_irqrestore(&host->lock, flags); | 591 | spin_unlock_irqrestore(&host->lock, flags); |
591 | } | 592 | } |
592 | 593 | ||
593 | static void jmb38x_ms_request(struct memstick_host *msh) | 594 | static void jmb38x_ms_req_tasklet(unsigned long data) |
594 | { | 595 | { |
596 | struct memstick_host *msh = (struct memstick_host *)data; | ||
595 | struct jmb38x_ms_host *host = memstick_priv(msh); | 597 | struct jmb38x_ms_host *host = memstick_priv(msh); |
596 | unsigned long flags; | 598 | unsigned long flags; |
597 | int rc; | 599 | int rc; |
598 | 600 | ||
599 | spin_lock_irqsave(&host->lock, flags); | 601 | spin_lock_irqsave(&host->lock, flags); |
600 | if (host->req) { | 602 | if (!host->req) { |
601 | spin_unlock_irqrestore(&host->lock, flags); | 603 | do { |
602 | BUG(); | 604 | rc = memstick_next_req(msh, &host->req); |
603 | return; | 605 | dev_dbg(&host->chip->pdev->dev, "tasklet req %d\n", rc); |
606 | } while (!rc && jmb38x_ms_issue_cmd(msh)); | ||
604 | } | 607 | } |
605 | |||
606 | do { | ||
607 | rc = memstick_next_req(msh, &host->req); | ||
608 | } while (!rc && jmb38x_ms_issue_cmd(msh)); | ||
609 | spin_unlock_irqrestore(&host->lock, flags); | 608 | spin_unlock_irqrestore(&host->lock, flags); |
610 | } | 609 | } |
611 | 610 | ||
612 | static void jmb38x_ms_reset(struct jmb38x_ms_host *host) | 611 | static void jmb38x_ms_dummy_submit(struct memstick_host *msh) |
613 | { | 612 | { |
614 | unsigned int host_ctl = readl(host->addr + HOST_CONTROL); | 613 | return; |
614 | } | ||
615 | |||
616 | static void jmb38x_ms_submit_req(struct memstick_host *msh) | ||
617 | { | ||
618 | struct jmb38x_ms_host *host = memstick_priv(msh); | ||
619 | |||
620 | tasklet_schedule(&host->notify); | ||
621 | } | ||
622 | |||
623 | static int jmb38x_ms_reset(struct jmb38x_ms_host *host) | ||
624 | { | ||
625 | int cnt; | ||
626 | |||
627 | writel(HOST_CONTROL_RESET_REQ | HOST_CONTROL_CLOCK_EN | ||
628 | | readl(host->addr + HOST_CONTROL), | ||
629 | host->addr + HOST_CONTROL); | ||
630 | mmiowb(); | ||
631 | |||
632 | for (cnt = 0; cnt < 20; ++cnt) { | ||
633 | if (!(HOST_CONTROL_RESET_REQ | ||
634 | & readl(host->addr + HOST_CONTROL))) | ||
635 | goto reset_next; | ||
615 | 636 | ||
616 | writel(HOST_CONTROL_RESET_REQ, host->addr + HOST_CONTROL); | 637 | ndelay(20); |
638 | } | ||
639 | dev_dbg(&host->chip->pdev->dev, "reset_req timeout\n"); | ||
640 | return -EIO; | ||
641 | |||
642 | reset_next: | ||
643 | writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN | ||
644 | | readl(host->addr + HOST_CONTROL), | ||
645 | host->addr + HOST_CONTROL); | ||
646 | mmiowb(); | ||
647 | |||
648 | for (cnt = 0; cnt < 20; ++cnt) { | ||
649 | if (!(HOST_CONTROL_RESET | ||
650 | & readl(host->addr + HOST_CONTROL))) | ||
651 | goto reset_ok; | ||
617 | 652 | ||
618 | while (HOST_CONTROL_RESET_REQ | ||
619 | & (host_ctl = readl(host->addr + HOST_CONTROL))) { | ||
620 | ndelay(20); | 653 | ndelay(20); |
621 | dev_dbg(&host->chip->pdev->dev, "reset %08x\n", host_ctl); | ||
622 | } | 654 | } |
655 | dev_dbg(&host->chip->pdev->dev, "reset timeout\n"); | ||
656 | return -EIO; | ||
623 | 657 | ||
624 | writel(HOST_CONTROL_RESET, host->addr + HOST_CONTROL); | 658 | reset_ok: |
625 | mmiowb(); | 659 | mmiowb(); |
626 | writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE); | 660 | writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE); |
627 | writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE); | 661 | writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE); |
662 | return 0; | ||
628 | } | 663 | } |
629 | 664 | ||
630 | static void jmb38x_ms_set_param(struct memstick_host *msh, | 665 | static int jmb38x_ms_set_param(struct memstick_host *msh, |
631 | enum memstick_param param, | 666 | enum memstick_param param, |
632 | int value) | 667 | int value) |
633 | { | 668 | { |
634 | struct jmb38x_ms_host *host = memstick_priv(msh); | 669 | struct jmb38x_ms_host *host = memstick_priv(msh); |
635 | unsigned int host_ctl = readl(host->addr + HOST_CONTROL); | 670 | unsigned int host_ctl = readl(host->addr + HOST_CONTROL); |
636 | unsigned int clock_ctl = CLOCK_CONTROL_40MHZ, clock_delay = 0; | 671 | unsigned int clock_ctl = CLOCK_CONTROL_40MHZ, clock_delay = 0; |
672 | int rc = 0; | ||
637 | 673 | ||
638 | switch (param) { | 674 | switch (param) { |
639 | case MEMSTICK_POWER: | 675 | case MEMSTICK_POWER: |
640 | if (value == MEMSTICK_POWER_ON) { | 676 | if (value == MEMSTICK_POWER_ON) { |
641 | jmb38x_ms_reset(host); | 677 | rc = jmb38x_ms_reset(host); |
678 | if (rc) | ||
679 | return rc; | ||
680 | |||
681 | host_ctl = 7; | ||
682 | host_ctl |= HOST_CONTROL_POWER_EN | ||
683 | | HOST_CONTROL_CLOCK_EN; | ||
684 | writel(host_ctl, host->addr + HOST_CONTROL); | ||
642 | 685 | ||
643 | writel(host->id ? PAD_PU_PD_ON_MS_SOCK1 | 686 | writel(host->id ? PAD_PU_PD_ON_MS_SOCK1 |
644 | : PAD_PU_PD_ON_MS_SOCK0, | 687 | : PAD_PU_PD_ON_MS_SOCK0, |
@@ -647,11 +690,7 @@ static void jmb38x_ms_set_param(struct memstick_host *msh, | |||
647 | writel(PAD_OUTPUT_ENABLE_MS, | 690 | writel(PAD_OUTPUT_ENABLE_MS, |
648 | host->addr + PAD_OUTPUT_ENABLE); | 691 | host->addr + PAD_OUTPUT_ENABLE); |
649 | 692 | ||
650 | host_ctl = 7; | 693 | msleep(10); |
651 | host_ctl |= HOST_CONTROL_POWER_EN | ||
652 | | HOST_CONTROL_CLOCK_EN; | ||
653 | writel(host_ctl, host->addr + HOST_CONTROL); | ||
654 | |||
655 | dev_dbg(&host->chip->pdev->dev, "power on\n"); | 694 | dev_dbg(&host->chip->pdev->dev, "power on\n"); |
656 | } else if (value == MEMSTICK_POWER_OFF) { | 695 | } else if (value == MEMSTICK_POWER_OFF) { |
657 | host_ctl &= ~(HOST_CONTROL_POWER_EN | 696 | host_ctl &= ~(HOST_CONTROL_POWER_EN |
@@ -660,7 +699,8 @@ static void jmb38x_ms_set_param(struct memstick_host *msh, | |||
660 | writel(0, host->addr + PAD_OUTPUT_ENABLE); | 699 | writel(0, host->addr + PAD_OUTPUT_ENABLE); |
661 | writel(PAD_PU_PD_OFF, host->addr + PAD_PU_PD); | 700 | writel(PAD_PU_PD_OFF, host->addr + PAD_PU_PD); |
662 | dev_dbg(&host->chip->pdev->dev, "power off\n"); | 701 | dev_dbg(&host->chip->pdev->dev, "power off\n"); |
663 | } | 702 | } else |
703 | return -EINVAL; | ||
664 | break; | 704 | break; |
665 | case MEMSTICK_INTERFACE: | 705 | case MEMSTICK_INTERFACE: |
666 | host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT); | 706 | host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT); |
@@ -686,12 +726,14 @@ static void jmb38x_ms_set_param(struct memstick_host *msh, | |||
686 | host_ctl &= ~HOST_CONTROL_REI; | 726 | host_ctl &= ~HOST_CONTROL_REI; |
687 | clock_ctl = CLOCK_CONTROL_60MHZ; | 727 | clock_ctl = CLOCK_CONTROL_60MHZ; |
688 | clock_delay = 0; | 728 | clock_delay = 0; |
689 | } | 729 | } else |
730 | return -EINVAL; | ||
690 | writel(host_ctl, host->addr + HOST_CONTROL); | 731 | writel(host_ctl, host->addr + HOST_CONTROL); |
691 | writel(clock_ctl, host->addr + CLOCK_CONTROL); | 732 | writel(clock_ctl, host->addr + CLOCK_CONTROL); |
692 | writel(clock_delay, host->addr + CLOCK_DELAY); | 733 | writel(clock_delay, host->addr + CLOCK_DELAY); |
693 | break; | 734 | break; |
694 | }; | 735 | }; |
736 | return 0; | ||
695 | } | 737 | } |
696 | 738 | ||
697 | #ifdef CONFIG_PM | 739 | #ifdef CONFIG_PM |
@@ -785,7 +827,9 @@ static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt) | |||
785 | host->id); | 827 | host->id); |
786 | host->irq = jm->pdev->irq; | 828 | host->irq = jm->pdev->irq; |
787 | host->timeout_jiffies = msecs_to_jiffies(1000); | 829 | host->timeout_jiffies = msecs_to_jiffies(1000); |
788 | msh->request = jmb38x_ms_request; | 830 | |
831 | tasklet_init(&host->notify, jmb38x_ms_req_tasklet, (unsigned long)msh); | ||
832 | msh->request = jmb38x_ms_submit_req; | ||
789 | msh->set_param = jmb38x_ms_set_param; | 833 | msh->set_param = jmb38x_ms_set_param; |
790 | 834 | ||
791 | msh->caps = MEMSTICK_CAP_PAR4 | MEMSTICK_CAP_PAR8; | 835 | msh->caps = MEMSTICK_CAP_PAR4 | MEMSTICK_CAP_PAR8; |
@@ -897,6 +941,8 @@ static void jmb38x_ms_remove(struct pci_dev *dev) | |||
897 | 941 | ||
898 | host = memstick_priv(jm->hosts[cnt]); | 942 | host = memstick_priv(jm->hosts[cnt]); |
899 | 943 | ||
944 | jm->hosts[cnt]->request = jmb38x_ms_dummy_submit; | ||
945 | tasklet_kill(&host->notify); | ||
900 | writel(0, host->addr + INT_SIGNAL_ENABLE); | 946 | writel(0, host->addr + INT_SIGNAL_ENABLE); |
901 | writel(0, host->addr + INT_STATUS_ENABLE); | 947 | writel(0, host->addr + INT_STATUS_ENABLE); |
902 | mmiowb(); | 948 | mmiowb(); |
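In the jmb38x hunks the host's ->request() callback no longer walks the request list itself; it only schedules a tasklet, and the tasklet pulls work from the core with memstick_next_req() under the host lock, which is why the old BUG() on re-entry can go away. A minimal sketch of the pattern, with issue_cmd() as a hypothetical stand-in for jmb38x_ms_issue_cmd():

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/memstick.h>

/* Trimmed host structure: only the fields the pattern needs */
struct ms_host_sketch {
    spinlock_t lock;
    struct memstick_request *req;
    struct tasklet_struct notify;
    unsigned int eject:1;    /* set on removal, see the teardown note below */
};

static int issue_cmd(struct memstick_host *msh);    /* hypothetical stand-in */

/* Bottom half: pull requests from the core until one sticks */
static void ms_req_tasklet(unsigned long data)
{
    struct memstick_host *msh = (struct memstick_host *)data;
    struct ms_host_sketch *host = memstick_priv(msh);
    unsigned long flags;
    int rc;

    spin_lock_irqsave(&host->lock, flags);
    if (!host->req) {
        do {
            rc = memstick_next_req(msh, &host->req);
        } while (!rc && issue_cmd(msh));
    }
    spin_unlock_irqrestore(&host->lock, flags);
}

/* ->request(): only kicks the tasklet instead of touching hardware */
static void ms_submit_req(struct memstick_host *msh)
{
    struct ms_host_sketch *host = memstick_priv(msh);

    tasklet_schedule(&host->notify);
}

Probe then wires this up with tasklet_init(&host->notify, ms_req_tasklet, (unsigned long)msh) and msh->request = ms_submit_req, matching both the jmb38x and tifm hunks.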
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c index 8577de4ebb0e..d32d6ad8f3fc 100644 --- a/drivers/memstick/host/tifm_ms.c +++ b/drivers/memstick/host/tifm_ms.c | |||
@@ -71,6 +71,7 @@ struct tifm_ms { | |||
71 | struct tifm_dev *dev; | 71 | struct tifm_dev *dev; |
72 | struct timer_list timer; | 72 | struct timer_list timer; |
73 | struct memstick_request *req; | 73 | struct memstick_request *req; |
74 | struct tasklet_struct notify; | ||
74 | unsigned int mode_mask; | 75 | unsigned int mode_mask; |
75 | unsigned int block_pos; | 76 | unsigned int block_pos; |
76 | unsigned long timeout_jiffies; | 77 | unsigned long timeout_jiffies; |
@@ -455,49 +456,51 @@ static void tifm_ms_card_event(struct tifm_dev *sock) | |||
455 | return; | 456 | return; |
456 | } | 457 | } |
457 | 458 | ||
458 | static void tifm_ms_request(struct memstick_host *msh) | 459 | static void tifm_ms_req_tasklet(unsigned long data) |
459 | { | 460 | { |
461 | struct memstick_host *msh = (struct memstick_host *)data; | ||
460 | struct tifm_ms *host = memstick_priv(msh); | 462 | struct tifm_ms *host = memstick_priv(msh); |
461 | struct tifm_dev *sock = host->dev; | 463 | struct tifm_dev *sock = host->dev; |
462 | unsigned long flags; | 464 | unsigned long flags; |
463 | int rc; | 465 | int rc; |
464 | 466 | ||
465 | spin_lock_irqsave(&sock->lock, flags); | 467 | spin_lock_irqsave(&sock->lock, flags); |
466 | if (host->req) { | 468 | if (!host->req) { |
467 | printk(KERN_ERR "%s : unfinished request detected\n", | 469 | if (host->eject) { |
468 | sock->dev.bus_id); | 470 | do { |
469 | spin_unlock_irqrestore(&sock->lock, flags); | 471 | rc = memstick_next_req(msh, &host->req); |
470 | tifm_eject(host->dev); | 472 | if (!rc) |
471 | return; | 473 | host->req->error = -ETIME; |
472 | } | 474 | } while (!rc); |
475 | spin_unlock_irqrestore(&sock->lock, flags); | ||
476 | return; | ||
477 | } | ||
473 | 478 | ||
474 | if (host->eject) { | ||
475 | do { | 479 | do { |
476 | rc = memstick_next_req(msh, &host->req); | 480 | rc = memstick_next_req(msh, &host->req); |
477 | if (!rc) | 481 | } while (!rc && tifm_ms_issue_cmd(host)); |
478 | host->req->error = -ETIME; | ||
479 | } while (!rc); | ||
480 | spin_unlock_irqrestore(&sock->lock, flags); | ||
481 | return; | ||
482 | } | 482 | } |
483 | |||
484 | do { | ||
485 | rc = memstick_next_req(msh, &host->req); | ||
486 | } while (!rc && tifm_ms_issue_cmd(host)); | ||
487 | |||
488 | spin_unlock_irqrestore(&sock->lock, flags); | 483 | spin_unlock_irqrestore(&sock->lock, flags); |
484 | } | ||
485 | |||
486 | static void tifm_ms_dummy_submit(struct memstick_host *msh) | ||
487 | { | ||
489 | return; | 488 | return; |
490 | } | 489 | } |
491 | 490 | ||
492 | static void tifm_ms_set_param(struct memstick_host *msh, | 491 | static void tifm_ms_submit_req(struct memstick_host *msh) |
493 | enum memstick_param param, | ||
494 | int value) | ||
495 | { | 492 | { |
496 | struct tifm_ms *host = memstick_priv(msh); | 493 | struct tifm_ms *host = memstick_priv(msh); |
497 | struct tifm_dev *sock = host->dev; | ||
498 | unsigned long flags; | ||
499 | 494 | ||
500 | spin_lock_irqsave(&sock->lock, flags); | 495 | tasklet_schedule(&host->notify); |
496 | } | ||
497 | |||
498 | static int tifm_ms_set_param(struct memstick_host *msh, | ||
499 | enum memstick_param param, | ||
500 | int value) | ||
501 | { | ||
502 | struct tifm_ms *host = memstick_priv(msh); | ||
503 | struct tifm_dev *sock = host->dev; | ||
501 | 504 | ||
502 | switch (param) { | 505 | switch (param) { |
503 | case MEMSTICK_POWER: | 506 | case MEMSTICK_POWER: |
@@ -512,7 +515,8 @@ static void tifm_ms_set_param(struct memstick_host *msh, | |||
512 | writel(TIFM_MS_SYS_FCLR | TIFM_MS_SYS_INTCLR, | 515 | writel(TIFM_MS_SYS_FCLR | TIFM_MS_SYS_INTCLR, |
513 | sock->addr + SOCK_MS_SYSTEM); | 516 | sock->addr + SOCK_MS_SYSTEM); |
514 | writel(0xffffffff, sock->addr + SOCK_MS_STATUS); | 517 | writel(0xffffffff, sock->addr + SOCK_MS_STATUS); |
515 | } | 518 | } else |
519 | return -EINVAL; | ||
516 | break; | 520 | break; |
517 | case MEMSTICK_INTERFACE: | 521 | case MEMSTICK_INTERFACE: |
518 | if (value == MEMSTICK_SERIAL) { | 522 | if (value == MEMSTICK_SERIAL) { |
@@ -525,11 +529,12 @@ static void tifm_ms_set_param(struct memstick_host *msh, | |||
525 | writel(TIFM_CTRL_FAST_CLK | 529 | writel(TIFM_CTRL_FAST_CLK |
526 | | readl(sock->addr + SOCK_CONTROL), | 530 | | readl(sock->addr + SOCK_CONTROL), |
527 | sock->addr + SOCK_CONTROL); | 531 | sock->addr + SOCK_CONTROL); |
528 | } | 532 | } else |
533 | return -EINVAL; | ||
529 | break; | 534 | break; |
530 | }; | 535 | }; |
531 | 536 | ||
532 | spin_unlock_irqrestore(&sock->lock, flags); | 537 | return 0; |
533 | } | 538 | } |
534 | 539 | ||
535 | static void tifm_ms_abort(unsigned long data) | 540 | static void tifm_ms_abort(unsigned long data) |
@@ -570,8 +575,9 @@ static int tifm_ms_probe(struct tifm_dev *sock) | |||
570 | host->timeout_jiffies = msecs_to_jiffies(1000); | 575 | host->timeout_jiffies = msecs_to_jiffies(1000); |
571 | 576 | ||
572 | setup_timer(&host->timer, tifm_ms_abort, (unsigned long)host); | 577 | setup_timer(&host->timer, tifm_ms_abort, (unsigned long)host); |
578 | tasklet_init(&host->notify, tifm_ms_req_tasklet, (unsigned long)msh); | ||
573 | 579 | ||
574 | msh->request = tifm_ms_request; | 580 | msh->request = tifm_ms_submit_req; |
575 | msh->set_param = tifm_ms_set_param; | 581 | msh->set_param = tifm_ms_set_param; |
576 | sock->card_event = tifm_ms_card_event; | 582 | sock->card_event = tifm_ms_card_event; |
577 | sock->data_event = tifm_ms_data_event; | 583 | sock->data_event = tifm_ms_data_event; |
@@ -593,6 +599,8 @@ static void tifm_ms_remove(struct tifm_dev *sock) | |||
593 | int rc = 0; | 599 | int rc = 0; |
594 | unsigned long flags; | 600 | unsigned long flags; |
595 | 601 | ||
602 | msh->request = tifm_ms_dummy_submit; | ||
603 | tasklet_kill(&host->notify); | ||
596 | spin_lock_irqsave(&sock->lock, flags); | 604 | spin_lock_irqsave(&sock->lock, flags); |
597 | host->eject = 1; | 605 | host->eject = 1; |
598 | if (host->req) { | 606 | if (host->req) { |
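Both removal paths follow the same ordering: first point ->request() at a no-op, then tasklet_kill() so a running bottom half has drained, and only then mark the socket ejected and fail whatever command the hardware still owns. Roughly, reusing the trimmed ms_host_sketch from the note above (the final completion call is driver specific and left as a comment):

static void ms_dummy_submit(struct memstick_host *msh)
{
    /* new submissions become no-ops while the host is going away */
}

static void ms_remove_sketch(struct memstick_host *msh,
                             struct ms_host_sketch *host)
{
    unsigned long flags;

    msh->request = ms_dummy_submit;    /* 1: stop accepting new work */
    tasklet_kill(&host->notify);       /* 2: wait out a running tasklet */

    spin_lock_irqsave(&host->lock, flags);
    host->eject = 1;                   /* 3: further next_req calls fail fast */
    if (host->req) {
        host->req->error = -ETIME;
        /* complete the in-flight request here (driver specific) */
    }
    spin_unlock_irqrestore(&host->lock, flags);
}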
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt index 241592ab13ad..3f15fcfe4a2e 100644 --- a/drivers/message/fusion/lsi/mpi_history.txt +++ b/drivers/message/fusion/lsi/mpi_history.txt | |||
@@ -127,7 +127,7 @@ mpi_ioc.h | |||
127 | * 08-08-01 01.02.01 Original release for v1.2 work. | 127 | * 08-08-01 01.02.01 Original release for v1.2 work. |
128 | * New format for FWVersion and ProductId in | 128 | * New format for FWVersion and ProductId in |
129 | * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER. | 129 | * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER. |
130 | * 08-31-01 01.02.02 Addded event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and | 130 | * 08-31-01 01.02.02 Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and |
131 | * related structure and defines. | 131 | * related structure and defines. |
132 | * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED. | 132 | * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED. |
133 | * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE. | 133 | * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE. |
@@ -187,7 +187,7 @@ mpi_ioc.h | |||
187 | * 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED. | 187 | * 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED. |
188 | * Added MaxInitiators field to PortFacts reply. | 188 | * Added MaxInitiators field to PortFacts reply. |
189 | * Added SAS Device Status Change ReasonCode for | 189 | * Added SAS Device Status Change ReasonCode for |
190 | * asynchronous notificaiton. | 190 | * asynchronous notification. |
191 | * Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event | 191 | * Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event |
192 | * data structure. | 192 | * data structure. |
193 | * Added new ImageType values for FWDownload and FWUpload | 193 | * Added new ImageType values for FWDownload and FWUpload |
@@ -213,7 +213,7 @@ mpi_cnfg.h | |||
213 | * Added _RESPONSE_ID_MASK definition to SCSI_PORT_1 | 213 | * Added _RESPONSE_ID_MASK definition to SCSI_PORT_1 |
214 | * page and updated the page version. | 214 | * page and updated the page version. |
215 | * Added Information field and _INFO_PARAMS_NEGOTIATED | 215 | * Added Information field and _INFO_PARAMS_NEGOTIATED |
216 | * definitionto SCSI_DEVICE_0 page. | 216 | * definition to SCSI_DEVICE_0 page. |
217 | * 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the | 217 | * 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the |
218 | * page version. | 218 | * page version. |
219 | * Added BucketsRemaining to LAN_1 page, redefined the | 219 | * Added BucketsRemaining to LAN_1 page, redefined the |
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 3b870e7fb3e1..eabf0bfccab4 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c | |||
@@ -314,10 +314,12 @@ static int __init asic3_irq_probe(struct platform_device *pdev) | |||
314 | unsigned long clksel = 0; | 314 | unsigned long clksel = 0; |
315 | unsigned int irq, irq_base; | 315 | unsigned int irq, irq_base; |
316 | int map_size; | 316 | int map_size; |
317 | int ret; | ||
317 | 318 | ||
318 | asic->irq_nr = platform_get_irq(pdev, 0); | 319 | ret = platform_get_irq(pdev, 0); |
319 | if (asic->irq_nr < 0) | 320 | if (ret < 0) |
320 | return asic->irq_nr; | 321 | return ret; |
322 | asic->irq_nr = ret; | ||
321 | 323 | ||
322 | /* turn on clock to IRQ controller */ | 324 | /* turn on clock to IRQ controller */ |
323 | clksel |= CLOCK_SEL_CX; | 325 | clksel |= CLOCK_SEL_CX; |
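The asic3 change reads platform_get_irq() into a signed local before storing it, presumably because irq_nr is unsigned and the old negative-value test could never trigger. A generic sketch of that pattern (function and parameter names are illustrative):

#include <linux/platform_device.h>

/* Keep the signed return value so a negative errno is not truncated away */
static int sketch_get_irq(struct platform_device *pdev, unsigned int *irq_nr)
{
    int ret;

    ret = platform_get_irq(pdev, 0);
    if (ret < 0)
        return ret;        /* propagate -ENXIO and friends */

    *irq_nr = ret;         /* only now known to be a valid IRQ number */
    return 0;
}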
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index c3a5db72ddd7..5f95e10229b5 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -337,7 +337,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, | |||
337 | 337 | ||
338 | host->align_addr = dma_map_single(mmc_dev(host->mmc), | 338 | host->align_addr = dma_map_single(mmc_dev(host->mmc), |
339 | host->align_buffer, 128 * 4, direction); | 339 | host->align_buffer, 128 * 4, direction); |
340 | if (dma_mapping_error(host->align_addr)) | 340 | if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) |
341 | goto fail; | 341 | goto fail; |
342 | BUG_ON(host->align_addr & 0x3); | 342 | BUG_ON(host->align_addr & 0x3); |
343 | 343 | ||
@@ -439,7 +439,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, | |||
439 | 439 | ||
440 | host->adma_addr = dma_map_single(mmc_dev(host->mmc), | 440 | host->adma_addr = dma_map_single(mmc_dev(host->mmc), |
441 | host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); | 441 | host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); |
442 | if (dma_mapping_error(host->align_addr)) | 442 | if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) |
443 | goto unmap_entries; | 443 | goto unmap_entries; |
444 | BUG_ON(host->adma_addr & 0x3); | 444 | BUG_ON(host->adma_addr & 0x3); |
445 | 445 | ||
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index a06bf8b89343..e354faee5df0 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
@@ -9,6 +9,8 @@ | |||
9 | * your option) any later version. | 9 | * your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/scatterlist.h> | ||
13 | |||
12 | /* | 14 | /* |
13 | * Controller registers | 15 | * Controller registers |
14 | */ | 16 | */ |
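The sdhci hunks, and most of the network driver hunks that follow, are mechanical conversions to the new error check signatures: dma_mapping_error() now takes the struct device and pci_dma_mapping_error() the struct pci_dev, so the check can be routed through per-device DMA ops. A minimal usage sketch for the dma_map_single() case (helper and buffer names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch: the device is now passed to the error check as well */
static int sketch_map_buffer(struct device *dev, void *buf, size_t len,
                             dma_addr_t *out)
{
    dma_addr_t addr;

    addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, addr))    /* was dma_mapping_error(addr) */
        return -ENOMEM;

    *out = addr;
    return 0;
}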
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index 7a14980f3472..18d3eeb7eab2 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c | |||
@@ -482,7 +482,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) | |||
482 | goto err; | 482 | goto err; |
483 | 483 | ||
484 | d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE); | 484 | d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE); |
485 | if (dma_mapping_error(d)) { | 485 | if (dma_mapping_error(NULL, d)) { |
486 | free_page((unsigned long)page); | 486 | free_page((unsigned long)page); |
487 | goto err; | 487 | goto err; |
488 | } | 488 | } |
@@ -505,7 +505,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) | |||
505 | goto err; | 505 | goto err; |
506 | 506 | ||
507 | d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE); | 507 | d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE); |
508 | if (dma_mapping_error(d)) { | 508 | if (dma_mapping_error(NULL, d)) { |
509 | free_page((unsigned long)page); | 509 | free_page((unsigned long)page); |
510 | goto err; | 510 | goto err; |
511 | } | 511 | } |
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index 0263bef9cc6d..c7cc760a1777 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c | |||
@@ -1020,7 +1020,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, | |||
1020 | 1020 | ||
1021 | mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE, | 1021 | mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE, |
1022 | PCI_DMA_FROMDEVICE); | 1022 | PCI_DMA_FROMDEVICE); |
1023 | if (unlikely(dma_mapping_error(mapping))) { | 1023 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
1024 | __free_pages(page, PAGES_PER_SGE_SHIFT); | 1024 | __free_pages(page, PAGES_PER_SGE_SHIFT); |
1025 | return -ENOMEM; | 1025 | return -ENOMEM; |
1026 | } | 1026 | } |
@@ -1048,7 +1048,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, | |||
1048 | 1048 | ||
1049 | mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, | 1049 | mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, |
1050 | PCI_DMA_FROMDEVICE); | 1050 | PCI_DMA_FROMDEVICE); |
1051 | if (unlikely(dma_mapping_error(mapping))) { | 1051 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
1052 | dev_kfree_skb(skb); | 1052 | dev_kfree_skb(skb); |
1053 | return -ENOMEM; | 1053 | return -ENOMEM; |
1054 | } | 1054 | } |
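The PCI wrapper changes the same way, as in the bnx2x hunk above where pci_map_page() is paired with the two-argument pci_dma_mapping_error(). A sketch of that case, trimmed to a single page and the error path (names illustrative):

#include <linux/pci.h>
#include <linux/gfp.h>

static int sketch_map_rx_page(struct pci_dev *pdev, struct page *page,
                              size_t size, dma_addr_t *out)
{
    dma_addr_t mapping;

    mapping = pci_map_page(pdev, page, 0, size, PCI_DMA_FROMDEVICE);
    if (pci_dma_mapping_error(pdev, mapping)) {    /* was (mapping) only */
        __free_page(page);    /* caller no longer owns the page */
        return -ENOMEM;
    }

    *out = mapping;
    return 0;
}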
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 83768df27806..f1936d51b458 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
@@ -576,6 +576,18 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags) | |||
576 | list_for_each_safe(elem, tmp, &list) { | 576 | list_for_each_safe(elem, tmp, &list) { |
577 | cas_page_t *page = list_entry(elem, cas_page_t, list); | 577 | cas_page_t *page = list_entry(elem, cas_page_t, list); |
578 | 578 | ||
579 | /* | ||
580 | * With the lockless pagecache, cassini buffering scheme gets | ||
581 | * slightly less accurate: we might find that a page has an | ||
582 | * elevated reference count here, due to a speculative ref, | ||
583 | * and skip it as in-use. Ideally we would be able to reclaim | ||
584 | * it. However this would be such a rare case, it doesn't | ||
585 | * matter too much as we should pick it up the next time round. | ||
586 | * | ||
587 | * Importantly, if we find that the page has a refcount of 1 | ||
588 | * here (our refcount), then we know it is definitely not in use | ||
589 | * so we can reuse it. | ||
590 | */ | ||
579 | if (page_count(page->buffer) > 1) | 591 | if (page_count(page->buffer) > 1) |
580 | continue; | 592 | continue; |
581 | 593 | ||
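The new cassini comment states the rule the test below it relies on: with speculative page references an elevated count may be a false positive and the page is simply skipped, while a count of exactly one (the driver's own reference) is definitely safe to recycle. As a one-line predicate (helper name illustrative):

#include <linux/mm.h>

/* Only recycle when we hold the sole reference; an elevated count is
 * treated as in use and retried on the next recovery pass. */
static bool sketch_page_reusable(struct page *page)
{
    return page_count(page) == 1;
}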
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index a96331c875e6..1b0861d73ab7 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c | |||
@@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len, | |||
386 | dma_addr_t mapping; | 386 | dma_addr_t mapping; |
387 | 387 | ||
388 | mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); | 388 | mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); |
389 | if (unlikely(pci_dma_mapping_error(mapping))) | 389 | if (unlikely(pci_dma_mapping_error(pdev, mapping))) |
390 | return -ENOMEM; | 390 | return -ENOMEM; |
391 | 391 | ||
392 | pci_unmap_addr_set(sd, dma_addr, mapping); | 392 | pci_unmap_addr_set(sd, dma_addr, mapping); |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 1037b1332312..19d32a227be1 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -1790,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) | |||
1790 | rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, | 1790 | rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, |
1791 | RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); | 1791 | RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); |
1792 | 1792 | ||
1793 | if (pci_dma_mapping_error(rx->dma_addr)) { | 1793 | if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { |
1794 | dev_kfree_skb_any(rx->skb); | 1794 | dev_kfree_skb_any(rx->skb); |
1795 | rx->skb = NULL; | 1795 | rx->skb = NULL; |
1796 | rx->dma_addr = 0; | 1796 | rx->dma_addr = 0; |
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index a14561f40db0..9350564065e7 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -1090,7 +1090,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1090 | tx_ring->buffer_info[i].dma = | 1090 | tx_ring->buffer_info[i].dma = |
1091 | pci_map_single(pdev, skb->data, skb->len, | 1091 | pci_map_single(pdev, skb->data, skb->len, |
1092 | PCI_DMA_TODEVICE); | 1092 | PCI_DMA_TODEVICE); |
1093 | if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) { | 1093 | if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) { |
1094 | ret_val = 4; | 1094 | ret_val = 4; |
1095 | goto err_nomem; | 1095 | goto err_nomem; |
1096 | } | 1096 | } |
@@ -1153,7 +1153,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1153 | rx_ring->buffer_info[i].dma = | 1153 | rx_ring->buffer_info[i].dma = |
1154 | pci_map_single(pdev, skb->data, 2048, | 1154 | pci_map_single(pdev, skb->data, 2048, |
1155 | PCI_DMA_FROMDEVICE); | 1155 | PCI_DMA_FROMDEVICE); |
1156 | if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) { | 1156 | if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) { |
1157 | ret_val = 8; | 1157 | ret_val = 8; |
1158 | goto err_nomem; | 1158 | goto err_nomem; |
1159 | } | 1159 | } |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 9c0f56b3c518..d13677899767 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -195,7 +195,7 @@ map_skb: | |||
195 | buffer_info->dma = pci_map_single(pdev, skb->data, | 195 | buffer_info->dma = pci_map_single(pdev, skb->data, |
196 | adapter->rx_buffer_len, | 196 | adapter->rx_buffer_len, |
197 | PCI_DMA_FROMDEVICE); | 197 | PCI_DMA_FROMDEVICE); |
198 | if (pci_dma_mapping_error(buffer_info->dma)) { | 198 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) { |
199 | dev_err(&pdev->dev, "RX DMA map failed\n"); | 199 | dev_err(&pdev->dev, "RX DMA map failed\n"); |
200 | adapter->rx_dma_failed++; | 200 | adapter->rx_dma_failed++; |
201 | break; | 201 | break; |
@@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
265 | ps_page->page, | 265 | ps_page->page, |
266 | 0, PAGE_SIZE, | 266 | 0, PAGE_SIZE, |
267 | PCI_DMA_FROMDEVICE); | 267 | PCI_DMA_FROMDEVICE); |
268 | if (pci_dma_mapping_error(ps_page->dma)) { | 268 | if (pci_dma_mapping_error(pdev, ps_page->dma)) { |
269 | dev_err(&adapter->pdev->dev, | 269 | dev_err(&adapter->pdev->dev, |
270 | "RX DMA page map failed\n"); | 270 | "RX DMA page map failed\n"); |
271 | adapter->rx_dma_failed++; | 271 | adapter->rx_dma_failed++; |
@@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
300 | buffer_info->dma = pci_map_single(pdev, skb->data, | 300 | buffer_info->dma = pci_map_single(pdev, skb->data, |
301 | adapter->rx_ps_bsize0, | 301 | adapter->rx_ps_bsize0, |
302 | PCI_DMA_FROMDEVICE); | 302 | PCI_DMA_FROMDEVICE); |
303 | if (pci_dma_mapping_error(buffer_info->dma)) { | 303 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) { |
304 | dev_err(&pdev->dev, "RX DMA map failed\n"); | 304 | dev_err(&pdev->dev, "RX DMA map failed\n"); |
305 | adapter->rx_dma_failed++; | 305 | adapter->rx_dma_failed++; |
306 | /* cleanup skb */ | 306 | /* cleanup skb */ |
@@ -3344,7 +3344,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3344 | skb->data + offset, | 3344 | skb->data + offset, |
3345 | size, | 3345 | size, |
3346 | PCI_DMA_TODEVICE); | 3346 | PCI_DMA_TODEVICE); |
3347 | if (pci_dma_mapping_error(buffer_info->dma)) { | 3347 | if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) { |
3348 | dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); | 3348 | dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); |
3349 | adapter->tx_dma_failed++; | 3349 | adapter->tx_dma_failed++; |
3350 | return -1; | 3350 | return -1; |
@@ -3382,7 +3382,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3382 | offset, | 3382 | offset, |
3383 | size, | 3383 | size, |
3384 | PCI_DMA_TODEVICE); | 3384 | PCI_DMA_TODEVICE); |
3385 | if (pci_dma_mapping_error(buffer_info->dma)) { | 3385 | if (pci_dma_mapping_error(adapter->pdev, |
3386 | buffer_info->dma)) { | ||
3386 | dev_err(&adapter->pdev->dev, | 3387 | dev_err(&adapter->pdev->dev, |
3387 | "TX DMA page map failed\n"); | 3388 | "TX DMA page map failed\n"); |
3388 | adapter->tx_dma_failed++; | 3389 | adapter->tx_dma_failed++; |
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index e5a6e2e84540..91ec9fdc7184 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc | |||
260 | dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, | 260 | dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, |
261 | pool->buff_size, DMA_FROM_DEVICE); | 261 | pool->buff_size, DMA_FROM_DEVICE); |
262 | 262 | ||
263 | if (dma_mapping_error(dma_addr)) | 263 | if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) |
264 | goto failure; | 264 | goto failure; |
265 | 265 | ||
266 | pool->free_map[free_index] = IBM_VETH_INVALID_MAP; | 266 | pool->free_map[free_index] = IBM_VETH_INVALID_MAP; |
@@ -294,7 +294,7 @@ failure: | |||
294 | pool->consumer_index = pool->size - 1; | 294 | pool->consumer_index = pool->size - 1; |
295 | else | 295 | else |
296 | pool->consumer_index--; | 296 | pool->consumer_index--; |
297 | if (!dma_mapping_error(dma_addr)) | 297 | if (!dma_mapping_error(&adapter->vdev->dev, dma_addr)) |
298 | dma_unmap_single(&adapter->vdev->dev, | 298 | dma_unmap_single(&adapter->vdev->dev, |
299 | pool->dma_addr[index], pool->buff_size, | 299 | pool->dma_addr[index], pool->buff_size, |
300 | DMA_FROM_DEVICE); | 300 | DMA_FROM_DEVICE); |
@@ -448,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) | |||
448 | static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | 448 | static void ibmveth_cleanup(struct ibmveth_adapter *adapter) |
449 | { | 449 | { |
450 | int i; | 450 | int i; |
451 | struct device *dev = &adapter->vdev->dev; | ||
451 | 452 | ||
452 | if(adapter->buffer_list_addr != NULL) { | 453 | if(adapter->buffer_list_addr != NULL) { |
453 | if(!dma_mapping_error(adapter->buffer_list_dma)) { | 454 | if (!dma_mapping_error(dev, adapter->buffer_list_dma)) { |
454 | dma_unmap_single(&adapter->vdev->dev, | 455 | dma_unmap_single(dev, adapter->buffer_list_dma, 4096, |
455 | adapter->buffer_list_dma, 4096, | ||
456 | DMA_BIDIRECTIONAL); | 456 | DMA_BIDIRECTIONAL); |
457 | adapter->buffer_list_dma = DMA_ERROR_CODE; | 457 | adapter->buffer_list_dma = DMA_ERROR_CODE; |
458 | } | 458 | } |
@@ -461,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
461 | } | 461 | } |
462 | 462 | ||
463 | if(adapter->filter_list_addr != NULL) { | 463 | if(adapter->filter_list_addr != NULL) { |
464 | if(!dma_mapping_error(adapter->filter_list_dma)) { | 464 | if (!dma_mapping_error(dev, adapter->filter_list_dma)) { |
465 | dma_unmap_single(&adapter->vdev->dev, | 465 | dma_unmap_single(dev, adapter->filter_list_dma, 4096, |
466 | adapter->filter_list_dma, 4096, | ||
467 | DMA_BIDIRECTIONAL); | 466 | DMA_BIDIRECTIONAL); |
468 | adapter->filter_list_dma = DMA_ERROR_CODE; | 467 | adapter->filter_list_dma = DMA_ERROR_CODE; |
469 | } | 468 | } |
@@ -472,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
472 | } | 471 | } |
473 | 472 | ||
474 | if(adapter->rx_queue.queue_addr != NULL) { | 473 | if(adapter->rx_queue.queue_addr != NULL) { |
475 | if(!dma_mapping_error(adapter->rx_queue.queue_dma)) { | 474 | if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) { |
476 | dma_unmap_single(&adapter->vdev->dev, | 475 | dma_unmap_single(dev, |
477 | adapter->rx_queue.queue_dma, | 476 | adapter->rx_queue.queue_dma, |
478 | adapter->rx_queue.queue_len, | 477 | adapter->rx_queue.queue_len, |
479 | DMA_BIDIRECTIONAL); | 478 | DMA_BIDIRECTIONAL); |
@@ -535,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
535 | int rc; | 534 | int rc; |
536 | union ibmveth_buf_desc rxq_desc; | 535 | union ibmveth_buf_desc rxq_desc; |
537 | int i; | 536 | int i; |
537 | struct device *dev; | ||
538 | 538 | ||
539 | ibmveth_debug_printk("open starting\n"); | 539 | ibmveth_debug_printk("open starting\n"); |
540 | 540 | ||
@@ -563,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev) | |||
563 | return -ENOMEM; | 563 | return -ENOMEM; |
564 | } | 564 | } |
565 | 565 | ||
566 | adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev, | 566 | dev = &adapter->vdev->dev; |
567 | |||
568 | adapter->buffer_list_dma = dma_map_single(dev, | ||
567 | adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); | 569 | adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); |
568 | adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev, | 570 | adapter->filter_list_dma = dma_map_single(dev, |
569 | adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); | 571 | adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); |
570 | adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev, | 572 | adapter->rx_queue.queue_dma = dma_map_single(dev, |
571 | adapter->rx_queue.queue_addr, | 573 | adapter->rx_queue.queue_addr, |
572 | adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); | 574 | adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); |
573 | 575 | ||
574 | if((dma_mapping_error(adapter->buffer_list_dma) ) || | 576 | if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || |
575 | (dma_mapping_error(adapter->filter_list_dma)) || | 577 | (dma_mapping_error(dev, adapter->filter_list_dma)) || |
576 | (dma_mapping_error(adapter->rx_queue.queue_dma))) { | 578 | (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { |
577 | ibmveth_error_printk("unable to map filter or buffer list pages\n"); | 579 | ibmveth_error_printk("unable to map filter or buffer list pages\n"); |
578 | ibmveth_cleanup(adapter); | 580 | ibmveth_cleanup(adapter); |
579 | napi_disable(&adapter->napi); | 581 | napi_disable(&adapter->napi); |
@@ -645,7 +647,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
645 | adapter->bounce_buffer_dma = | 647 | adapter->bounce_buffer_dma = |
646 | dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, | 648 | dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, |
647 | netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); | 649 | netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); |
648 | if (dma_mapping_error(adapter->bounce_buffer_dma)) { | 650 | if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { |
649 | ibmveth_error_printk("unable to map bounce buffer\n"); | 651 | ibmveth_error_printk("unable to map bounce buffer\n"); |
650 | ibmveth_cleanup(adapter); | 652 | ibmveth_cleanup(adapter); |
651 | napi_disable(&adapter->napi); | 653 | napi_disable(&adapter->napi); |
@@ -922,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
922 | buf[1] = 0; | 924 | buf[1] = 0; |
923 | } | 925 | } |
924 | 926 | ||
925 | if (dma_mapping_error(data_dma_addr)) { | 927 | if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { |
926 | if (!firmware_has_feature(FW_FEATURE_CMO)) | 928 | if (!firmware_has_feature(FW_FEATURE_CMO)) |
927 | ibmveth_error_printk("tx: unable to map xmit buffer\n"); | 929 | ibmveth_error_printk("tx: unable to map xmit buffer\n"); |
928 | skb_copy_from_linear_data(skb, adapter->bounce_buffer, | 930 | skb_copy_from_linear_data(skb, adapter->bounce_buffer, |
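The ibmveth hunks cache the struct device pointer once and pass it to every map, unmap and error check; the cleanup path only unmaps cookies whose mapping actually succeeded. A condensed sketch of that cleanup test (helper name illustrative):

#include <linux/dma-mapping.h>

/* Cleanup sketch: only unmap cookies that were successfully mapped,
 * mirroring the ibmveth_cleanup() hunks above. */
static void sketch_unmap_if_valid(struct device *dev, dma_addr_t addr,
                                  size_t len)
{
    if (!dma_mapping_error(dev, addr))
        dma_unmap_single(dev, addr, len, DMA_BIDIRECTIONAL);
}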
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index b8d0639c1cdf..c46864d626b2 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c | |||
@@ -1128,7 +1128,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp, | |||
1128 | msg->data.addr[0] = dma_map_single(port->dev, skb->data, | 1128 | msg->data.addr[0] = dma_map_single(port->dev, skb->data, |
1129 | skb->len, DMA_TO_DEVICE); | 1129 | skb->len, DMA_TO_DEVICE); |
1130 | 1130 | ||
1131 | if (dma_mapping_error(msg->data.addr[0])) | 1131 | if (dma_mapping_error(port->dev, msg->data.addr[0])) |
1132 | goto recycle_and_drop; | 1132 | goto recycle_and_drop; |
1133 | 1133 | ||
1134 | msg->dev = port->dev; | 1134 | msg->dev = port->dev; |
@@ -1226,7 +1226,7 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx, | |||
1226 | dma_address = msg->data.addr[0]; | 1226 | dma_address = msg->data.addr[0]; |
1227 | dma_length = msg->data.len[0]; | 1227 | dma_length = msg->data.len[0]; |
1228 | 1228 | ||
1229 | if (!dma_mapping_error(dma_address)) | 1229 | if (!dma_mapping_error(msg->dev, dma_address)) |
1230 | dma_unmap_single(msg->dev, dma_address, dma_length, | 1230 | dma_unmap_single(msg->dev, dma_address, dma_length, |
1231 | DMA_TO_DEVICE); | 1231 | DMA_TO_DEVICE); |
1232 | 1232 | ||
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index ea3a09aaa844..7df928d3a3d8 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c | |||
@@ -526,7 +526,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt) | |||
526 | return -ENOMEM; | 526 | return -ENOMEM; |
527 | priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0, | 527 | priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0, |
528 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 528 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
529 | if (pci_dma_mapping_error(priv->eq_table.icm_dma)) { | 529 | if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) { |
530 | __free_page(priv->eq_table.icm_page); | 530 | __free_page(priv->eq_table.icm_page); |
531 | return -ENOMEM; | 531 | return -ENOMEM; |
532 | } | 532 | } |
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 993d87c9296f..edc0fd588985 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c | |||
@@ -650,7 +650,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, | |||
650 | mac->bufsz - LOCAL_SKB_ALIGN, | 650 | mac->bufsz - LOCAL_SKB_ALIGN, |
651 | PCI_DMA_FROMDEVICE); | 651 | PCI_DMA_FROMDEVICE); |
652 | 652 | ||
653 | if (unlikely(dma_mapping_error(dma))) { | 653 | if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) { |
654 | dev_kfree_skb_irq(info->skb); | 654 | dev_kfree_skb_irq(info->skb); |
655 | break; | 655 | break; |
656 | } | 656 | } |
@@ -1519,7 +1519,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1519 | map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), | 1519 | map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), |
1520 | PCI_DMA_TODEVICE); | 1520 | PCI_DMA_TODEVICE); |
1521 | map_size[0] = skb_headlen(skb); | 1521 | map_size[0] = skb_headlen(skb); |
1522 | if (dma_mapping_error(map[0])) | 1522 | if (pci_dma_mapping_error(mac->dma_pdev, map[0])) |
1523 | goto out_err_nolock; | 1523 | goto out_err_nolock; |
1524 | 1524 | ||
1525 | for (i = 0; i < nfrags; i++) { | 1525 | for (i = 0; i < nfrags; i++) { |
@@ -1529,7 +1529,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1529 | frag->page_offset, frag->size, | 1529 | frag->page_offset, frag->size, |
1530 | PCI_DMA_TODEVICE); | 1530 | PCI_DMA_TODEVICE); |
1531 | map_size[i+1] = frag->size; | 1531 | map_size[i+1] = frag->size; |
1532 | if (dma_mapping_error(map[i+1])) { | 1532 | if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) { |
1533 | nfrags = i; | 1533 | nfrags = i; |
1534 | goto out_err_nolock; | 1534 | goto out_err_nolock; |
1535 | } | 1535 | } |
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index e7d48a352beb..e82b37bbd6c3 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -328,7 +328,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, | |||
328 | qdev->lrg_buffer_len - | 328 | qdev->lrg_buffer_len - |
329 | QL_HEADER_SPACE, | 329 | QL_HEADER_SPACE, |
330 | PCI_DMA_FROMDEVICE); | 330 | PCI_DMA_FROMDEVICE); |
331 | err = pci_dma_mapping_error(map); | 331 | err = pci_dma_mapping_error(qdev->pdev, map); |
332 | if(err) { | 332 | if(err) { |
333 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 333 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
334 | qdev->ndev->name, err); | 334 | qdev->ndev->name, err); |
@@ -1919,7 +1919,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) | |||
1919 | QL_HEADER_SPACE, | 1919 | QL_HEADER_SPACE, |
1920 | PCI_DMA_FROMDEVICE); | 1920 | PCI_DMA_FROMDEVICE); |
1921 | 1921 | ||
1922 | err = pci_dma_mapping_error(map); | 1922 | err = pci_dma_mapping_error(qdev->pdev, map); |
1923 | if(err) { | 1923 | if(err) { |
1924 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 1924 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
1925 | qdev->ndev->name, err); | 1925 | qdev->ndev->name, err); |
@@ -2454,7 +2454,7 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2454 | */ | 2454 | */ |
2455 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); | 2455 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); |
2456 | 2456 | ||
2457 | err = pci_dma_mapping_error(map); | 2457 | err = pci_dma_mapping_error(qdev->pdev, map); |
2458 | if(err) { | 2458 | if(err) { |
2459 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 2459 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
2460 | qdev->ndev->name, err); | 2460 | qdev->ndev->name, err); |
@@ -2487,7 +2487,7 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2487 | sizeof(struct oal), | 2487 | sizeof(struct oal), |
2488 | PCI_DMA_TODEVICE); | 2488 | PCI_DMA_TODEVICE); |
2489 | 2489 | ||
2490 | err = pci_dma_mapping_error(map); | 2490 | err = pci_dma_mapping_error(qdev->pdev, map); |
2491 | if(err) { | 2491 | if(err) { |
2492 | 2492 | ||
2493 | printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", | 2493 | printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", |
@@ -2514,7 +2514,7 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2514 | frag->page_offset, frag->size, | 2514 | frag->page_offset, frag->size, |
2515 | PCI_DMA_TODEVICE); | 2515 | PCI_DMA_TODEVICE); |
2516 | 2516 | ||
2517 | err = pci_dma_mapping_error(map); | 2517 | err = pci_dma_mapping_error(qdev->pdev, map); |
2518 | if(err) { | 2518 | if(err) { |
2519 | printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", | 2519 | printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", |
2520 | qdev->ndev->name, err); | 2520 | qdev->ndev->name, err); |
@@ -2916,7 +2916,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) | |||
2916 | QL_HEADER_SPACE, | 2916 | QL_HEADER_SPACE, |
2917 | PCI_DMA_FROMDEVICE); | 2917 | PCI_DMA_FROMDEVICE); |
2918 | 2918 | ||
2919 | err = pci_dma_mapping_error(map); | 2919 | err = pci_dma_mapping_error(qdev->pdev, map); |
2920 | if(err) { | 2920 | if(err) { |
2921 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 2921 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
2922 | qdev->ndev->name, err); | 2922 | qdev->ndev->name, err); |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 9dae40ccf048..86d77d05190a 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -2512,8 +2512,8 @@ static void stop_nic(struct s2io_nic *nic) | |||
2512 | * Return Value: | 2512 | * Return Value: |
2513 | * SUCCESS on success or an appropriate -ve value on failure. | 2513 | * SUCCESS on success or an appropriate -ve value on failure. |
2514 | */ | 2514 | */ |
2515 | 2515 | static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |
2516 | static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | 2516 | int from_card_up) |
2517 | { | 2517 | { |
2518 | struct sk_buff *skb; | 2518 | struct sk_buff *skb; |
2519 | struct RxD_t *rxdp; | 2519 | struct RxD_t *rxdp; |
@@ -2602,7 +2602,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | |||
2602 | rxdp1->Buffer0_ptr = pci_map_single | 2602 | rxdp1->Buffer0_ptr = pci_map_single |
2603 | (ring->pdev, skb->data, size - NET_IP_ALIGN, | 2603 | (ring->pdev, skb->data, size - NET_IP_ALIGN, |
2604 | PCI_DMA_FROMDEVICE); | 2604 | PCI_DMA_FROMDEVICE); |
2605 | if(pci_dma_mapping_error(rxdp1->Buffer0_ptr)) | 2605 | if (pci_dma_mapping_error(nic->pdev, |
2606 | rxdp1->Buffer0_ptr)) | ||
2606 | goto pci_map_failed; | 2607 | goto pci_map_failed; |
2607 | 2608 | ||
2608 | rxdp->Control_2 = | 2609 | rxdp->Control_2 = |
@@ -2636,7 +2637,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | |||
2636 | rxdp3->Buffer0_ptr = | 2637 | rxdp3->Buffer0_ptr = |
2637 | pci_map_single(ring->pdev, ba->ba_0, | 2638 | pci_map_single(ring->pdev, ba->ba_0, |
2638 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 2639 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
2639 | if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) | 2640 | if (pci_dma_mapping_error(nic->pdev, |
2641 | rxdp3->Buffer0_ptr)) | ||
2640 | goto pci_map_failed; | 2642 | goto pci_map_failed; |
2641 | } else | 2643 | } else |
2642 | pci_dma_sync_single_for_device(ring->pdev, | 2644 | pci_dma_sync_single_for_device(ring->pdev, |
@@ -2655,7 +2657,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | |||
2655 | (ring->pdev, skb->data, ring->mtu + 4, | 2657 | (ring->pdev, skb->data, ring->mtu + 4, |
2656 | PCI_DMA_FROMDEVICE); | 2658 | PCI_DMA_FROMDEVICE); |
2657 | 2659 | ||
2658 | if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) | 2660 | if (pci_dma_mapping_error(nic->pdev, |
2661 | rxdp3->Buffer2_ptr)) | ||
2659 | goto pci_map_failed; | 2662 | goto pci_map_failed; |
2660 | 2663 | ||
2661 | if (from_card_up) { | 2664 | if (from_card_up) { |
@@ -2664,8 +2667,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | |||
2664 | ba->ba_1, BUF1_LEN, | 2667 | ba->ba_1, BUF1_LEN, |
2665 | PCI_DMA_FROMDEVICE); | 2668 | PCI_DMA_FROMDEVICE); |
2666 | 2669 | ||
2667 | if (pci_dma_mapping_error | 2670 | if (pci_dma_mapping_error(nic->pdev, |
2668 | (rxdp3->Buffer1_ptr)) { | 2671 | rxdp3->Buffer1_ptr)) { |
2669 | pci_unmap_single | 2672 | pci_unmap_single |
2670 | (ring->pdev, | 2673 | (ring->pdev, |
2671 | (dma_addr_t)(unsigned long) | 2674 | (dma_addr_t)(unsigned long) |
@@ -2806,9 +2809,9 @@ static void free_rx_buffers(struct s2io_nic *sp) | |||
2806 | } | 2809 | } |
2807 | } | 2810 | } |
2808 | 2811 | ||
2809 | static int s2io_chk_rx_buffers(struct ring_info *ring) | 2812 | static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring) |
2810 | { | 2813 | { |
2811 | if (fill_rx_buffers(ring, 0) == -ENOMEM) { | 2814 | if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { |
2812 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); | 2815 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); |
2813 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); | 2816 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); |
2814 | } | 2817 | } |
@@ -2848,7 +2851,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget) | |||
2848 | return 0; | 2851 | return 0; |
2849 | 2852 | ||
2850 | pkts_processed = rx_intr_handler(ring, budget); | 2853 | pkts_processed = rx_intr_handler(ring, budget); |
2851 | s2io_chk_rx_buffers(ring); | 2854 | s2io_chk_rx_buffers(nic, ring); |
2852 | 2855 | ||
2853 | if (pkts_processed < budget_org) { | 2856 | if (pkts_processed < budget_org) { |
2854 | netif_rx_complete(dev, napi); | 2857 | netif_rx_complete(dev, napi); |
@@ -2882,7 +2885,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget) | |||
2882 | for (i = 0; i < config->rx_ring_num; i++) { | 2885 | for (i = 0; i < config->rx_ring_num; i++) { |
2883 | ring = &mac_control->rings[i]; | 2886 | ring = &mac_control->rings[i]; |
2884 | ring_pkts_processed = rx_intr_handler(ring, budget); | 2887 | ring_pkts_processed = rx_intr_handler(ring, budget); |
2885 | s2io_chk_rx_buffers(ring); | 2888 | s2io_chk_rx_buffers(nic, ring); |
2886 | pkts_processed += ring_pkts_processed; | 2889 | pkts_processed += ring_pkts_processed; |
2887 | budget -= ring_pkts_processed; | 2890 | budget -= ring_pkts_processed; |
2888 | if (budget <= 0) | 2891 | if (budget <= 0) |
@@ -2939,7 +2942,8 @@ static void s2io_netpoll(struct net_device *dev) | |||
2939 | rx_intr_handler(&mac_control->rings[i], 0); | 2942 | rx_intr_handler(&mac_control->rings[i], 0); |
2940 | 2943 | ||
2941 | for (i = 0; i < config->rx_ring_num; i++) { | 2944 | for (i = 0; i < config->rx_ring_num; i++) { |
2942 | if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) { | 2945 | if (fill_rx_buffers(nic, &mac_control->rings[i], 0) == |
2946 | -ENOMEM) { | ||
2943 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | 2947 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); |
2944 | DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); | 2948 | DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); |
2945 | break; | 2949 | break; |
@@ -4235,14 +4239,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4235 | txdp->Buffer_Pointer = pci_map_single(sp->pdev, | 4239 | txdp->Buffer_Pointer = pci_map_single(sp->pdev, |
4236 | fifo->ufo_in_band_v, | 4240 | fifo->ufo_in_band_v, |
4237 | sizeof(u64), PCI_DMA_TODEVICE); | 4241 | sizeof(u64), PCI_DMA_TODEVICE); |
4238 | if (pci_dma_mapping_error(txdp->Buffer_Pointer)) | 4242 | if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) |
4239 | goto pci_map_failed; | 4243 | goto pci_map_failed; |
4240 | txdp++; | 4244 | txdp++; |
4241 | } | 4245 | } |
4242 | 4246 | ||
4243 | txdp->Buffer_Pointer = pci_map_single | 4247 | txdp->Buffer_Pointer = pci_map_single |
4244 | (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); | 4248 | (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); |
4245 | if (pci_dma_mapping_error(txdp->Buffer_Pointer)) | 4249 | if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) |
4246 | goto pci_map_failed; | 4250 | goto pci_map_failed; |
4247 | 4251 | ||
4248 | txdp->Host_Control = (unsigned long) skb; | 4252 | txdp->Host_Control = (unsigned long) skb; |
@@ -4345,7 +4349,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) | |||
4345 | netif_rx_schedule(dev, &ring->napi); | 4349 | netif_rx_schedule(dev, &ring->napi); |
4346 | } else { | 4350 | } else { |
4347 | rx_intr_handler(ring, 0); | 4351 | rx_intr_handler(ring, 0); |
4348 | s2io_chk_rx_buffers(ring); | 4352 | s2io_chk_rx_buffers(sp, ring); |
4349 | } | 4353 | } |
4350 | 4354 | ||
4351 | return IRQ_HANDLED; | 4355 | return IRQ_HANDLED; |
@@ -4826,7 +4830,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
4826 | */ | 4830 | */ |
4827 | if (!config->napi) { | 4831 | if (!config->napi) { |
4828 | for (i = 0; i < config->rx_ring_num; i++) | 4832 | for (i = 0; i < config->rx_ring_num; i++) |
4829 | s2io_chk_rx_buffers(&mac_control->rings[i]); | 4833 | s2io_chk_rx_buffers(sp, &mac_control->rings[i]); |
4830 | } | 4834 | } |
4831 | writeq(sp->general_int_mask, &bar0->general_int_mask); | 4835 | writeq(sp->general_int_mask, &bar0->general_int_mask); |
4832 | readl(&bar0->general_int_status); | 4836 | readl(&bar0->general_int_status); |
@@ -6859,7 +6863,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6859 | pci_map_single( sp->pdev, (*skb)->data, | 6863 | pci_map_single( sp->pdev, (*skb)->data, |
6860 | size - NET_IP_ALIGN, | 6864 | size - NET_IP_ALIGN, |
6861 | PCI_DMA_FROMDEVICE); | 6865 | PCI_DMA_FROMDEVICE); |
6862 | if (pci_dma_mapping_error(rxdp1->Buffer0_ptr)) | 6866 | if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr)) |
6863 | goto memalloc_failed; | 6867 | goto memalloc_failed; |
6864 | rxdp->Host_Control = (unsigned long) (*skb); | 6868 | rxdp->Host_Control = (unsigned long) (*skb); |
6865 | } | 6869 | } |
@@ -6886,12 +6890,13 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6886 | pci_map_single(sp->pdev, (*skb)->data, | 6890 | pci_map_single(sp->pdev, (*skb)->data, |
6887 | dev->mtu + 4, | 6891 | dev->mtu + 4, |
6888 | PCI_DMA_FROMDEVICE); | 6892 | PCI_DMA_FROMDEVICE); |
6889 | if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) | 6893 | if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr)) |
6890 | goto memalloc_failed; | 6894 | goto memalloc_failed; |
6891 | rxdp3->Buffer0_ptr = *temp0 = | 6895 | rxdp3->Buffer0_ptr = *temp0 = |
6892 | pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, | 6896 | pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, |
6893 | PCI_DMA_FROMDEVICE); | 6897 | PCI_DMA_FROMDEVICE); |
6894 | if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) { | 6898 | if (pci_dma_mapping_error(sp->pdev, |
6899 | rxdp3->Buffer0_ptr)) { | ||
6895 | pci_unmap_single (sp->pdev, | 6900 | pci_unmap_single (sp->pdev, |
6896 | (dma_addr_t)rxdp3->Buffer2_ptr, | 6901 | (dma_addr_t)rxdp3->Buffer2_ptr, |
6897 | dev->mtu + 4, PCI_DMA_FROMDEVICE); | 6902 | dev->mtu + 4, PCI_DMA_FROMDEVICE); |
@@ -6903,7 +6908,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6903 | rxdp3->Buffer1_ptr = *temp1 = | 6908 | rxdp3->Buffer1_ptr = *temp1 = |
6904 | pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, | 6909 | pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, |
6905 | PCI_DMA_FROMDEVICE); | 6910 | PCI_DMA_FROMDEVICE); |
6906 | if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { | 6911 | if (pci_dma_mapping_error(sp->pdev, |
6912 | rxdp3->Buffer1_ptr)) { | ||
6907 | pci_unmap_single (sp->pdev, | 6913 | pci_unmap_single (sp->pdev, |
6908 | (dma_addr_t)rxdp3->Buffer0_ptr, | 6914 | (dma_addr_t)rxdp3->Buffer0_ptr, |
6909 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 6915 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
@@ -7187,7 +7193,7 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
7187 | 7193 | ||
7188 | for (i = 0; i < config->rx_ring_num; i++) { | 7194 | for (i = 0; i < config->rx_ring_num; i++) { |
7189 | mac_control->rings[i].mtu = dev->mtu; | 7195 | mac_control->rings[i].mtu = dev->mtu; |
7190 | ret = fill_rx_buffers(&mac_control->rings[i], 1); | 7196 | ret = fill_rx_buffers(sp, &mac_control->rings[i], 1); |
7191 | if (ret) { | 7197 | if (ret) { |
7192 | DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", | 7198 | DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", |
7193 | dev->name); | 7199 | dev->name); |
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 601b001437c0..0d27dd39bc09 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
@@ -233,7 +233,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, | |||
233 | rx_buf->data, rx_buf->len, | 233 | rx_buf->data, rx_buf->len, |
234 | PCI_DMA_FROMDEVICE); | 234 | PCI_DMA_FROMDEVICE); |
235 | 235 | ||
236 | if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) { | 236 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) { |
237 | dev_kfree_skb_any(rx_buf->skb); | 237 | dev_kfree_skb_any(rx_buf->skb); |
238 | rx_buf->skb = NULL; | 238 | rx_buf->skb = NULL; |
239 | return -EIO; | 239 | return -EIO; |
@@ -275,7 +275,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
275 | 0, efx_rx_buf_size(efx), | 275 | 0, efx_rx_buf_size(efx), |
276 | PCI_DMA_FROMDEVICE); | 276 | PCI_DMA_FROMDEVICE); |
277 | 277 | ||
278 | if (unlikely(pci_dma_mapping_error(dma_addr))) { | 278 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { |
279 | __free_pages(rx_buf->page, efx->rx_buffer_order); | 279 | __free_pages(rx_buf->page, efx->rx_buffer_order); |
280 | rx_buf->page = NULL; | 280 | rx_buf->page = NULL; |
281 | return -EIO; | 281 | return -EIO; |
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 5cdd082ab8f6..5e8374ab28ee 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
@@ -172,7 +172,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
172 | 172 | ||
173 | /* Process all fragments */ | 173 | /* Process all fragments */ |
174 | while (1) { | 174 | while (1) { |
175 | if (unlikely(pci_dma_mapping_error(dma_addr))) | 175 | if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr))) |
176 | goto pci_err; | 176 | goto pci_err; |
177 | 177 | ||
178 | /* Store fields for marking in the per-fragment final | 178 | /* Store fields for marking in the per-fragment final |
@@ -661,7 +661,8 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) | |||
661 | tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, | 661 | tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, |
662 | TSOH_BUFFER(tsoh), header_len, | 662 | TSOH_BUFFER(tsoh), header_len, |
663 | PCI_DMA_TODEVICE); | 663 | PCI_DMA_TODEVICE); |
664 | if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) { | 664 | if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev, |
665 | tsoh->dma_addr))) { | ||
665 | kfree(tsoh); | 666 | kfree(tsoh); |
666 | return NULL; | 667 | return NULL; |
667 | } | 668 | } |
@@ -863,7 +864,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, | |||
863 | 864 | ||
864 | st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, | 865 | st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, |
865 | len, PCI_DMA_TODEVICE); | 866 | len, PCI_DMA_TODEVICE); |
866 | if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) { | 867 | if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { |
867 | st->ifc.unmap_len = len; | 868 | st->ifc.unmap_len = len; |
868 | st->ifc.len = len; | 869 | st->ifc.len = len; |
869 | st->ifc.dma_addr = st->ifc.unmap_addr; | 870 | st->ifc.dma_addr = st->ifc.unmap_addr; |
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 00aa0b108cb9..b6435d0d71f9 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -452,7 +452,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, | |||
452 | /* iommu-map the skb */ | 452 | /* iommu-map the skb */ |
453 | buf = pci_map_single(card->pdev, descr->skb->data, | 453 | buf = pci_map_single(card->pdev, descr->skb->data, |
454 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); | 454 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); |
455 | if (pci_dma_mapping_error(buf)) { | 455 | if (pci_dma_mapping_error(card->pdev, buf)) { |
456 | dev_kfree_skb_any(descr->skb); | 456 | dev_kfree_skb_any(descr->skb); |
457 | descr->skb = NULL; | 457 | descr->skb = NULL; |
458 | if (netif_msg_rx_err(card) && net_ratelimit()) | 458 | if (netif_msg_rx_err(card) && net_ratelimit()) |
@@ -691,7 +691,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
691 | unsigned long flags; | 691 | unsigned long flags; |
692 | 692 | ||
693 | buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); | 693 | buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); |
694 | if (pci_dma_mapping_error(buf)) { | 694 | if (pci_dma_mapping_error(card->pdev, buf)) { |
695 | if (netif_msg_tx_err(card) && net_ratelimit()) | 695 | if (netif_msg_tx_err(card) && net_ratelimit()) |
696 | dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " | 696 | dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " |
697 | "Dropping packet\n", skb->data, skb->len); | 697 | "Dropping packet\n", skb->data, skb->len); |
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index a645e5028c14..8487ace9d2e3 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c | |||
@@ -506,7 +506,7 @@ static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle) | |||
506 | return NULL; | 506 | return NULL; |
507 | *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, | 507 | *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, |
508 | PCI_DMA_FROMDEVICE); | 508 | PCI_DMA_FROMDEVICE); |
509 | if (pci_dma_mapping_error(*dma_handle)) { | 509 | if (pci_dma_mapping_error(hwdev, *dma_handle)) { |
510 | free_page((unsigned long)buf); | 510 | free_page((unsigned long)buf); |
511 | return NULL; | 511 | return NULL; |
512 | } | 512 | } |
@@ -536,7 +536,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, | |||
536 | return NULL; | 536 | return NULL; |
537 | *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, | 537 | *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, |
538 | PCI_DMA_FROMDEVICE); | 538 | PCI_DMA_FROMDEVICE); |
539 | if (pci_dma_mapping_error(*dma_handle)) { | 539 | if (pci_dma_mapping_error(hwdev, *dma_handle)) { |
540 | dev_kfree_skb_any(skb); | 540 | dev_kfree_skb_any(skb); |
541 | return NULL; | 541 | return NULL; |
542 | } | 542 | } |
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 217d506527a9..d9769c527346 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c | |||
@@ -1166,7 +1166,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) | |||
1166 | bf->skb = skb; | 1166 | bf->skb = skb; |
1167 | bf->skbaddr = pci_map_single(sc->pdev, | 1167 | bf->skbaddr = pci_map_single(sc->pdev, |
1168 | skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); | 1168 | skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); |
1169 | if (unlikely(pci_dma_mapping_error(bf->skbaddr))) { | 1169 | if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) { |
1170 | ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); | 1170 | ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); |
1171 | dev_kfree_skb(skb); | 1171 | dev_kfree_skb(skb); |
1172 | bf->skb = NULL; | 1172 | bf->skb = NULL; |
@@ -1918,7 +1918,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) | |||
1918 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " | 1918 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " |
1919 | "skbaddr %llx\n", skb, skb->data, skb->len, | 1919 | "skbaddr %llx\n", skb, skb->data, skb->len, |
1920 | (unsigned long long)bf->skbaddr); | 1920 | (unsigned long long)bf->skbaddr); |
1921 | if (pci_dma_mapping_error(bf->skbaddr)) { | 1921 | if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) { |
1922 | ATH5K_ERR(sc, "beacon DMA mapping failed\n"); | 1922 | ATH5K_ERR(sc, "beacon DMA mapping failed\n"); |
1923 | return -EIO; | 1923 | return -EIO; |
1924 | } | 1924 | } |
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c index 0338b0912674..e97059415ab4 100644 --- a/drivers/parport/ieee1284.c +++ b/drivers/parport/ieee1284.c | |||
@@ -199,8 +199,6 @@ int parport_wait_peripheral(struct parport *port, | |||
199 | /* 40ms of slow polling. */ | 199 | /* 40ms of slow polling. */ |
200 | deadline = jiffies + msecs_to_jiffies(40); | 200 | deadline = jiffies + msecs_to_jiffies(40); |
201 | while (time_before (jiffies, deadline)) { | 201 | while (time_before (jiffies, deadline)) { |
202 | int ret; | ||
203 | |||
204 | if (signal_pending (current)) | 202 | if (signal_pending (current)) |
205 | return -EINTR; | 203 | return -EINTR; |
206 | 204 | ||
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c index 802a81d47367..00e1d9620f7c 100644 --- a/drivers/parport/parport_cs.c +++ b/drivers/parport/parport_cs.c | |||
@@ -235,7 +235,7 @@ failed: | |||
235 | 235 | ||
236 | ======================================================================*/ | 236 | ======================================================================*/ |
237 | 237 | ||
238 | void parport_cs_release(struct pcmcia_device *link) | 238 | static void parport_cs_release(struct pcmcia_device *link) |
239 | { | 239 | { |
240 | parport_info_t *info = link->priv; | 240 | parport_info_t *info = link->priv; |
241 | 241 | ||
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index e0c2a4584ec6..8a846adf1dcf 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c | |||
@@ -2867,7 +2867,7 @@ static struct parport_pc_pci { | |||
2867 | * and 840 locks up if you write 1 to bit 2! */ | 2867 | * and 840 locks up if you write 1 to bit 2! */ |
2868 | /* oxsemi_952 */ { 1, { { 0, 1 }, } }, | 2868 | /* oxsemi_952 */ { 1, { { 0, 1 }, } }, |
2869 | /* oxsemi_954 */ { 1, { { 0, -1 }, } }, | 2869 | /* oxsemi_954 */ { 1, { { 0, -1 }, } }, |
2870 | /* oxsemi_840 */ { 1, { { 0, -1 }, } }, | 2870 | /* oxsemi_840 */ { 1, { { 0, 1 }, } }, |
2871 | /* aks_0100 */ { 1, { { 0, -1 }, } }, | 2871 | /* aks_0100 */ { 1, { { 0, -1 }, } }, |
2872 | /* mobility_pp */ { 1, { { 0, 1 }, } }, | 2872 | /* mobility_pp */ { 1, { { 0, 1 }, } }, |
2873 | /* netmos_9705 */ { 1, { { 0, -1 }, } }, /* untested */ | 2873 | /* netmos_9705 */ { 1, { { 0, -1 }, } }, /* untested */ |
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c index d950fc34320a..554e11f9e1ce 100644 --- a/drivers/parport/procfs.c +++ b/drivers/parport/procfs.c | |||
@@ -429,9 +429,6 @@ struct parport_default_sysctl_table | |||
429 | ctl_table dev_dir[2]; | 429 | ctl_table dev_dir[2]; |
430 | }; | 430 | }; |
431 | 431 | ||
432 | extern unsigned long parport_default_timeslice; | ||
433 | extern int parport_default_spintime; | ||
434 | |||
435 | static struct parport_default_sysctl_table | 432 | static struct parport_default_sysctl_table |
436 | parport_default_sysctl_table = { | 433 | parport_default_sysctl_table = { |
437 | .sysctl_header = NULL, | 434 | .sysctl_header = NULL, |
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h index e3fa9a2d9a3d..9fd7bb9b7dce 100644 --- a/drivers/pnp/base.h +++ b/drivers/pnp/base.h | |||
@@ -19,7 +19,6 @@ struct pnp_id *pnp_add_id(struct pnp_dev *dev, char *id); | |||
19 | int pnp_interface_attach_device(struct pnp_dev *dev); | 19 | int pnp_interface_attach_device(struct pnp_dev *dev); |
20 | 20 | ||
21 | int pnp_add_card(struct pnp_card *card); | 21 | int pnp_add_card(struct pnp_card *card); |
22 | struct pnp_id *pnp_add_card_id(struct pnp_card *card, char *id); | ||
23 | void pnp_remove_card(struct pnp_card *card); | 22 | void pnp_remove_card(struct pnp_card *card); |
24 | int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev); | 23 | int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev); |
25 | void pnp_remove_card_device(struct pnp_dev *dev); | 24 | void pnp_remove_card_device(struct pnp_dev *dev); |
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c index a762a4176736..e75b060daa95 100644 --- a/drivers/pnp/card.c +++ b/drivers/pnp/card.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/ctype.h> | 8 | #include <linux/ctype.h> |
9 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
10 | #include <linux/pnp.h> | 10 | #include <linux/pnp.h> |
11 | #include <linux/dma-mapping.h> | ||
11 | #include "base.h" | 12 | #include "base.h" |
12 | 13 | ||
13 | LIST_HEAD(pnp_cards); | 14 | LIST_HEAD(pnp_cards); |
@@ -101,7 +102,7 @@ static int card_probe(struct pnp_card *card, struct pnp_card_driver *drv) | |||
101 | * @id: pointer to a pnp_id structure | 102 | * @id: pointer to a pnp_id structure |
102 | * @card: pointer to the desired card | 103 | * @card: pointer to the desired card |
103 | */ | 104 | */ |
104 | struct pnp_id *pnp_add_card_id(struct pnp_card *card, char *id) | 105 | static struct pnp_id *pnp_add_card_id(struct pnp_card *card, char *id) |
105 | { | 106 | { |
106 | struct pnp_id *dev_id, *ptr; | 107 | struct pnp_id *dev_id, *ptr; |
107 | 108 | ||
@@ -167,6 +168,9 @@ struct pnp_card *pnp_alloc_card(struct pnp_protocol *protocol, int id, char *pnp | |||
167 | sprintf(card->dev.bus_id, "%02x:%02x", card->protocol->number, | 168 | sprintf(card->dev.bus_id, "%02x:%02x", card->protocol->number, |
168 | card->number); | 169 | card->number); |
169 | 170 | ||
171 | card->dev.coherent_dma_mask = DMA_24BIT_MASK; | ||
172 | card->dev.dma_mask = &card->dev.coherent_dma_mask; | ||
173 | |||
170 | dev_id = pnp_add_card_id(card, pnpid); | 174 | dev_id = pnp_add_card_id(card, pnpid); |
171 | if (!dev_id) { | 175 | if (!dev_id) { |
172 | kfree(card); | 176 | kfree(card); |
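The pnp/card.c hunk gives every PnP card device a default 24-bit DMA mask (the classic ISA limit) so drivers can use the generic DMA API against it. A hedged sketch of the same idiom for a hypothetical bus device, using the 2.6.27-era DMA_24BIT_MASK constant; "mydev" is assumed to be a freshly allocated struct device about to be registered:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static void my_setup_dma_masks(struct device *mydev)
{
	/* ISA-style DMA can only reach the first 16 MB of memory. */
	mydev->coherent_dma_mask = DMA_24BIT_MASK;

	/* Point the streaming mask at the coherent one so both agree. */
	mydev->dma_mask = &mydev->coherent_dma_mask;
}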
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c index 55f55ed72dc7..0bdf9b8a5e58 100644 --- a/drivers/pnp/quirks.c +++ b/drivers/pnp/quirks.c | |||
@@ -245,15 +245,17 @@ static void quirk_system_pci_resources(struct pnp_dev *dev) | |||
245 | */ | 245 | */ |
246 | for_each_pci_dev(pdev) { | 246 | for_each_pci_dev(pdev) { |
247 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | 247 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
248 | if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM) || | 248 | unsigned int type; |
249 | pci_resource_len(pdev, i) == 0) | 249 | |
250 | type = pci_resource_flags(pdev, i) & | ||
251 | (IORESOURCE_IO | IORESOURCE_MEM); | ||
252 | if (!type || pci_resource_len(pdev, i) == 0) | ||
250 | continue; | 253 | continue; |
251 | 254 | ||
252 | pci_start = pci_resource_start(pdev, i); | 255 | pci_start = pci_resource_start(pdev, i); |
253 | pci_end = pci_resource_end(pdev, i); | 256 | pci_end = pci_resource_end(pdev, i); |
254 | for (j = 0; | 257 | for (j = 0; |
255 | (res = pnp_get_resource(dev, IORESOURCE_MEM, j)); | 258 | (res = pnp_get_resource(dev, type, j)); j++) { |
256 | j++) { | ||
257 | if (res->start == 0 && res->end == 0) | 259 | if (res->start == 0 && res->end == 0) |
258 | continue; | 260 | continue; |
259 | 261 | ||
@@ -283,9 +285,10 @@ static void quirk_system_pci_resources(struct pnp_dev *dev) | |||
283 | * the PCI region, and that might prevent a PCI | 285 | * the PCI region, and that might prevent a PCI |
284 | * driver from requesting its resources. | 286 | * driver from requesting its resources. |
285 | */ | 287 | */ |
286 | dev_warn(&dev->dev, "mem resource " | 288 | dev_warn(&dev->dev, "%s resource " |
287 | "(0x%llx-0x%llx) overlaps %s BAR %d " | 289 | "(0x%llx-0x%llx) overlaps %s BAR %d " |
288 | "(0x%llx-0x%llx), disabling\n", | 290 | "(0x%llx-0x%llx), disabling\n", |
291 | pnp_resource_type_name(res), | ||
289 | (unsigned long long) pnp_start, | 292 | (unsigned long long) pnp_start, |
290 | (unsigned long long) pnp_end, | 293 | (unsigned long long) pnp_end, |
291 | pci_name(pdev), i, | 294 | pci_name(pdev), i, |
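The quirk now compares a PNP device's resources against both I/O and memory BARs instead of memory only, and keys the pnp_get_resource() lookup on whichever type the PCI BAR turned out to be. A reduced sketch of the type masking and the overlap test, with the PNP-core plumbing elided; the helper name and its parameters are invented:

/*
 * Sketch: checks one PCI BAR of pdev against a PNP resource window
 * [pnp_start, pnp_end]; returns true if the two ranges collide.
 */
static bool my_bar_overlaps_pnp(struct pci_dev *pdev, int bar,
				resource_size_t pnp_start,
				resource_size_t pnp_end)
{
	unsigned int type = pci_resource_flags(pdev, bar) &
			    (IORESOURCE_IO | IORESOURCE_MEM);

	if (!type || pci_resource_len(pdev, bar) == 0)
		return false;	/* unused BAR, nothing to collide with */

	/* Two ranges overlap unless one ends before the other starts. */
	return !(pnp_end < pci_resource_start(pdev, bar) ||
		 pnp_start > pci_resource_end(pdev, bar));
}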
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index c4a7c06793c5..61f8fdea2d96 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
@@ -3525,7 +3525,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost) | |||
3525 | crq->msg_token = dma_map_single(dev, crq->msgs, | 3525 | crq->msg_token = dma_map_single(dev, crq->msgs, |
3526 | PAGE_SIZE, DMA_BIDIRECTIONAL); | 3526 | PAGE_SIZE, DMA_BIDIRECTIONAL); |
3527 | 3527 | ||
3528 | if (dma_mapping_error(crq->msg_token)) | 3528 | if (dma_mapping_error(dev, crq->msg_token)) |
3529 | goto map_failed; | 3529 | goto map_failed; |
3530 | 3530 | ||
3531 | retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, | 3531 | retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, |
@@ -3618,7 +3618,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost) | |||
3618 | async_q->size * sizeof(*async_q->msgs), | 3618 | async_q->size * sizeof(*async_q->msgs), |
3619 | DMA_BIDIRECTIONAL); | 3619 | DMA_BIDIRECTIONAL); |
3620 | 3620 | ||
3621 | if (dma_mapping_error(async_q->msg_token)) { | 3621 | if (dma_mapping_error(dev, async_q->msg_token)) { |
3622 | dev_err(dev, "Failed to map async queue\n"); | 3622 | dev_err(dev, "Failed to map async queue\n"); |
3623 | goto free_async_crq; | 3623 | goto free_async_crq; |
3624 | } | 3624 | } |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 20000ec79b04..6b24b9cdb04c 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -859,7 +859,7 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) | |||
859 | sizeof(hostdata->madapter_info), | 859 | sizeof(hostdata->madapter_info), |
860 | DMA_BIDIRECTIONAL); | 860 | DMA_BIDIRECTIONAL); |
861 | 861 | ||
862 | if (dma_mapping_error(req->buffer)) { | 862 | if (dma_mapping_error(hostdata->dev, req->buffer)) { |
863 | if (!firmware_has_feature(FW_FEATURE_CMO)) | 863 | if (!firmware_has_feature(FW_FEATURE_CMO)) |
864 | dev_err(hostdata->dev, | 864 | dev_err(hostdata->dev, |
865 | "Unable to map request_buffer for " | 865 | "Unable to map request_buffer for " |
@@ -1407,7 +1407,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, | |||
1407 | length, | 1407 | length, |
1408 | DMA_BIDIRECTIONAL); | 1408 | DMA_BIDIRECTIONAL); |
1409 | 1409 | ||
1410 | if (dma_mapping_error(host_config->buffer)) { | 1410 | if (dma_mapping_error(hostdata->dev, host_config->buffer)) { |
1411 | if (!firmware_has_feature(FW_FEATURE_CMO)) | 1411 | if (!firmware_has_feature(FW_FEATURE_CMO)) |
1412 | dev_err(hostdata->dev, | 1412 | dev_err(hostdata->dev, |
1413 | "dma_mapping error getting host config\n"); | 1413 | "dma_mapping error getting host config\n"); |
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index 3b9514c8f1f1..2e13ec00172a 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c | |||
@@ -564,7 +564,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target) | |||
564 | queue->size * sizeof(*queue->msgs), | 564 | queue->size * sizeof(*queue->msgs), |
565 | DMA_BIDIRECTIONAL); | 565 | DMA_BIDIRECTIONAL); |
566 | 566 | ||
567 | if (dma_mapping_error(queue->msg_token)) | 567 | if (dma_mapping_error(target->dev, queue->msg_token)) |
568 | goto map_failed; | 568 | goto map_failed; |
569 | 569 | ||
570 | err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token, | 570 | err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token, |
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c index 182146100dc1..462a8574dad9 100644 --- a/drivers/scsi/ibmvscsi/rpa_vscsi.c +++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c | |||
@@ -253,7 +253,7 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue, | |||
253 | queue->size * sizeof(*queue->msgs), | 253 | queue->size * sizeof(*queue->msgs), |
254 | DMA_BIDIRECTIONAL); | 254 | DMA_BIDIRECTIONAL); |
255 | 255 | ||
256 | if (dma_mapping_error(queue->msg_token)) | 256 | if (dma_mapping_error(hostdata->dev, queue->msg_token)) |
257 | goto map_failed; | 257 | goto map_failed; |
258 | 258 | ||
259 | gather_partition_info(); | 259 | gather_partition_info(); |
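The ibmvfc/ibmvscsi and SPI hunks are the generic-API side of the same change: dma_mapping_error() now wants the struct device that did the mapping. A short sketch of the map-and-check sequence for a DMA-able message queue, with made-up names; error handling beyond the mapping check is omitted:

#include <linux/dma-mapping.h>

/*
 * Sketch: maps "count" messages of "msg_size" bytes for bidirectional DMA;
 * dev, msgs and token are the caller's to keep and to unmap later.
 */
static int my_map_queue(struct device *dev, void *msgs, size_t count,
			size_t msg_size, dma_addr_t *token)
{
	*token = dma_map_single(dev, msgs, count * msg_size,
				DMA_BIDIRECTIONAL);

	/* The device argument lets the core pick the bus-specific dma_ops. */
	if (dma_mapping_error(dev, *token)) {
		dev_err(dev, "failed to map message queue\n");
		return -ENOMEM;
	}
	return 0;
}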
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c index e81d59d78910..0c7165660853 100644 --- a/drivers/spi/atmel_spi.c +++ b/drivers/spi/atmel_spi.c | |||
@@ -313,14 +313,14 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) | |||
313 | xfer->tx_dma = dma_map_single(dev, | 313 | xfer->tx_dma = dma_map_single(dev, |
314 | (void *) xfer->tx_buf, xfer->len, | 314 | (void *) xfer->tx_buf, xfer->len, |
315 | DMA_TO_DEVICE); | 315 | DMA_TO_DEVICE); |
316 | if (dma_mapping_error(xfer->tx_dma)) | 316 | if (dma_mapping_error(dev, xfer->tx_dma)) |
317 | return -ENOMEM; | 317 | return -ENOMEM; |
318 | } | 318 | } |
319 | if (xfer->rx_buf) { | 319 | if (xfer->rx_buf) { |
320 | xfer->rx_dma = dma_map_single(dev, | 320 | xfer->rx_dma = dma_map_single(dev, |
321 | xfer->rx_buf, xfer->len, | 321 | xfer->rx_buf, xfer->len, |
322 | DMA_FROM_DEVICE); | 322 | DMA_FROM_DEVICE); |
323 | if (dma_mapping_error(xfer->rx_dma)) { | 323 | if (dma_mapping_error(dev, xfer->rx_dma)) { |
324 | if (xfer->tx_buf) | 324 | if (xfer->tx_buf) |
325 | dma_unmap_single(dev, | 325 | dma_unmap_single(dev, |
326 | xfer->tx_dma, xfer->len, | 326 | xfer->tx_dma, xfer->len, |
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c index 9149689c79d9..87b73e0169c5 100644 --- a/drivers/spi/au1550_spi.c +++ b/drivers/spi/au1550_spi.c | |||
@@ -334,7 +334,7 @@ static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size) | |||
334 | hw->dma_rx_tmpbuf_size = size; | 334 | hw->dma_rx_tmpbuf_size = size; |
335 | hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf, | 335 | hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf, |
336 | size, DMA_FROM_DEVICE); | 336 | size, DMA_FROM_DEVICE); |
337 | if (dma_mapping_error(hw->dma_rx_tmpbuf_addr)) { | 337 | if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) { |
338 | kfree(hw->dma_rx_tmpbuf); | 338 | kfree(hw->dma_rx_tmpbuf); |
339 | hw->dma_rx_tmpbuf = 0; | 339 | hw->dma_rx_tmpbuf = 0; |
340 | hw->dma_rx_tmpbuf_size = 0; | 340 | hw->dma_rx_tmpbuf_size = 0; |
@@ -378,7 +378,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) | |||
378 | dma_rx_addr = dma_map_single(hw->dev, | 378 | dma_rx_addr = dma_map_single(hw->dev, |
379 | (void *)t->rx_buf, | 379 | (void *)t->rx_buf, |
380 | t->len, DMA_FROM_DEVICE); | 380 | t->len, DMA_FROM_DEVICE); |
381 | if (dma_mapping_error(dma_rx_addr)) | 381 | if (dma_mapping_error(hw->dev, dma_rx_addr)) |
382 | dev_err(hw->dev, "rx dma map error\n"); | 382 | dev_err(hw->dev, "rx dma map error\n"); |
383 | } | 383 | } |
384 | } else { | 384 | } else { |
@@ -401,7 +401,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) | |||
401 | dma_tx_addr = dma_map_single(hw->dev, | 401 | dma_tx_addr = dma_map_single(hw->dev, |
402 | (void *)t->tx_buf, | 402 | (void *)t->tx_buf, |
403 | t->len, DMA_TO_DEVICE); | 403 | t->len, DMA_TO_DEVICE); |
404 | if (dma_mapping_error(dma_tx_addr)) | 404 | if (dma_mapping_error(hw->dev, dma_tx_addr)) |
405 | dev_err(hw->dev, "tx dma map error\n"); | 405 | dev_err(hw->dev, "tx dma map error\n"); |
406 | } | 406 | } |
407 | } else { | 407 | } else { |
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index b1cc148036c1..f6f987bb71ca 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c | |||
@@ -836,7 +836,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) | |||
836 | if (tx_buf != NULL) { | 836 | if (tx_buf != NULL) { |
837 | t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf, | 837 | t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf, |
838 | len, DMA_TO_DEVICE); | 838 | len, DMA_TO_DEVICE); |
839 | if (dma_mapping_error(t->tx_dma)) { | 839 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { |
840 | dev_dbg(&spi->dev, "dma %cX %d bytes error\n", | 840 | dev_dbg(&spi->dev, "dma %cX %d bytes error\n", |
841 | 'T', len); | 841 | 'T', len); |
842 | return -EINVAL; | 842 | return -EINVAL; |
@@ -845,7 +845,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) | |||
845 | if (rx_buf != NULL) { | 845 | if (rx_buf != NULL) { |
846 | t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len, | 846 | t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len, |
847 | DMA_FROM_DEVICE); | 847 | DMA_FROM_DEVICE); |
848 | if (dma_mapping_error(t->rx_dma)) { | 848 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { |
849 | dev_dbg(&spi->dev, "dma %cX %d bytes error\n", | 849 | dev_dbg(&spi->dev, "dma %cX %d bytes error\n", |
850 | 'R', len); | 850 | 'R', len); |
851 | if (tx_buf != NULL) | 851 | if (tx_buf != NULL) |
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index 0c452c46ab07..067299d6d192 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c | |||
@@ -353,7 +353,7 @@ static int map_dma_buffers(struct driver_data *drv_data) | |||
353 | drv_data->rx_dma = dma_map_single(dev, drv_data->rx, | 353 | drv_data->rx_dma = dma_map_single(dev, drv_data->rx, |
354 | drv_data->rx_map_len, | 354 | drv_data->rx_map_len, |
355 | DMA_FROM_DEVICE); | 355 | DMA_FROM_DEVICE); |
356 | if (dma_mapping_error(drv_data->rx_dma)) | 356 | if (dma_mapping_error(dev, drv_data->rx_dma)) |
357 | return 0; | 357 | return 0; |
358 | 358 | ||
359 | /* Stream map the tx buffer */ | 359 | /* Stream map the tx buffer */ |
@@ -361,7 +361,7 @@ static int map_dma_buffers(struct driver_data *drv_data) | |||
361 | drv_data->tx_map_len, | 361 | drv_data->tx_map_len, |
362 | DMA_TO_DEVICE); | 362 | DMA_TO_DEVICE); |
363 | 363 | ||
364 | if (dma_mapping_error(drv_data->tx_dma)) { | 364 | if (dma_mapping_error(dev, drv_data->tx_dma)) { |
365 | dma_unmap_single(dev, drv_data->rx_dma, | 365 | dma_unmap_single(dev, drv_data->rx_dma, |
366 | drv_data->rx_map_len, DMA_FROM_DEVICE); | 366 | drv_data->rx_map_len, DMA_FROM_DEVICE); |
367 | return 0; | 367 | return 0; |
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c index 54ac7bea5f8c..6fb77fcc4971 100644 --- a/drivers/spi/spi_imx.c +++ b/drivers/spi/spi_imx.c | |||
@@ -491,7 +491,7 @@ static int map_dma_buffers(struct driver_data *drv_data) | |||
491 | buf, | 491 | buf, |
492 | drv_data->tx_map_len, | 492 | drv_data->tx_map_len, |
493 | DMA_TO_DEVICE); | 493 | DMA_TO_DEVICE); |
494 | if (dma_mapping_error(drv_data->tx_dma)) | 494 | if (dma_mapping_error(dev, drv_data->tx_dma)) |
495 | return -1; | 495 | return -1; |
496 | 496 | ||
497 | drv_data->tx_dma_needs_unmap = 1; | 497 | drv_data->tx_dma_needs_unmap = 1; |
@@ -516,7 +516,7 @@ static int map_dma_buffers(struct driver_data *drv_data) | |||
516 | buf, | 516 | buf, |
517 | drv_data->len, | 517 | drv_data->len, |
518 | DMA_FROM_DEVICE); | 518 | DMA_FROM_DEVICE); |
519 | if (dma_mapping_error(drv_data->rx_dma)) | 519 | if (dma_mapping_error(dev, drv_data->rx_dma)) |
520 | return -1; | 520 | return -1; |
521 | drv_data->rx_dma_needs_unmap = 1; | 521 | drv_data->rx_dma_needs_unmap = 1; |
522 | } | 522 | } |
@@ -534,7 +534,7 @@ static int map_dma_buffers(struct driver_data *drv_data) | |||
534 | buf, | 534 | buf, |
535 | drv_data->tx_map_len, | 535 | drv_data->tx_map_len, |
536 | DMA_TO_DEVICE); | 536 | DMA_TO_DEVICE); |
537 | if (dma_mapping_error(drv_data->tx_dma)) { | 537 | if (dma_mapping_error(dev, drv_data->tx_dma)) { |
538 | if (drv_data->rx_dma) { | 538 | if (drv_data->rx_dma) { |
539 | dma_unmap_single(dev, | 539 | dma_unmap_single(dev, |
540 | drv_data->rx_dma, | 540 | drv_data->rx_dma, |
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index 5e3e4e9b6c77..1f715436d6d3 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c | |||
@@ -87,7 +87,7 @@ struct mon_reader_text { | |||
87 | 87 | ||
88 | static struct dentry *mon_dir; /* Usually /sys/kernel/debug/usbmon */ | 88 | static struct dentry *mon_dir; /* Usually /sys/kernel/debug/usbmon */ |
89 | 89 | ||
90 | static void mon_text_ctor(struct kmem_cache *, void *); | 90 | static void mon_text_ctor(void *); |
91 | 91 | ||
92 | struct mon_text_ptr { | 92 | struct mon_text_ptr { |
93 | int cnt, limit; | 93 | int cnt, limit; |
@@ -720,7 +720,7 @@ void mon_text_del(struct mon_bus *mbus) | |||
720 | /* | 720 | /* |
721 | * Slab interface: constructor. | 721 | * Slab interface: constructor. |
722 | */ | 722 | */ |
723 | static void mon_text_ctor(struct kmem_cache *slab, void *mem) | 723 | static void mon_text_ctor(void *mem) |
724 | { | 724 | { |
725 | /* | 725 | /* |
726 | * Nothing to initialize. No, really! | 726 | * Nothing to initialize. No, really! |
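The usbmon hunk reflects the slab-allocator API change in the same merge window: cache constructors now receive only the object pointer, the struct kmem_cache argument having been dropped. A hedged sketch of declaring a cache with the new one-argument constructor; the cache and object names are invented:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_event {
	spinlock_t lock;
	int state;
};

/* New-style constructor: only the freshly allocated object is passed in. */
static void my_event_ctor(void *mem)
{
	struct my_event *ev = mem;

	spin_lock_init(&ev->lock);
	ev->state = 0;
}

static struct kmem_cache *my_event_cache;

static int __init my_cache_init(void)
{
	my_event_cache = kmem_cache_create("my_event", sizeof(struct my_event),
					   0, 0, my_event_ctor);
	return my_event_cache ? 0 : -ENOMEM;
}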
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index a11cc2fdd4cd..4055dbdd1b42 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c | |||
@@ -370,7 +370,7 @@ static const struct consw sti_con = { | |||
370 | 370 | ||
371 | 371 | ||
372 | 372 | ||
373 | int __init sticonsole_init(void) | 373 | static int __init sticonsole_init(void) |
374 | { | 374 | { |
375 | /* already initialized ? */ | 375 | /* already initialized ? */ |
376 | if (sticon_sti) | 376 | if (sticon_sti) |
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c index e9ab657f0bb7..d7822af0e00a 100644 --- a/drivers/video/console/sticore.c +++ b/drivers/video/console/sticore.c | |||
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | #define STI_DRIVERVERSION "Version 0.9a" | 30 | #define STI_DRIVERVERSION "Version 0.9a" |
31 | 31 | ||
32 | struct sti_struct *default_sti __read_mostly; | 32 | static struct sti_struct *default_sti __read_mostly; |
33 | 33 | ||
34 | /* number of STI ROMS found and their ptrs to each struct */ | 34 | /* number of STI ROMS found and their ptrs to each struct */ |
35 | static int num_sti_roms __read_mostly; | 35 | static int num_sti_roms __read_mostly; |
@@ -68,8 +68,7 @@ static const struct sti_init_flags default_init_flags = { | |||
68 | .init_cmap_tx = 1, | 68 | .init_cmap_tx = 1, |
69 | }; | 69 | }; |
70 | 70 | ||
71 | int | 71 | static int sti_init_graph(struct sti_struct *sti) |
72 | sti_init_graph(struct sti_struct *sti) | ||
73 | { | 72 | { |
74 | struct sti_init_inptr_ext inptr_ext = { 0, }; | 73 | struct sti_init_inptr_ext inptr_ext = { 0, }; |
75 | struct sti_init_inptr inptr = { | 74 | struct sti_init_inptr inptr = { |
@@ -100,8 +99,7 @@ static const struct sti_conf_flags default_conf_flags = { | |||
100 | .wait = STI_WAIT, | 99 | .wait = STI_WAIT, |
101 | }; | 100 | }; |
102 | 101 | ||
103 | void | 102 | static void sti_inq_conf(struct sti_struct *sti) |
104 | sti_inq_conf(struct sti_struct *sti) | ||
105 | { | 103 | { |
106 | struct sti_conf_inptr inptr = { 0, }; | 104 | struct sti_conf_inptr inptr = { 0, }; |
107 | unsigned long flags; | 105 | unsigned long flags; |
@@ -237,8 +235,8 @@ static void sti_flush(unsigned long start, unsigned long end) | |||
237 | flush_icache_range(start, end); | 235 | flush_icache_range(start, end); |
238 | } | 236 | } |
239 | 237 | ||
240 | void __devinit | 238 | static void __devinit sti_rom_copy(unsigned long base, unsigned long count, |
241 | sti_rom_copy(unsigned long base, unsigned long count, void *dest) | 239 | void *dest) |
242 | { | 240 | { |
243 | unsigned long dest_start = (unsigned long) dest; | 241 | unsigned long dest_start = (unsigned long) dest; |
244 | 242 | ||
@@ -478,8 +476,8 @@ sti_init_glob_cfg(struct sti_struct *sti, | |||
478 | } | 476 | } |
479 | 477 | ||
480 | #ifdef CONFIG_FB | 478 | #ifdef CONFIG_FB |
481 | struct sti_cooked_font * __devinit | 479 | static struct sti_cooked_font __devinit |
482 | sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name) | 480 | *sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name) |
483 | { | 481 | { |
484 | const struct font_desc *fbfont; | 482 | const struct font_desc *fbfont; |
485 | unsigned int size, bpc; | 483 | unsigned int size, bpc; |
@@ -534,16 +532,16 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name) | |||
534 | return cooked_font; | 532 | return cooked_font; |
535 | } | 533 | } |
536 | #else | 534 | #else |
537 | struct sti_cooked_font * __devinit | 535 | static struct sti_cooked_font __devinit |
538 | sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name) | 536 | *sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name) |
539 | { | 537 | { |
540 | return NULL; | 538 | return NULL; |
541 | } | 539 | } |
542 | #endif | 540 | #endif |
543 | 541 | ||
544 | struct sti_cooked_font * __devinit | 542 | static struct sti_cooked_font __devinit |
545 | sti_select_font(struct sti_cooked_rom *rom, | 543 | *sti_select_font(struct sti_cooked_rom *rom, |
546 | int (*search_font_fnc) (struct sti_cooked_rom *,int,int) ) | 544 | int (*search_font_fnc)(struct sti_cooked_rom *, int, int)) |
547 | { | 545 | { |
548 | struct sti_cooked_font *font; | 546 | struct sti_cooked_font *font; |
549 | int i; | 547 | int i; |
@@ -707,8 +705,7 @@ sti_get_bmode_rom (unsigned long address) | |||
707 | return raw; | 705 | return raw; |
708 | } | 706 | } |
709 | 707 | ||
710 | struct sti_rom * __devinit | 708 | static struct sti_rom __devinit *sti_get_wmode_rom(unsigned long address) |
711 | sti_get_wmode_rom (unsigned long address) | ||
712 | { | 709 | { |
713 | struct sti_rom *raw; | 710 | struct sti_rom *raw; |
714 | unsigned long size; | 711 | unsigned long size; |
@@ -723,8 +720,8 @@ sti_get_wmode_rom (unsigned long address) | |||
723 | return raw; | 720 | return raw; |
724 | } | 721 | } |
725 | 722 | ||
726 | int __devinit | 723 | static int __devinit sti_read_rom(int wordmode, struct sti_struct *sti, |
727 | sti_read_rom(int wordmode, struct sti_struct *sti, unsigned long address) | 724 | unsigned long address) |
728 | { | 725 | { |
729 | struct sti_cooked_rom *cooked; | 726 | struct sti_cooked_rom *cooked; |
730 | struct sti_rom *raw = NULL; | 727 | struct sti_rom *raw = NULL; |
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 5d84b3431098..6b487801eeae 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/device.h> | 35 | #include <linux/device.h> |
36 | #include <linux/efi.h> | 36 | #include <linux/efi.h> |
37 | #include <linux/fb.h> | 37 | #include <linux/fb.h> |
38 | #include <linux/major.h> | ||
39 | 38 | ||
40 | #include <asm/fb.h> | 39 | #include <asm/fb.h> |
41 | 40 | ||
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c index aa8c714d6245..b790ddff76f9 100644 --- a/drivers/video/macfb.c +++ b/drivers/video/macfb.c | |||
@@ -596,7 +596,7 @@ static struct fb_ops macfb_ops = { | |||
596 | .fb_imageblit = cfb_imageblit, | 596 | .fb_imageblit = cfb_imageblit, |
597 | }; | 597 | }; |
598 | 598 | ||
599 | void __init macfb_setup(char *options) | 599 | static void __init macfb_setup(char *options) |
600 | { | 600 | { |
601 | char *this_opt; | 601 | char *this_opt; |
602 | 602 | ||
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 2b707a8ce5de..69de2fed6c58 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c | |||
@@ -1336,7 +1336,7 @@ static int __devinit pxafb_map_video_memory(struct pxafb_info *fbi) | |||
1336 | fbi->dma_buff_phys = fbi->map_dma; | 1336 | fbi->dma_buff_phys = fbi->map_dma; |
1337 | fbi->palette_cpu = (u16 *) fbi->dma_buff->palette; | 1337 | fbi->palette_cpu = (u16 *) fbi->dma_buff->palette; |
1338 | 1338 | ||
1339 | pr_debug("pxafb: palette_mem_size = 0x%08lx\n", fbi->palette_size*sizeof(u16)); | 1339 | pr_debug("pxafb: palette_mem_size = 0x%08x\n", fbi->palette_size*sizeof(u16)); |
1340 | 1340 | ||
1341 | #ifdef CONFIG_FB_PXA_SMARTPANEL | 1341 | #ifdef CONFIG_FB_PXA_SMARTPANEL |
1342 | fbi->smart_cmds = (uint16_t *) fbi->dma_buff->cmd_buff; | 1342 | fbi->smart_cmds = (uint16_t *) fbi->dma_buff->cmd_buff; |
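The pxafb hunk silences a printk format warning: palette_size * sizeof(u16) is a size_t, not an unsigned long, so %08lx did not match on ARM. A brief illustrative sketch; the %zx spelling shown here is a portability note, not part of the patch, and the helper name is invented:

static void my_report_palette(unsigned int palette_size)
{
	size_t palette_mem_size = palette_size * sizeof(u16);

	/*
	 * %zx matches size_t on every architecture; the patch's %08x is
	 * correct on 32-bit ARM, where size_t is unsigned int.
	 */
	pr_debug("pxafb: palette_mem_size = 0x%zx\n", palette_mem_size);
}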
diff --git a/drivers/video/sticore.h b/drivers/video/sticore.h index 1a9a60c74be3..7fe5be4bc70e 100644 --- a/drivers/video/sticore.h +++ b/drivers/video/sticore.h | |||
@@ -352,8 +352,6 @@ struct sti_struct *sti_get_rom(unsigned int index); /* 0: default sti */ | |||
352 | 352 | ||
353 | /* functions to call the STI ROM directly */ | 353 | /* functions to call the STI ROM directly */ |
354 | 354 | ||
355 | int sti_init_graph(struct sti_struct *sti); | ||
356 | void sti_inq_conf(struct sti_struct *sti); | ||
357 | void sti_putc(struct sti_struct *sti, int c, int y, int x); | 355 | void sti_putc(struct sti_struct *sti, int c, int y, int x); |
358 | void sti_set(struct sti_struct *sti, int src_y, int src_x, | 356 | void sti_set(struct sti_struct *sti, int src_y, int src_x, |
359 | int height, int width, u8 color); | 357 | int height, int width, u8 color); |
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c index 598d35eff935..166481402412 100644 --- a/drivers/video/stifb.c +++ b/drivers/video/stifb.c | |||
@@ -1078,8 +1078,7 @@ static struct fb_ops stifb_ops = { | |||
1078 | * Initialization | 1078 | * Initialization |
1079 | */ | 1079 | */ |
1080 | 1080 | ||
1081 | int __init | 1081 | static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) |
1082 | stifb_init_fb(struct sti_struct *sti, int bpp_pref) | ||
1083 | { | 1082 | { |
1084 | struct fb_fix_screeninfo *fix; | 1083 | struct fb_fix_screeninfo *fix; |
1085 | struct fb_var_screeninfo *var; | 1084 | struct fb_var_screeninfo *var; |
@@ -1315,8 +1314,7 @@ static int stifb_disabled __initdata; | |||
1315 | int __init | 1314 | int __init |
1316 | stifb_setup(char *options); | 1315 | stifb_setup(char *options); |
1317 | 1316 | ||
1318 | int __init | 1317 | static int __init stifb_init(void) |
1319 | stifb_init(void) | ||
1320 | { | 1318 | { |
1321 | struct sti_struct *sti; | 1319 | struct sti_struct *sti; |
1322 | struct sti_struct *def_sti; | 1320 | struct sti_struct *def_sti; |