author     Paul Mundt <lethal@linux-sh.org>    2011-03-31 02:39:47 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2011-03-31 02:39:47 -0400
commit     7ea5db8efeac8627500e012aa6829ca612c5a700 (patch)
tree       90e4de22f60b989dcf0f0d7436978c0b463d5827 /drivers
parent     eee7631fdf8ae63c4f24daf66981ac1a7b55d7fd (diff)
parent     6aba74f2791287ec407e0f92487a725a25908067 (diff)

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into sh-latest
Diffstat (limited to 'drivers')
96 files changed, 2024 insertions, 2041 deletions
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 5253b271b3fe..f6b3f995f58a 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -167,7 +167,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq)
-		set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 
 	/* Setup expansion bus chip selects */
 	*data->cs0_cfg = data->cs0_bits;
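The pata_ixp4xx_cf.c hunk above is one instance of the genirq accessor rename that recurs throughout this merge (pata_rb532_cf.c, gpio-fan.c, the wm97xx touchscreen drivers and sdhci-spear.c below): set_irq_type() becomes irq_set_irq_type() with unchanged arguments. A minimal sketch of the new call, using a hypothetical IRQ number that is not taken from this commit:

#include <linux/irq.h>

static void demo_configure_irq(void)
{
	unsigned int irq = 42;	/* hypothetical IRQ number, for illustration only */

	/* was: set_irq_type(irq, IRQ_TYPE_EDGE_RISING); */
	irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
}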
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index a2a73d953840..b86d7e22595e 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -33,6 +33,11 @@
 
 #define DRV_NAME "pata_palmld"
 
+static struct gpio palmld_hdd_gpios[] = {
+	{ GPIO_NR_PALMLD_IDE_PWEN, GPIOF_INIT_HIGH, "HDD Power" },
+	{ GPIO_NR_PALMLD_IDE_RESET, GPIOF_INIT_LOW, "HDD Reset" },
+};
+
 static struct scsi_host_template palmld_sht = {
 	ATA_PIO_SHT(DRV_NAME),
 };
@@ -52,28 +57,23 @@ static __devinit int palmld_pata_probe(struct platform_device *pdev)
 
 	/* allocate host */
 	host = ata_host_alloc(&pdev->dev, 1);
-	if (!host)
-		return -ENOMEM;
+	if (!host) {
+		ret = -ENOMEM;
+		goto err1;
+	}
 
 	/* remap drive's physical memory address */
 	mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000);
-	if (!mem)
-		return -ENOMEM;
+	if (!mem) {
+		ret = -ENOMEM;
+		goto err1;
+	}
 
 	/* request and activate power GPIO, IRQ GPIO */
-	ret = gpio_request(GPIO_NR_PALMLD_IDE_PWEN, "HDD PWR");
+	ret = gpio_request_array(palmld_hdd_gpios,
+				ARRAY_SIZE(palmld_hdd_gpios));
 	if (ret)
 		goto err1;
-	ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_PWEN, 1);
-	if (ret)
-		goto err2;
-
-	ret = gpio_request(GPIO_NR_PALMLD_IDE_RESET, "HDD RST");
-	if (ret)
-		goto err2;
-	ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_RESET, 0);
-	if (ret)
-		goto err3;
 
 	/* reset the drive */
 	gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0);
@@ -96,13 +96,15 @@ static __devinit int palmld_pata_probe(struct platform_device *pdev)
 	ata_sff_std_ports(&ap->ioaddr);
 
 	/* activate host */
-	return ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING,
+	ret = ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING,
 					&palmld_sht);
+	if (ret)
+		goto err2;
+
+	return ret;
 
-err3:
-	gpio_free(GPIO_NR_PALMLD_IDE_RESET);
 err2:
-	gpio_free(GPIO_NR_PALMLD_IDE_PWEN);
+	gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios));
 err1:
 	return ret;
 }
@@ -116,8 +118,7 @@ static __devexit int palmld_pata_remove(struct platform_device *dev)
 	/* power down the HDD */
 	gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0);
 
-	gpio_free(GPIO_NR_PALMLD_IDE_RESET);
-	gpio_free(GPIO_NR_PALMLD_IDE_PWEN);
+	gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios));
 
 	return 0;
 }
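The pata_palmld.c conversion above replaces per-pin gpio_request()/gpio_direction_output() pairs and their unwind labels with a single gpio_request_array()/gpio_free_array() pair over a struct gpio table. A condensed sketch of that pattern, using hypothetical pin numbers and labels rather than the Palm LifeDrive ones:

#include <linux/kernel.h>
#include <linux/gpio.h>

/* Hypothetical pins; a real driver uses its board-specific GPIO numbers. */
static struct gpio demo_gpios[] = {
	{ 10, GPIOF_OUT_INIT_HIGH, "demo power" },	/* request + drive high */
	{ 11, GPIOF_OUT_INIT_LOW,  "demo reset" },	/* request + drive low */
};

static int demo_claim_gpios(void)
{
	/* One call requests every pin and applies its initial direction/level. */
	return gpio_request_array(demo_gpios, ARRAY_SIZE(demo_gpios));
}

static void demo_release_gpios(void)
{
	/* Single teardown call replaces the cascading err2:/err3: labels. */
	gpio_free_array(demo_gpios, ARRAY_SIZE(demo_gpios));
}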
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index baeaf938d55b..1b9d10d9c5d9 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -60,10 +60,10 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
 	struct rb532_cf_info *info = ah->private_data;
 
 	if (gpio_get_value(info->gpio_line)) {
-		set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
+		irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
 		ata_sff_interrupt(info->irq, dev_instance);
 	} else {
-		set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
+		irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
 	}
 
 	return IRQ_HANDLED;
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 1f46f1cd9225..7beb0e25f1e1 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -980,7 +980,7 @@ int tpm_open(struct inode *inode, struct file *file)
 		return -EBUSY;
 	}
 
-	chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
+	chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);
 	if (chip->data_buffer == NULL) {
 		clear_bit(0, &chip->is_open);
 		put_device(chip->dev);
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 0be30e978c85..31e71c4fc831 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2679,7 +2679,7 @@ static int __init amd64_edac_init(void)
 	mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
 	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
 	if (!(mcis && ecc_stngs))
-		goto err_ret;
+		goto err_free;
 
 	msrs = msrs_alloc();
 	if (!msrs)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d3743204a7e9..d3b295305542 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -416,7 +416,7 @@ config GPIO_JANZ_TTL
 
 config AB8500_GPIO
 	bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
-	depends on AB8500_CORE
+	depends on AB8500_CORE && BROKEN
 	help
 	  Select this to enable the AB8500 IC GPIO driver
 endif
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index f141a1de519c..89aa9fb743af 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -116,7 +116,7 @@ static int fan_alarm_init(struct gpio_fan_data *fan_data,
 		return 0;
 
 	INIT_WORK(&fan_data->alarm_work, fan_alarm_notify);
-	set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH);
+	irq_set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH);
 	err = request_irq(alarm_irq, fan_alarm_irq_handler, IRQF_SHARED,
 			  "GPIO fan alarm", fan_data);
 	if (err)
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index b732870ecc89..71f744a8e686 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -809,7 +809,7 @@ static int lm8323_suspend(struct device *dev)
 	struct lm8323_chip *lm = i2c_get_clientdata(client);
 	int i;
 
-	set_irq_wake(client->irq, 0);
+	irq_set_irq_wake(client->irq, 0);
 	disable_irq(client->irq);
 
 	mutex_lock(&lm->lock);
@@ -838,7 +838,7 @@ static int lm8323_resume(struct device *dev)
 		led_classdev_resume(&lm->pwm[i].cdev);
 
 	enable_irq(client->irq);
-	set_irq_wake(client->irq, 1);
+	irq_set_irq_wake(client->irq, 1);
 
 	return 0;
 }
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index ebe955325677..4b2a42f9f0bb 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -149,7 +149,7 @@ static int __init ams_delta_serio_init(void)
 	 * at FIQ level, switch back from edge to simple interrupt handler
 	 * to avoid bad interaction.
 	 */
-	set_irq_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
+	irq_set_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
 			handle_simple_irq);
 
 	serio_register_port(ams_delta_serio);
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index b6b8b1c7ecea..3242e7076258 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -219,7 +219,7 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
 		}
 
 		wm->pen_irq = gpio_to_irq(irq);
-		set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
+		irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
 	} else /* pen irq not supported */
 		pen_int = 0;
 
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
index 048849867643..5b0f15ec874a 100644
--- a/drivers/input/touchscreen/zylonite-wm97xx.c
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -193,7 +193,7 @@ static int zylonite_wm97xx_probe(struct platform_device *pdev)
 		gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26);
 
 	wm->pen_irq = IRQ_GPIO(gpio_touch_irq);
-	set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH);
+	irq_set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH);
 
 	wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
 			   WM97XX_GPIO_POL_HIGH,
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 9a46d64996a9..e2fea580585a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -60,15 +60,6 @@ config MFD_ASIC3
 	  This driver supports the ASIC3 multifunction chip found on many
 	  PDAs (mainly iPAQ and HTC based ones)
 
-config MFD_SH_MOBILE_SDHI
-	bool "Support for SuperH Mobile SDHI"
-	depends on SUPERH || ARCH_SHMOBILE
-	select MFD_CORE
-	select TMIO_MMC_DMA
-	---help---
-	  This driver supports the SDHI hardware block found in many
-	  SuperH Mobile SoCs.
-
 config MFD_DAVINCI_VOICECODEC
 	tristate
 	select MFD_CORE
@@ -266,11 +257,6 @@ config MFD_TMIO
 	bool
 	default n
 
-config TMIO_MMC_DMA
-	bool
-	select DMA_ENGINE
-	select DMADEVICES
-
 config MFD_T7L66XB
 	bool "Support Toshiba T7L66XB"
 	depends on ARM && HAVE_CLK
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index ef489f253402..419caa9d7dcf 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -6,7 +6,6 @@
 obj-$(CONFIG_MFD_88PM860X)	+= 88pm860x.o
 obj-$(CONFIG_MFD_SM501)		+= sm501.o
 obj-$(CONFIG_MFD_ASIC3)		+= asic3.o tmio_core.o
-obj-$(CONFIG_MFD_SH_MOBILE_SDHI)	+= sh_mobile_sdhi.o
 
 obj-$(CONFIG_HTC_EGPIO)		+= htc-egpio.o
 obj-$(CONFIG_HTC_PASIC3)	+= htc-pasic3.o
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 28852dfa310d..20e4e9395b61 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -373,7 +373,7 @@ static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
 
 	if (gru_irq_count[chiplet] == 0) {
 		gru_chip[chiplet].name = irq_name;
-		ret = set_irq_chip(irq, &gru_chip[chiplet]);
+		ret = irq_set_chip(irq, &gru_chip[chiplet]);
 		if (ret) {
 			printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
 			       GRU_DRIVER_ID_STR, -ret);
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 5ec8eddfcf6e..f5cedeccad42 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -1875,7 +1875,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
 			     unsigned int tot_sz, int max_scatter)
 {
 	unsigned int dev_addr, i, cnt, sz, ssz;
-	struct timespec ts1, ts2, ts;
+	struct timespec ts1, ts2;
 	int ret;
 
 	sz = test->area.max_tfr;
@@ -1912,7 +1912,6 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
 	}
 	getnstimeofday(&ts2);
 
-	ts = timespec_sub(ts2, ts1);
 	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
 
 	return 0;
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 797cdb5887fd..76af349c14b4 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -9,6 +9,7 @@
  * your option) any later version.
  */
 
+#include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/scatterlist.h>
 
@@ -252,6 +253,7 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
 	struct mmc_command cmd;
 	struct mmc_data data;
 	struct scatterlist sg;
+	void *data_buf;
 
 	BUG_ON(!card);
 	BUG_ON(!card->host);
@@ -263,6 +265,13 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
 	if (err)
 		return err;
 
+	/* dma onto stack is unsafe/nonportable, but callers to this
+	 * routine normally provide temporary on-stack buffers ...
+	 */
+	data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
+	if (data_buf == NULL)
+		return -ENOMEM;
+
 	memset(&mrq, 0, sizeof(struct mmc_request));
 	memset(&cmd, 0, sizeof(struct mmc_command));
 	memset(&data, 0, sizeof(struct mmc_data));
@@ -280,12 +289,15 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
 	data.sg = &sg;
 	data.sg_len = 1;
 
-	sg_init_one(&sg, scr, 8);
+	sg_init_one(&sg, data_buf, 8);
 
 	mmc_set_data_timeout(&data, card);
 
 	mmc_wait_for_req(card->host, &mrq);
 
+	memcpy(scr, data_buf, sizeof(card->raw_scr));
+	kfree(data_buf);
+
 	if (cmd.error)
 		return cmd.error;
 	if (data.error)
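The mmc_app_send_scr() change above stops handing the caller's buffer (which is often on the stack) to the scatterlist and instead bounces the transfer through a kmalloc'd buffer, copying the result back afterwards. A generic sketch of that bounce-buffer pattern; the xfer callback is a hypothetical stand-in for whatever function actually performs the DMA:

#include <linux/slab.h>
#include <linux/string.h>

static int demo_dma_safe_read(int (*xfer)(void *buf, size_t len),
			      void *dest, size_t len)
{
	void *bounce;
	int ret;

	bounce = kmalloc(len, GFP_KERNEL);	/* heap memory is DMA-able; stack memory may not be */
	if (!bounce)
		return -ENOMEM;

	ret = xfer(bounce, len);		/* the hardware only ever sees the bounce buffer */
	if (!ret)
		memcpy(dest, bounce, len);	/* copy the result back to the caller */

	kfree(bounce);
	return ret;
}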
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 1a21c6427a19..94df40531c38 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -439,13 +439,25 @@ config MMC_SDRICOH_CS
 	  To compile this driver as a module, choose M here: the
 	  module will be called sdricoh_cs.
 
+config MMC_TMIO_CORE
+	tristate
+
 config MMC_TMIO
 	tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
-	depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI
+	depends on MFD_TMIO || MFD_ASIC3
+	select MMC_TMIO_CORE
 	help
 	  This provides support for the SD/MMC cell found in TC6393XB,
 	  T7L66XB and also HTC ASIC3
 
+config MMC_SDHI
+	tristate "SH-Mobile SDHI SD/SDIO controller support"
+	depends on SUPERH || ARCH_SHMOBILE
+	select MMC_TMIO_CORE
+	help
+	  This provides support for the SDHI SD/SDIO controller found in
+	  SuperH and ARM SH-Mobile SoCs
+
 config MMC_CB710
 	tristate "ENE CB710 MMC/SD Interface support"
 	depends on PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 30aa6867745f..4f1df0aae574 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -29,7 +29,13 @@ endif
 obj-$(CONFIG_MMC_S3C)		+= s3cmci.o
 obj-$(CONFIG_MMC_SDRICOH_CS)	+= sdricoh_cs.o
 obj-$(CONFIG_MMC_TMIO)		+= tmio_mmc.o
-obj-$(CONFIG_MMC_CB710)		+= cb710-mmc.o
+obj-$(CONFIG_MMC_TMIO_CORE)	+= tmio_mmc_core.o
+tmio_mmc_core-y			:= tmio_mmc_pio.o
+ifneq ($(CONFIG_MMC_SDHI),n)
+tmio_mmc_core-y			+= tmio_mmc_dma.o
+endif
+obj-$(CONFIG_MMC_SDHI)		+= sh_mobile_sdhi.o
+obj-$(CONFIG_MMC_CB710)		+= cb710-mmc.o
 obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
 obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o
 obj-$(CONFIG_MMC_DW)		+= dw_mmc.o
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 5a614069cb00..87e1f57ec9ba 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -316,7 +316,7 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host)
 
 	/* Stop the IDMAC running */
 	temp = mci_readl(host, BMOD);
-	temp &= ~SDMMC_IDMAC_ENABLE;
+	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
 	mci_writel(host, BMOD, temp);
 }
 
@@ -385,7 +385,7 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
 
 	/* Enable the IDMAC */
 	temp = mci_readl(host, BMOD);
-	temp |= SDMMC_IDMAC_ENABLE;
+	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
 	mci_writel(host, BMOD, temp);
 
 	/* Start it running */
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 5bbb87d10251..b4a7e4fba90f 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -68,6 +68,12 @@ static struct variant_data variant_arm = {
 	.datalength_bits = 16,
 };
 
+static struct variant_data variant_arm_extended_fifo = {
+	.fifosize = 128 * 4,
+	.fifohalfsize = 64 * 4,
+	.datalength_bits = 16,
+};
+
 static struct variant_data variant_u300 = {
 	.fifosize = 16 * 4,
 	.fifohalfsize = 8 * 4,
@@ -1277,10 +1283,15 @@ static struct amba_id mmci_ids[] = {
 	{
 		.id = 0x00041180,
-		.mask = 0x000fffff,
+		.mask = 0xff0fffff,
 		.data = &variant_arm,
 	},
 	{
+		.id = 0x01041180,
+		.mask = 0xff0fffff,
+		.data = &variant_arm_extended_fifo,
+	},
+	{
 		.id = 0x00041181,
 		.mask = 0x000fffff,
 		.data = &variant_arm,
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 5530def54e5b..e2aecb7f1d5c 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -15,9 +15,11 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/slab.h>
+#include <linux/irq.h>
 #include <linux/gpio.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <linux/of_irq.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/mmc_spi.h>
 #include <linux/mmc/core.h>
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 3b5248567973..a19967d0bfc4 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -16,14 +16,40 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/gpio.h>
+#include <linux/slab.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sdhci-pltfm.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
 #include <mach/hardware.h>
 #include <mach/esdhc.h>
 #include "sdhci.h"
 #include "sdhci-pltfm.h"
 #include "sdhci-esdhc.h"
 
+/* VENDOR SPEC register */
+#define SDHCI_VENDOR_SPEC		0xC0
+#define SDHCI_VENDOR_SPEC_SDIO_QUIRK	0x00000002
+
+#define ESDHC_FLAG_GPIO_FOR_CD_WP	(1 << 0)
+/*
+ * The CMDTYPE of the CMD register (offset 0xE) should be set to
+ * "11" when the STOP CMD12 is issued on imx53 to abort one
+ * open ended multi-blk IO. Otherwise the TC INT wouldn't
+ * be generated.
+ * In exact block transfer, the controller doesn't complete the
+ * operations automatically as required at the end of the
+ * transfer and remains on hold if the abort command is not sent.
+ * As a result, the TC flag is not asserted and SW received timeout
+ * exeception. Bit1 of Vendor Spec registor is used to fix it.
+ */
+#define ESDHC_FLAG_MULTIBLK_NO_INT	(1 << 1)
+
+struct pltfm_imx_data {
+	int flags;
+	u32 scratchpad;
+};
+
 static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
 {
 	void __iomem *base = host->ioaddr + (reg & ~0x3);
@@ -34,10 +60,14 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
 
 static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
 {
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
 	/* fake CARD_PRESENT flag on mx25/35 */
 	u32 val = readl(host->ioaddr + reg);
 
-	if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+	if (unlikely((reg == SDHCI_PRESENT_STATE)
+			&& (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) {
 		struct esdhc_platform_data *boarddata =
 				host->mmc->parent->platform_data;
 
@@ -55,13 +85,26 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
 
 static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
 {
-	if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE))
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
+	if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)
+			&& (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP)))
 		/*
 		 * these interrupts won't work with a custom card_detect gpio
 		 * (only applied to mx25/35)
 		 */
 		val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
 
+	if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
+			&& (reg == SDHCI_INT_STATUS)
+			&& (val & SDHCI_INT_DATA_END))) {
+		u32 v;
+		v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
+		v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK;
+		writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
+	}
+
 	writel(val, host->ioaddr + reg);
 }
 
@@ -76,6 +119,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
 static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct pltfm_imx_data *imx_data = pltfm_host->priv;
 
 	switch (reg) {
 	case SDHCI_TRANSFER_MODE:
@@ -83,10 +127,22 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
 		 * Postpone this write, we must do it together with a
 		 * command write that is down below.
 		 */
-		pltfm_host->scratchpad = val;
+		if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
+				&& (host->cmd->opcode == SD_IO_RW_EXTENDED)
+				&& (host->cmd->data->blocks > 1)
+				&& (host->cmd->data->flags & MMC_DATA_READ)) {
+			u32 v;
+			v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
+			v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK;
+			writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
+		}
+		imx_data->scratchpad = val;
 		return;
 	case SDHCI_COMMAND:
-		writel(val << 16 | pltfm_host->scratchpad,
+		if ((host->cmd->opcode == MMC_STOP_TRANSMISSION)
+				&& (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+			val |= SDHCI_CMD_ABORTCMD;
+		writel(val << 16 | imx_data->scratchpad,
 			host->ioaddr + SDHCI_TRANSFER_MODE);
 		return;
 	case SDHCI_BLOCK_SIZE:
@@ -146,7 +202,9 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
 }
 
 static struct sdhci_ops sdhci_esdhc_ops = {
+	.read_l = esdhc_readl_le,
 	.read_w = esdhc_readw_le,
+	.write_l = esdhc_writel_le,
 	.write_w = esdhc_writew_le,
 	.write_b = esdhc_writeb_le,
 	.set_clock = esdhc_set_clock,
@@ -168,6 +226,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
 	struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
 	struct clk *clk;
 	int err;
+	struct pltfm_imx_data *imx_data;
 
 	clk = clk_get(mmc_dev(host->mmc), NULL);
 	if (IS_ERR(clk)) {
@@ -177,7 +236,15 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
 	clk_enable(clk);
 	pltfm_host->clk = clk;
 
-	if (cpu_is_mx35() || cpu_is_mx51())
+	imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL);
+	if (!imx_data) {
+		clk_disable(pltfm_host->clk);
+		clk_put(pltfm_host->clk);
+		return -ENOMEM;
+	}
+	pltfm_host->priv = imx_data;
+
+	if (!cpu_is_mx25())
 		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
 
 	if (cpu_is_mx25() || cpu_is_mx35()) {
@@ -187,6 +254,9 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
 		sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
 	}
 
+	if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51()))
+		imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
+
 	if (boarddata) {
 		err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
 		if (err) {
@@ -214,8 +284,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
 			goto no_card_detect_irq;
 		}
 
-		sdhci_esdhc_ops.write_l = esdhc_writel_le;
-		sdhci_esdhc_ops.read_l = esdhc_readl_le;
+		imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP;
 		/* Now we have a working card_detect again */
 		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
 	}
@@ -227,6 +296,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
 no_card_detect_pin:
 	boarddata->cd_gpio = err;
 not_supported:
+	kfree(imx_data);
 	return 0;
 }
 
@@ -234,6 +304,7 @@ static void esdhc_pltfm_exit(struct sdhci_host *host)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
+	struct pltfm_imx_data *imx_data = pltfm_host->priv;
 
 	if (boarddata && gpio_is_valid(boarddata->wp_gpio))
 		gpio_free(boarddata->wp_gpio);
@@ -247,6 +318,7 @@ static void esdhc_pltfm_exit(struct sdhci_host *host)
 
 	clk_disable(pltfm_host->clk);
 	clk_put(pltfm_host->clk);
+	kfree(imx_data);
 }
 
 struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index c55aae828aac..c3b08f111942 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -23,8 +23,7 @@
 				SDHCI_QUIRK_NONSTANDARD_CLOCK | \
 				SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
 				SDHCI_QUIRK_PIO_NEEDS_DELAY | \
-				SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \
-				SDHCI_QUIRK_NO_CARD_NO_RESET)
+				SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
 
 #define ESDHC_SYSTEM_CONTROL	0x2c
 #define ESDHC_CLOCK_MASK	0x0000fff0
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 08161f690ae8..ba40d6d035c7 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -74,7 +74,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
 
 struct sdhci_of_data sdhci_esdhc = {
 	/* card detection could be handled via GPIO */
-	.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
+	.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
+		  | SDHCI_QUIRK_NO_CARD_NO_RESET,
 	.ops = {
 		.read_l = sdhci_be32bs_readl,
 		.read_w = esdhc_readw,
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 2f8d46854acd..a136be706347 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -1016,16 +1016,14 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
 	struct sdhci_pci_chip *chip;
 	struct sdhci_pci_slot *slot;
 
-	u8 slots, rev, first_bar;
+	u8 slots, first_bar;
 	int ret, i;
 
 	BUG_ON(pdev == NULL);
 	BUG_ON(ent == NULL);
 
-	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);
-
 	dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
-		(int)pdev->vendor, (int)pdev->device, (int)rev);
+		(int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
 
 	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
 	if (ret)
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index ea2e44d9be5e..2b37016ad0ac 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -17,7 +17,7 @@
 
 struct sdhci_pltfm_host {
 	struct clk *clk;
-	u32 scratchpad; /* to handle quirks across io-accessor calls */
+	void *priv; /* to handle quirks across io-accessor calls */
 };
 
 extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
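The sdhci-pltfm.h hunk above, together with the sdhci-esdhc-imx.c changes, replaces the single scratchpad word with an opaque priv pointer so each platform glue driver can attach its own state to sdhci_pltfm_host. A shortened sketch of that pattern; the structure and function names here are illustrative, not the literal driver code:

#include <linux/slab.h>
#include <linux/types.h>

struct demo_pltfm_host {
	void *priv;			/* variant-specific state owned by the glue driver */
};

struct demo_variant_data {
	int flags;
	u32 scratchpad;
};

static int demo_variant_init(struct demo_pltfm_host *host)
{
	struct demo_variant_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	host->priv = data;		/* parked here at probe time */
	return 0;
}

static int demo_variant_flags(struct demo_pltfm_host *host)
{
	struct demo_variant_data *data = host->priv;

	return data->flags;		/* retrieved later, e.g. in the IO accessors */
}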
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index d70c54c7b70a..60a4c97d3d18 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -50,7 +50,7 @@ static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
 	/* val == 1 -> card removed, val == 0 -> card inserted */
 	/* if card removed - set irq for low level, else vice versa */
 	gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
-	set_irq_type(irq, gpio_irq_type);
+	irq_set_irq_type(irq, gpio_irq_type);
 
 	if (sdhci->data->card_power_gpio >= 0) {
 		if (!sdhci->data->power_always_enb) {
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 6e0969e40650..25e8bde600d1 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -45,6 +45,7 @@
 #define  SDHCI_CMD_CRC		0x08
 #define  SDHCI_CMD_INDEX	0x10
 #define  SDHCI_CMD_DATA		0x20
+#define  SDHCI_CMD_ABORTCMD	0xC0
 
 #define  SDHCI_CMD_RESP_NONE	0x00
 #define  SDHCI_CMD_RESP_LONG	0x01
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 53a63024bf11..cc701236d16f 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -23,51 +23,30 @@
 #include <linux/slab.h>
 #include <linux/platform_device.h>
 #include <linux/mmc/host.h>
-#include <linux/mfd/core.h>
+#include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mfd/tmio.h>
-#include <linux/mfd/sh_mobile_sdhi.h>
 #include <linux/sh_dma.h>
 
+#include "tmio_mmc.h"
+
 struct sh_mobile_sdhi {
 	struct clk *clk;
 	struct tmio_mmc_data mmc_data;
-	struct mfd_cell cell_mmc;
 	struct sh_dmae_slave param_tx;
 	struct sh_dmae_slave param_rx;
 	struct tmio_mmc_dma dma_priv;
 };
 
-static struct resource sh_mobile_sdhi_resources[] = {
-	{
-		.start = 0x000,
-		.end = 0x1ff,
-		.flags = IORESOURCE_MEM,
-	},
-	{
-		.start = 0,
-		.end = 0,
-		.flags = IORESOURCE_IRQ,
-	},
-};
-
-static struct mfd_cell sh_mobile_sdhi_cell = {
-	.name = "tmio-mmc",
-	.num_resources = ARRAY_SIZE(sh_mobile_sdhi_resources),
-	.resources = sh_mobile_sdhi_resources,
-};
-
-static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state)
+static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state)
 {
-	struct platform_device *pdev = to_platform_device(tmio->dev.parent);
 	struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
 
 	if (p && p->set_pwr)
 		p->set_pwr(pdev, state);
 }
 
-static int sh_mobile_sdhi_get_cd(struct platform_device *tmio)
+static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
 {
-	struct platform_device *pdev = to_platform_device(tmio->dev.parent);
 	struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
 
 	if (p && p->get_cd)
@@ -81,20 +60,9 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
 	struct sh_mobile_sdhi *priv;
 	struct tmio_mmc_data *mmc_data;
 	struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
-	struct resource *mem;
+	struct tmio_mmc_host *host;
 	char clk_name[8];
-	int ret, irq;
-
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem)
-		dev_err(&pdev->dev, "missing MEM resource\n");
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0)
-		dev_err(&pdev->dev, "missing IRQ resource\n");
-
-	if (!mem || (irq < 0))
-		return -EINVAL;
+	int ret;
 
 	priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
 	if (priv == NULL) {
@@ -109,8 +77,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->clk)) {
 		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
 		ret = PTR_ERR(priv->clk);
-		kfree(priv);
-		return ret;
+		goto eclkget;
 	}
 
 	clk_enable(priv->clk);
@@ -123,6 +90,15 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
 		mmc_data->flags = p->tmio_flags;
 		mmc_data->ocr_mask = p->tmio_ocr_mask;
 		mmc_data->capabilities |= p->tmio_caps;
+
+		if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
+			priv->param_tx.slave_id = p->dma_slave_tx;
+			priv->param_rx.slave_id = p->dma_slave_rx;
+			priv->dma_priv.chan_priv_tx = &priv->param_tx;
+			priv->dma_priv.chan_priv_rx = &priv->param_rx;
+			priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
+			mmc_data->dma = &priv->dma_priv;
+		}
 	}
 
 	/*
@@ -136,36 +112,30 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
 	 */
 	mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
 
-	if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
-		priv->param_tx.slave_id = p->dma_slave_tx;
-		priv->param_rx.slave_id = p->dma_slave_rx;
-		priv->dma_priv.chan_priv_tx = &priv->param_tx;
-		priv->dma_priv.chan_priv_rx = &priv->param_rx;
-		priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
-		mmc_data->dma = &priv->dma_priv;
-	}
+	ret = tmio_mmc_host_probe(&host, pdev, mmc_data);
+	if (ret < 0)
+		goto eprobe;
 
-	memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc));
-	priv->cell_mmc.mfd_data = mmc_data;
+	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
+		(unsigned long)host->ctl, host->irq);
 
-	platform_set_drvdata(pdev, priv);
-
-	ret = mfd_add_devices(&pdev->dev, pdev->id,
-			      &priv->cell_mmc, 1, mem, irq);
-	if (ret) {
-		clk_disable(priv->clk);
-		clk_put(priv->clk);
-		kfree(priv);
-	}
-
+	return ret;
+
+eprobe:
+	clk_disable(priv->clk);
+	clk_put(priv->clk);
+eclkget:
+	kfree(priv);
 	return ret;
 }
 
 static int sh_mobile_sdhi_remove(struct platform_device *pdev)
 {
-	struct sh_mobile_sdhi *priv = platform_get_drvdata(pdev);
+	struct mmc_host *mmc = platform_get_drvdata(pdev);
+	struct tmio_mmc_host *host = mmc_priv(mmc);
+	struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
 
-	mfd_remove_devices(&pdev->dev);
+	tmio_mmc_host_remove(host);
 	clk_disable(priv->clk);
 	clk_put(priv->clk);
 	kfree(priv);
@@ -198,3 +168,4 @@ module_exit(sh_mobile_sdhi_exit);
 MODULE_DESCRIPTION("SuperH Mobile SDHI driver");
 MODULE_AUTHOR("Magnus Damm");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sh_mobile_sdhi");
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index ab1adeabdd22..79c568461d59 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/mmc/tmio_mmc.c | 2 | * linux/drivers/mmc/host/tmio_mmc.c |
3 | * | 3 | * |
4 | * Copyright (C) 2004 Ian Molton | 4 | * Copyright (C) 2007 Ian Molton |
5 | * Copyright (C) 2007 Ian Molton | 5 | * Copyright (C) 2004 Ian Molton |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
@@ -11,1182 +11,17 @@ | |||
11 | * Driver for the MMC / SD / SDIO cell found in: | 11 | * Driver for the MMC / SD / SDIO cell found in: |
12 | * | 12 | * |
13 | * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 | 13 | * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 |
14 | * | ||
15 | * This driver draws mainly on scattered spec sheets, Reverse engineering | ||
16 | * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit | ||
17 | * support). (Further 4 bit support from a later datasheet). | ||
18 | * | ||
19 | * TODO: | ||
20 | * Investigate using a workqueue for PIO transfers | ||
21 | * Eliminate FIXMEs | ||
22 | * SDIO support | ||
23 | * Better Power management | ||
24 | * Handle MMC errors better | ||
25 | * double buffer support | ||
26 | * | ||
27 | */ | 14 | */ |
28 | 15 | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/device.h> | 16 | #include <linux/device.h> |
31 | #include <linux/dmaengine.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/irq.h> | ||
36 | #include <linux/mfd/core.h> | 17 | #include <linux/mfd/core.h> |
37 | #include <linux/mfd/tmio.h> | 18 | #include <linux/mfd/tmio.h> |
38 | #include <linux/mmc/host.h> | 19 | #include <linux/mmc/host.h> |
39 | #include <linux/module.h> | 20 | #include <linux/module.h> |
40 | #include <linux/pagemap.h> | 21 | #include <linux/pagemap.h> |
41 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
42 | #include <linux/workqueue.h> | ||
43 | #include <linux/spinlock.h> | ||
44 | |||
45 | #define CTL_SD_CMD 0x00 | ||
46 | #define CTL_ARG_REG 0x04 | ||
47 | #define CTL_STOP_INTERNAL_ACTION 0x08 | ||
48 | #define CTL_XFER_BLK_COUNT 0xa | ||
49 | #define CTL_RESPONSE 0x0c | ||
50 | #define CTL_STATUS 0x1c | ||
51 | #define CTL_IRQ_MASK 0x20 | ||
52 | #define CTL_SD_CARD_CLK_CTL 0x24 | ||
53 | #define CTL_SD_XFER_LEN 0x26 | ||
54 | #define CTL_SD_MEM_CARD_OPT 0x28 | ||
55 | #define CTL_SD_ERROR_DETAIL_STATUS 0x2c | ||
56 | #define CTL_SD_DATA_PORT 0x30 | ||
57 | #define CTL_TRANSACTION_CTL 0x34 | ||
58 | #define CTL_SDIO_STATUS 0x36 | ||
59 | #define CTL_SDIO_IRQ_MASK 0x38 | ||
60 | #define CTL_RESET_SD 0xe0 | ||
61 | #define CTL_SDIO_REGS 0x100 | ||
62 | #define CTL_CLK_AND_WAIT_CTL 0x138 | ||
63 | #define CTL_RESET_SDIO 0x1e0 | ||
64 | |||
65 | /* Definitions for values the CTRL_STATUS register can take. */ | ||
66 | #define TMIO_STAT_CMDRESPEND 0x00000001 | ||
67 | #define TMIO_STAT_DATAEND 0x00000004 | ||
68 | #define TMIO_STAT_CARD_REMOVE 0x00000008 | ||
69 | #define TMIO_STAT_CARD_INSERT 0x00000010 | ||
70 | #define TMIO_STAT_SIGSTATE 0x00000020 | ||
71 | #define TMIO_STAT_WRPROTECT 0x00000080 | ||
72 | #define TMIO_STAT_CARD_REMOVE_A 0x00000100 | ||
73 | #define TMIO_STAT_CARD_INSERT_A 0x00000200 | ||
74 | #define TMIO_STAT_SIGSTATE_A 0x00000400 | ||
75 | #define TMIO_STAT_CMD_IDX_ERR 0x00010000 | ||
76 | #define TMIO_STAT_CRCFAIL 0x00020000 | ||
77 | #define TMIO_STAT_STOPBIT_ERR 0x00040000 | ||
78 | #define TMIO_STAT_DATATIMEOUT 0x00080000 | ||
79 | #define TMIO_STAT_RXOVERFLOW 0x00100000 | ||
80 | #define TMIO_STAT_TXUNDERRUN 0x00200000 | ||
81 | #define TMIO_STAT_CMDTIMEOUT 0x00400000 | ||
82 | #define TMIO_STAT_RXRDY 0x01000000 | ||
83 | #define TMIO_STAT_TXRQ 0x02000000 | ||
84 | #define TMIO_STAT_ILL_FUNC 0x20000000 | ||
85 | #define TMIO_STAT_CMD_BUSY 0x40000000 | ||
86 | #define TMIO_STAT_ILL_ACCESS 0x80000000 | ||
87 | |||
88 | /* Definitions for values the CTRL_SDIO_STATUS register can take. */ | ||
89 | #define TMIO_SDIO_STAT_IOIRQ 0x0001 | ||
90 | #define TMIO_SDIO_STAT_EXPUB52 0x4000 | ||
91 | #define TMIO_SDIO_STAT_EXWT 0x8000 | ||
92 | #define TMIO_SDIO_MASK_ALL 0xc007 | ||
93 | |||
94 | /* Define some IRQ masks */ | ||
95 | /* This is the mask used at reset by the chip */ | ||
96 | #define TMIO_MASK_ALL 0x837f031d | ||
97 | #define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) | ||
98 | #define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) | ||
99 | #define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ | ||
100 | TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) | ||
101 | #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) | ||
102 | |||
103 | #define enable_mmc_irqs(host, i) \ | ||
104 | do { \ | ||
105 | u32 mask;\ | ||
106 | mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ | ||
107 | mask &= ~((i) & TMIO_MASK_IRQ); \ | ||
108 | sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ | ||
109 | } while (0) | ||
110 | |||
111 | #define disable_mmc_irqs(host, i) \ | ||
112 | do { \ | ||
113 | u32 mask;\ | ||
114 | mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ | ||
115 | mask |= ((i) & TMIO_MASK_IRQ); \ | ||
116 | sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ | ||
117 | } while (0) | ||
118 | |||
119 | #define ack_mmc_irqs(host, i) \ | ||
120 | do { \ | ||
121 | sd_ctrl_write32((host), CTL_STATUS, ~(i)); \ | ||
122 | } while (0) | ||
123 | |||
124 | /* This is arbitrary, just noone needed any higher alignment yet */ | ||
125 | #define MAX_ALIGN 4 | ||
126 | |||
127 | struct tmio_mmc_host { | ||
128 | void __iomem *ctl; | ||
129 | unsigned long bus_shift; | ||
130 | struct mmc_command *cmd; | ||
131 | struct mmc_request *mrq; | ||
132 | struct mmc_data *data; | ||
133 | struct mmc_host *mmc; | ||
134 | int irq; | ||
135 | unsigned int sdio_irq_enabled; | ||
136 | |||
137 | /* Callbacks for clock / power control */ | ||
138 | void (*set_pwr)(struct platform_device *host, int state); | ||
139 | void (*set_clk_div)(struct platform_device *host, int state); | ||
140 | |||
141 | /* pio related stuff */ | ||
142 | struct scatterlist *sg_ptr; | ||
143 | struct scatterlist *sg_orig; | ||
144 | unsigned int sg_len; | ||
145 | unsigned int sg_off; | ||
146 | |||
147 | struct platform_device *pdev; | ||
148 | |||
149 | /* DMA support */ | ||
150 | struct dma_chan *chan_rx; | ||
151 | struct dma_chan *chan_tx; | ||
152 | struct tasklet_struct dma_complete; | ||
153 | struct tasklet_struct dma_issue; | ||
154 | #ifdef CONFIG_TMIO_MMC_DMA | ||
155 | u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN))); | ||
156 | struct scatterlist bounce_sg; | ||
157 | #endif | ||
158 | |||
159 | /* Track lost interrupts */ | ||
160 | struct delayed_work delayed_reset_work; | ||
161 | spinlock_t lock; | ||
162 | unsigned long last_req_ts; | ||
163 | }; | ||
164 | |||
165 | static void tmio_check_bounce_buffer(struct tmio_mmc_host *host); | ||
166 | |||
167 | static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) | ||
168 | { | ||
169 | return readw(host->ctl + (addr << host->bus_shift)); | ||
170 | } | ||
171 | |||
172 | static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, | ||
173 | u16 *buf, int count) | ||
174 | { | ||
175 | readsw(host->ctl + (addr << host->bus_shift), buf, count); | ||
176 | } | ||
177 | |||
178 | static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) | ||
179 | { | ||
180 | return readw(host->ctl + (addr << host->bus_shift)) | | ||
181 | readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; | ||
182 | } | ||
183 | |||
184 | static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) | ||
185 | { | ||
186 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
187 | } | ||
188 | |||
189 | static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, | ||
190 | u16 *buf, int count) | ||
191 | { | ||
192 | writesw(host->ctl + (addr << host->bus_shift), buf, count); | ||
193 | } | ||
194 | |||
195 | static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) | ||
196 | { | ||
197 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
198 | writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); | ||
199 | } | ||
200 | |||
201 | static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) | ||
202 | { | ||
203 | host->sg_len = data->sg_len; | ||
204 | host->sg_ptr = data->sg; | ||
205 | host->sg_orig = data->sg; | ||
206 | host->sg_off = 0; | ||
207 | } | ||
208 | |||
209 | static int tmio_mmc_next_sg(struct tmio_mmc_host *host) | ||
210 | { | ||
211 | host->sg_ptr = sg_next(host->sg_ptr); | ||
212 | host->sg_off = 0; | ||
213 | return --host->sg_len; | ||
214 | } | ||
215 | |||
216 | static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags) | ||
217 | { | ||
218 | local_irq_save(*flags); | ||
219 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; | ||
220 | } | ||
221 | |||
222 | static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt) | ||
223 | { | ||
224 | kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); | ||
225 | local_irq_restore(*flags); | ||
226 | } | ||
227 | |||
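The two helpers above must be used as a strict pair: the map helper returns the page mapping plus sg->offset, and the unmap helper subtracts that offset again before calling kunmap_atomic(), so a caller has to hand back exactly the pointer it was given. A minimal usage sketch (sg and buf are hypothetical names here; the real PIO path below uses the 16-bit FIFO accessors rather than memcpy):

    /* Illustrative only: map one sg element, copy out, unmap. */
    unsigned long flags;
    void *virt = tmio_mmc_kmap_atomic(sg, &flags);  /* IRQs are now off  */

    memcpy(buf, virt, sg->length);
    tmio_mmc_kunmap_atomic(sg, &flags, virt);       /* same pointer back */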
228 | #ifdef CONFIG_MMC_DEBUG | ||
229 | |||
230 | #define STATUS_TO_TEXT(a, status, i) \ | ||
231 | do { \ | ||
232 | if (status & TMIO_STAT_##a) { \ | ||
233 | if (i++) \ | ||
234 | printk(" | "); \ | ||
235 | printk(#a); \ | ||
236 | } \ | ||
237 | } while (0) | ||
238 | |||
239 | void pr_debug_status(u32 status) | ||
240 | { | ||
241 | int i = 0; | ||
242 | printk(KERN_DEBUG "status: %08x = ", status); | ||
243 | STATUS_TO_TEXT(CARD_REMOVE, status, i); | ||
244 | STATUS_TO_TEXT(CARD_INSERT, status, i); | ||
245 | STATUS_TO_TEXT(SIGSTATE, status, i); | ||
246 | STATUS_TO_TEXT(WRPROTECT, status, i); | ||
247 | STATUS_TO_TEXT(CARD_REMOVE_A, status, i); | ||
248 | STATUS_TO_TEXT(CARD_INSERT_A, status, i); | ||
249 | STATUS_TO_TEXT(SIGSTATE_A, status, i); | ||
250 | STATUS_TO_TEXT(CMD_IDX_ERR, status, i); | ||
251 | STATUS_TO_TEXT(STOPBIT_ERR, status, i); | ||
252 | STATUS_TO_TEXT(ILL_FUNC, status, i); | ||
253 | STATUS_TO_TEXT(CMD_BUSY, status, i); | ||
254 | STATUS_TO_TEXT(CMDRESPEND, status, i); | ||
255 | STATUS_TO_TEXT(DATAEND, status, i); | ||
256 | STATUS_TO_TEXT(CRCFAIL, status, i); | ||
257 | STATUS_TO_TEXT(DATATIMEOUT, status, i); | ||
258 | STATUS_TO_TEXT(CMDTIMEOUT, status, i); | ||
259 | STATUS_TO_TEXT(RXOVERFLOW, status, i); | ||
260 | STATUS_TO_TEXT(TXUNDERRUN, status, i); | ||
261 | STATUS_TO_TEXT(RXRDY, status, i); | ||
262 | STATUS_TO_TEXT(TXRQ, status, i); | ||
263 | STATUS_TO_TEXT(ILL_ACCESS, status, i); | ||
264 | printk("\n"); | ||
265 | } | ||
266 | |||
267 | #else | ||
268 | #define pr_debug_status(s) do { } while (0) | ||
269 | #endif | ||
270 | |||
271 | static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | ||
272 | { | ||
273 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
274 | |||
275 | if (enable) { | ||
276 | host->sdio_irq_enabled = 1; | ||
277 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); | ||
278 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, | ||
279 | (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); | ||
280 | } else { | ||
281 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); | ||
282 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); | ||
283 | host->sdio_irq_enabled = 0; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) | ||
288 | { | ||
289 | u32 clk = 0, clock; | ||
290 | |||
291 | if (new_clock) { | ||
292 | for (clock = host->mmc->f_min, clk = 0x80000080; | ||
293 | new_clock >= (clock<<1); clk >>= 1) | ||
294 | clock <<= 1; | ||
295 | clk |= 0x100; | ||
296 | } | ||
297 | |||
298 | if (host->set_clk_div) | ||
299 | host->set_clk_div(host->pdev, (clk>>22) & 1); | ||
300 | |||
301 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); | ||
302 | } | ||
303 | |||
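The divider search in tmio_mmc_set_clock() is easier to follow with numbers: it starts from f_min (set to f_max / 512 in the probe code further down), keeps doubling while the requested rate is at least twice the current one, shifts the one-hot divider word right once per doubling, and finally ORs in 0x100, the clock-enable bit that tmio_mmc_clk_start() / tmio_mmc_clk_stop() toggle. A standalone trace of just that arithmetic, assuming a 24 MHz hclk (the hardware meaning of the resulting divider bits is not restated here):

    /* Sketch only: the tmio_mmc_set_clock() loop with hclk = 24 MHz. */
    unsigned int f_min = 24000000 / 512;    /* 46875 Hz, as set in probe */
    unsigned int new_clock = 12000000;      /* requested rate            */
    u32 clk = 0x80000080, clock;

    for (clock = f_min; new_clock >= (clock << 1); clk >>= 1)
        clock <<= 1;                        /* 8 doublings: 12 MHz exactly */
    clk |= 0x100;                           /* clk == 0x00800100           */
    /* clk & 0x1ff == 0x100 is what reaches CTL_SD_CARD_CLK_CTL, and
     * (clk >> 22) & 1 == 0 is what set_clk_div() would be passed.   */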
304 | static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) | ||
305 | { | ||
306 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
307 | |||
308 | /* | ||
309 | * Testing on sh-mobile showed that SDIO IRQs are unmasked when | ||
310 | * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the | ||
311 | * device IRQ here and restore the SDIO IRQ mask before | ||
312 | * re-enabling the device IRQ. | ||
313 | */ | ||
314 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
315 | disable_irq(host->irq); | ||
316 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); | ||
317 | msleep(10); | ||
318 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
319 | tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); | ||
320 | enable_irq(host->irq); | ||
321 | } | ||
322 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & | ||
323 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
324 | msleep(10); | ||
325 | } | ||
326 | |||
327 | static void tmio_mmc_clk_start(struct tmio_mmc_host *host) | ||
328 | { | ||
329 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
330 | |||
331 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | | ||
332 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
333 | msleep(10); | ||
334 | /* see comment in tmio_mmc_clk_stop above */ | ||
335 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
336 | disable_irq(host->irq); | ||
337 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); | ||
338 | msleep(10); | ||
339 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
340 | tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); | ||
341 | enable_irq(host->irq); | ||
342 | } | ||
343 | } | ||
344 | |||
345 | static void reset(struct tmio_mmc_host *host) | ||
346 | { | ||
347 | /* FIXME - should we set the stop clock reg here? */ | ||
348 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); | ||
349 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); | ||
350 | msleep(10); | ||
351 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); | ||
352 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); | ||
353 | msleep(10); | ||
354 | } | ||
355 | |||
356 | static void tmio_mmc_reset_work(struct work_struct *work) | ||
357 | { | ||
358 | struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, | ||
359 | delayed_reset_work.work); | ||
360 | struct mmc_request *mrq; | ||
361 | unsigned long flags; | ||
362 | |||
363 | spin_lock_irqsave(&host->lock, flags); | ||
364 | mrq = host->mrq; | ||
365 | |||
366 | /* request already finished */ | ||
367 | if (!mrq | ||
368 | || time_is_after_jiffies(host->last_req_ts + | ||
369 | msecs_to_jiffies(2000))) { | ||
370 | spin_unlock_irqrestore(&host->lock, flags); | ||
371 | return; | ||
372 | } | ||
373 | |||
374 | dev_warn(&host->pdev->dev, | ||
375 | "timeout waiting for hardware interrupt (CMD%u)\n", | ||
376 | mrq->cmd->opcode); | ||
377 | |||
378 | if (host->data) | ||
379 | host->data->error = -ETIMEDOUT; | ||
380 | else if (host->cmd) | ||
381 | host->cmd->error = -ETIMEDOUT; | ||
382 | else | ||
383 | mrq->cmd->error = -ETIMEDOUT; | ||
384 | |||
385 | host->cmd = NULL; | ||
386 | host->data = NULL; | ||
387 | host->mrq = NULL; | ||
388 | |||
389 | spin_unlock_irqrestore(&host->lock, flags); | ||
390 | |||
391 | reset(host); | ||
392 | |||
393 | mmc_request_done(host->mmc, mrq); | ||
394 | } | ||
395 | |||
396 | static void | ||
397 | tmio_mmc_finish_request(struct tmio_mmc_host *host) | ||
398 | { | ||
399 | struct mmc_request *mrq = host->mrq; | ||
400 | |||
401 | if (!mrq) | ||
402 | return; | ||
403 | |||
404 | host->mrq = NULL; | ||
405 | host->cmd = NULL; | ||
406 | host->data = NULL; | ||
407 | |||
408 | cancel_delayed_work(&host->delayed_reset_work); | ||
409 | |||
410 | mmc_request_done(host->mmc, mrq); | ||
411 | } | ||
412 | |||
413 | /* These are the bitmasks the tmio chip requires to implement the MMC response | ||
414 | * types. Note that R1 and R6 are the same in this scheme. */ | ||
415 | #define APP_CMD 0x0040 | ||
416 | #define RESP_NONE 0x0300 | ||
417 | #define RESP_R1 0x0400 | ||
418 | #define RESP_R1B 0x0500 | ||
419 | #define RESP_R2 0x0600 | ||
420 | #define RESP_R3 0x0700 | ||
421 | #define DATA_PRESENT 0x0800 | ||
422 | #define TRANSFER_READ 0x1000 | ||
423 | #define TRANSFER_MULTI 0x2000 | ||
424 | #define SECURITY_CMD 0x4000 | ||
425 | |||
426 | static int | ||
427 | tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) | ||
428 | { | ||
429 | struct mmc_data *data = host->data; | ||
430 | int c = cmd->opcode; | ||
431 | |||
432 | /* Command 12 is handled by hardware */ | ||
433 | if (cmd->opcode == 12 && !cmd->arg) { | ||
434 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | switch (mmc_resp_type(cmd)) { | ||
439 | case MMC_RSP_NONE: c |= RESP_NONE; break; | ||
440 | case MMC_RSP_R1: c |= RESP_R1; break; | ||
441 | case MMC_RSP_R1B: c |= RESP_R1B; break; | ||
442 | case MMC_RSP_R2: c |= RESP_R2; break; | ||
443 | case MMC_RSP_R3: c |= RESP_R3; break; | ||
444 | default: | ||
445 | pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); | ||
446 | return -EINVAL; | ||
447 | } | ||
448 | |||
449 | host->cmd = cmd; | ||
450 | |||
451 | /* FIXME - this seems to be OK commented out, but the spec suggests this bit | ||
452 | * should be set when issuing app commands. | ||
453 | * if(cmd->flags & MMC_FLAG_ACMD) | ||
454 | * c |= APP_CMD; | ||
455 | */ | ||
456 | if (data) { | ||
457 | c |= DATA_PRESENT; | ||
458 | if (data->blocks > 1) { | ||
459 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); | ||
460 | c |= TRANSFER_MULTI; | ||
461 | } | ||
462 | if (data->flags & MMC_DATA_READ) | ||
463 | c |= TRANSFER_READ; | ||
464 | } | ||
465 | |||
466 | enable_mmc_irqs(host, TMIO_MASK_CMD); | ||
467 | |||
468 | /* Fire off the command */ | ||
469 | sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); | ||
470 | sd_ctrl_write16(host, CTL_SD_CMD, c); | ||
471 | |||
472 | return 0; | ||
473 | } | ||
474 | |||
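As a concrete illustration of how these bits combine with an opcode (not additional driver code): a single-block read, i.e. CMD17 with an R1 response and data flowing towards the host, assembled the same way tmio_mmc_start_command() assembles c:

    /* Illustrative only: CMD17 (READ_SINGLE_BLOCK). */
    int c = 17;             /* opcode */
    c |= RESP_R1;           /* 0x0400 */
    c |= DATA_PRESENT;      /* 0x0800 */
    c |= TRANSFER_READ;     /* 0x1000 */
    /* c == 0x1c11; a multi-block read would also OR in TRANSFER_MULTI. */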
475 | /* | ||
476 | * This chip always returns (at least?) as much data as you ask for. | ||
477 | * I'm unsure what happens if you ask for less than a block. This should be | ||
478 | * looked into to ensure that a funny-length read doesn't hose the controller. | ||
479 | */ | ||
480 | static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | ||
481 | { | ||
482 | struct mmc_data *data = host->data; | ||
483 | void *sg_virt; | ||
484 | unsigned short *buf; | ||
485 | unsigned int count; | ||
486 | unsigned long flags; | ||
487 | |||
488 | if (!data) { | ||
489 | pr_debug("Spurious PIO IRQ\n"); | ||
490 | return; | ||
491 | } | ||
492 | |||
493 | sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); | ||
494 | buf = (unsigned short *)(sg_virt + host->sg_off); | ||
495 | |||
496 | count = host->sg_ptr->length - host->sg_off; | ||
497 | if (count > data->blksz) | ||
498 | count = data->blksz; | ||
499 | |||
500 | pr_debug("count: %08x offset: %08x flags %08x\n", | ||
501 | count, host->sg_off, data->flags); | ||
502 | |||
503 | /* Transfer the data */ | ||
504 | if (data->flags & MMC_DATA_READ) | ||
505 | sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
506 | else | ||
507 | sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
508 | |||
509 | host->sg_off += count; | ||
510 | |||
511 | tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); | ||
512 | |||
513 | if (host->sg_off == host->sg_ptr->length) | ||
514 | tmio_mmc_next_sg(host); | ||
515 | |||
516 | return; | ||
517 | } | ||
518 | |||
519 | /* needs to be called with host->lock held */ | ||
520 | static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) | ||
521 | { | ||
522 | struct mmc_data *data = host->data; | ||
523 | struct mmc_command *stop; | ||
524 | |||
525 | host->data = NULL; | ||
526 | |||
527 | if (!data) { | ||
528 | dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); | ||
529 | return; | ||
530 | } | ||
531 | stop = data->stop; | ||
532 | |||
533 | /* FIXME - return correct transfer count on errors */ | ||
534 | if (!data->error) | ||
535 | data->bytes_xfered = data->blocks * data->blksz; | ||
536 | else | ||
537 | data->bytes_xfered = 0; | ||
538 | |||
539 | pr_debug("Completed data request\n"); | ||
540 | |||
541 | /* | ||
542 | * FIXME: other drivers allow an optional stop command of any given type | ||
543 | * which we don't do, as the chip can auto-generate them. | ||
544 | * Perhaps we can be smarter about when to use auto CMD12 and | ||
545 | * only issue the auto request when we know this is the desired | ||
546 | * stop command, allowing fallback to the stop command the | ||
547 | * upper layers expect. For now, we do what works. | ||
548 | */ | ||
549 | |||
550 | if (data->flags & MMC_DATA_READ) { | ||
551 | if (!host->chan_rx) | ||
552 | disable_mmc_irqs(host, TMIO_MASK_READOP); | ||
553 | else | ||
554 | tmio_check_bounce_buffer(host); | ||
555 | dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", | ||
556 | host->mrq); | ||
557 | } else { | ||
558 | if (!host->chan_tx) | ||
559 | disable_mmc_irqs(host, TMIO_MASK_WRITEOP); | ||
560 | dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", | ||
561 | host->mrq); | ||
562 | } | ||
563 | |||
564 | if (stop) { | ||
565 | if (stop->opcode == 12 && !stop->arg) | ||
566 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); | ||
567 | else | ||
568 | BUG(); | ||
569 | } | ||
570 | |||
571 | tmio_mmc_finish_request(host); | ||
572 | } | ||
573 | |||
574 | static void tmio_mmc_data_irq(struct tmio_mmc_host *host) | ||
575 | { | ||
576 | struct mmc_data *data; | ||
577 | spin_lock(&host->lock); | ||
578 | data = host->data; | ||
579 | |||
580 | if (!data) | ||
581 | goto out; | ||
582 | |||
583 | if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) { | ||
584 | /* | ||
585 | * Has all data been written out yet? Testing on SuperH showed | ||
586 | * that in most cases the first interrupt already comes with the | ||
587 | * BUSY status bit clear, but on some operations, like mount or | ||
588 | * at the beginning of a write / sync / umount, there is one | ||
589 | * DATAEND interrupt with the BUSY bit set; in these cases | ||
590 | * waiting for one more interrupt fixes the problem. | ||
591 | */ | ||
592 | if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { | ||
593 | disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
594 | tasklet_schedule(&host->dma_complete); | ||
595 | } | ||
596 | } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) { | ||
597 | disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
598 | tasklet_schedule(&host->dma_complete); | ||
599 | } else { | ||
600 | tmio_mmc_do_data_irq(host); | ||
601 | } | ||
602 | out: | ||
603 | spin_unlock(&host->lock); | ||
604 | } | ||
605 | |||
606 | static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, | ||
607 | unsigned int stat) | ||
608 | { | ||
609 | struct mmc_command *cmd = host->cmd; | ||
610 | int i, addr; | ||
611 | |||
612 | spin_lock(&host->lock); | ||
613 | |||
614 | if (!host->cmd) { | ||
615 | pr_debug("Spurious CMD irq\n"); | ||
616 | goto out; | ||
617 | } | ||
618 | |||
619 | host->cmd = NULL; | ||
620 | |||
621 | /* This controller is sicker than the PXA one. Not only do we need to | ||
622 | * drop the top 8 bits of the first response word, we also need to | ||
623 | * modify the order of the response for short response command types. | ||
624 | */ | ||
625 | |||
626 | for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) | ||
627 | cmd->resp[i] = sd_ctrl_read32(host, addr); | ||
628 | |||
629 | if (cmd->flags & MMC_RSP_136) { | ||
630 | cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); | ||
631 | cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); | ||
632 | cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); | ||
633 | cmd->resp[3] <<= 8; | ||
634 | } else if (cmd->flags & MMC_RSP_R3) { | ||
635 | cmd->resp[0] = cmd->resp[3]; | ||
636 | } | ||
637 | |||
638 | if (stat & TMIO_STAT_CMDTIMEOUT) | ||
639 | cmd->error = -ETIMEDOUT; | ||
640 | else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) | ||
641 | cmd->error = -EILSEQ; | ||
642 | |||
643 | /* If there is data to handle, we enable data IRQs here, and | ||
644 | * we will ultimately finish the request in the data_end handler. | ||
645 | * If there's no data or we encountered an error, finish now. | ||
646 | */ | ||
647 | if (host->data && !cmd->error) { | ||
648 | if (host->data->flags & MMC_DATA_READ) { | ||
649 | if (!host->chan_rx) | ||
650 | enable_mmc_irqs(host, TMIO_MASK_READOP); | ||
651 | } else { | ||
652 | if (!host->chan_tx) | ||
653 | enable_mmc_irqs(host, TMIO_MASK_WRITEOP); | ||
654 | else | ||
655 | tasklet_schedule(&host->dma_issue); | ||
656 | } | ||
657 | } else { | ||
658 | tmio_mmc_finish_request(host); | ||
659 | } | ||
660 | |||
661 | out: | ||
662 | spin_unlock(&host->lock); | ||
663 | |||
664 | return; | ||
665 | } | ||
666 | |||
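To restate the response shuffle above with symbolic values (purely the arithmetic the code performs): if the four 32-bit reads from CTL_RESPONSE upwards return W0, W1, W2 and W3, the loop stores them as resp[3] = W0 down to resp[0] = W3, and for a 136-bit response the fix-up then leaves:

    /* resp[0] = (W3 << 8) | (W2 >> 24);
     * resp[1] = (W2 << 8) | (W1 >> 24);
     * resp[2] = (W1 << 8) | (W0 >> 24);
     * resp[3] =  W0 << 8;
     * i.e. each word is shifted up one byte and refilled from the next
     * one, which is the reordering the comment above refers to. */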
667 | static irqreturn_t tmio_mmc_irq(int irq, void *devid) | ||
668 | { | ||
669 | struct tmio_mmc_host *host = devid; | ||
670 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
671 | unsigned int ireg, irq_mask, status; | ||
672 | unsigned int sdio_ireg, sdio_irq_mask, sdio_status; | ||
673 | |||
674 | pr_debug("MMC IRQ begin\n"); | ||
675 | |||
676 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
677 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
678 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
679 | |||
680 | sdio_ireg = 0; | ||
681 | if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
682 | sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); | ||
683 | sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); | ||
684 | sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; | ||
685 | |||
686 | sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); | ||
687 | |||
688 | if (sdio_ireg && !host->sdio_irq_enabled) { | ||
689 | pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", | ||
690 | sdio_status, sdio_irq_mask, sdio_ireg); | ||
691 | tmio_mmc_enable_sdio_irq(host->mmc, 0); | ||
692 | goto out; | ||
693 | } | ||
694 | 23 | ||
695 | if (host->mmc->caps & MMC_CAP_SDIO_IRQ && | 24 | #include "tmio_mmc.h" |
696 | sdio_ireg & TMIO_SDIO_STAT_IOIRQ) | ||
697 | mmc_signal_sdio_irq(host->mmc); | ||
698 | |||
699 | if (sdio_ireg) | ||
700 | goto out; | ||
701 | } | ||
702 | |||
703 | pr_debug_status(status); | ||
704 | pr_debug_status(ireg); | ||
705 | |||
706 | if (!ireg) { | ||
707 | disable_mmc_irqs(host, status & ~irq_mask); | ||
708 | |||
709 | pr_warning("tmio_mmc: Spurious irq, disabling! " | ||
710 | "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); | ||
711 | pr_debug_status(status); | ||
712 | |||
713 | goto out; | ||
714 | } | ||
715 | |||
716 | while (ireg) { | ||
717 | /* Card insert / remove attempts */ | ||
718 | if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { | ||
719 | ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | | ||
720 | TMIO_STAT_CARD_REMOVE); | ||
721 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
722 | } | ||
723 | |||
724 | /* CRC and other errors */ | ||
725 | /* if (ireg & TMIO_STAT_ERR_IRQ) | ||
726 | * handled |= tmio_error_irq(host, irq, stat); | ||
727 | */ | ||
728 | |||
729 | /* Command completion */ | ||
730 | if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { | ||
731 | ack_mmc_irqs(host, | ||
732 | TMIO_STAT_CMDRESPEND | | ||
733 | TMIO_STAT_CMDTIMEOUT); | ||
734 | tmio_mmc_cmd_irq(host, status); | ||
735 | } | ||
736 | |||
737 | /* Data transfer */ | ||
738 | if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { | ||
739 | ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); | ||
740 | tmio_mmc_pio_irq(host); | ||
741 | } | ||
742 | |||
743 | /* Data transfer completion */ | ||
744 | if (ireg & TMIO_STAT_DATAEND) { | ||
745 | ack_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
746 | tmio_mmc_data_irq(host); | ||
747 | } | ||
748 | |||
749 | /* Check status - keep going until we've handled it all */ | ||
750 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
751 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
752 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
753 | |||
754 | pr_debug("Status at end of loop: %08x\n", status); | ||
755 | pr_debug_status(status); | ||
756 | } | ||
757 | pr_debug("MMC IRQ end\n"); | ||
758 | |||
759 | out: | ||
760 | return IRQ_HANDLED; | ||
761 | } | ||
762 | |||
763 | #ifdef CONFIG_TMIO_MMC_DMA | ||
764 | static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) | ||
765 | { | ||
766 | if (host->sg_ptr == &host->bounce_sg) { | ||
767 | unsigned long flags; | ||
768 | void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); | ||
769 | memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); | ||
770 | tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); | ||
771 | } | ||
772 | } | ||
773 | |||
774 | static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) | ||
775 | { | ||
776 | #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) | ||
777 | /* Switch DMA mode on or off - SuperH specific? */ | ||
778 | sd_ctrl_write16(host, 0xd8, enable ? 2 : 0); | ||
779 | #endif | ||
780 | } | ||
781 | |||
782 | static void tmio_dma_complete(void *arg) | ||
783 | { | ||
784 | struct tmio_mmc_host *host = arg; | ||
785 | |||
786 | dev_dbg(&host->pdev->dev, "Command completed\n"); | ||
787 | |||
788 | if (!host->data) | ||
789 | dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n"); | ||
790 | else | ||
791 | enable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
792 | } | ||
793 | |||
794 | static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | ||
795 | { | ||
796 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
797 | struct dma_async_tx_descriptor *desc = NULL; | ||
798 | struct dma_chan *chan = host->chan_rx; | ||
799 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
800 | dma_cookie_t cookie; | ||
801 | int ret, i; | ||
802 | bool aligned = true, multiple = true; | ||
803 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
804 | |||
805 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
806 | if (sg_tmp->offset & align) | ||
807 | aligned = false; | ||
808 | if (sg_tmp->length & align) { | ||
809 | multiple = false; | ||
810 | break; | ||
811 | } | ||
812 | } | ||
813 | |||
814 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
815 | align >= MAX_ALIGN)) || !multiple) { | ||
816 | ret = -EINVAL; | ||
817 | goto pio; | ||
818 | } | ||
819 | |||
820 | /* Only a single sg element may be unaligned; use our bounce buffer in that case */ | ||
821 | if (!aligned) { | ||
822 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
823 | host->sg_ptr = &host->bounce_sg; | ||
824 | sg = host->sg_ptr; | ||
825 | } | ||
826 | |||
827 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); | ||
828 | if (ret > 0) | ||
829 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
830 | DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
831 | |||
832 | if (desc) { | ||
833 | desc->callback = tmio_dma_complete; | ||
834 | desc->callback_param = host; | ||
835 | cookie = dmaengine_submit(desc); | ||
836 | dma_async_issue_pending(chan); | ||
837 | } | ||
838 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
839 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
840 | |||
841 | pio: | ||
842 | if (!desc) { | ||
843 | /* DMA failed, fall back to PIO */ | ||
844 | if (ret >= 0) | ||
845 | ret = -EIO; | ||
846 | host->chan_rx = NULL; | ||
847 | dma_release_channel(chan); | ||
848 | /* Free the Tx channel too */ | ||
849 | chan = host->chan_tx; | ||
850 | if (chan) { | ||
851 | host->chan_tx = NULL; | ||
852 | dma_release_channel(chan); | ||
853 | } | ||
854 | dev_warn(&host->pdev->dev, | ||
855 | "DMA failed: %d, falling back to PIO\n", ret); | ||
856 | tmio_mmc_enable_dma(host, false); | ||
857 | } | ||
858 | |||
859 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, | ||
860 | desc, cookie, host->sg_len); | ||
861 | } | ||
862 | |||
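To make the alignment gate in tmio_mmc_start_dma_rx() concrete (the numbers are an assumed configuration, not taken from a particular board): with alignment_shift = 1 the mask align is 1, so a lone 512-byte sg element starting at byte offset 3 fails the offset check but passes the length check; since it is a single element that fits in a page and align < MAX_ALIGN, the function maps host->bounce_sg instead of giving up and falling back to PIO:

    /* Hypothetical element: offset 3, length 512, alignment_shift 1. */
    unsigned int align = (1 << 1) - 1;      /* 0x1                 */
    bool aligned  = !(3 & align);           /* false - odd offset  */
    bool multiple = !(512 & align);         /* true - even length  */
    /* !aligned, but sg_len == 1, 512 <= PAGE_CACHE_SIZE, align < MAX_ALIGN
     * and multiple holds, so the bounce-buffer branch is taken rather
     * than the pio: fallback. */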
863 | static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) | ||
864 | { | ||
865 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
866 | struct dma_async_tx_descriptor *desc = NULL; | ||
867 | struct dma_chan *chan = host->chan_tx; | ||
868 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
869 | dma_cookie_t cookie; | ||
870 | int ret, i; | ||
871 | bool aligned = true, multiple = true; | ||
872 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
873 | |||
874 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
875 | if (sg_tmp->offset & align) | ||
876 | aligned = false; | ||
877 | if (sg_tmp->length & align) { | ||
878 | multiple = false; | ||
879 | break; | ||
880 | } | ||
881 | } | ||
882 | |||
883 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
884 | align >= MAX_ALIGN)) || !multiple) { | ||
885 | ret = -EINVAL; | ||
886 | goto pio; | ||
887 | } | ||
888 | |||
889 | /* Only a single sg element may be unaligned; use our bounce buffer in that case */ | ||
890 | if (!aligned) { | ||
891 | unsigned long flags; | ||
892 | void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); | ||
893 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
894 | memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); | ||
895 | tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); | ||
896 | host->sg_ptr = &host->bounce_sg; | ||
897 | sg = host->sg_ptr; | ||
898 | } | ||
899 | |||
900 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); | ||
901 | if (ret > 0) | ||
902 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
903 | DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
904 | |||
905 | if (desc) { | ||
906 | desc->callback = tmio_dma_complete; | ||
907 | desc->callback_param = host; | ||
908 | cookie = dmaengine_submit(desc); | ||
909 | } | ||
910 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
911 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
912 | |||
913 | pio: | ||
914 | if (!desc) { | ||
915 | /* DMA failed, fall back to PIO */ | ||
916 | if (ret >= 0) | ||
917 | ret = -EIO; | ||
918 | host->chan_tx = NULL; | ||
919 | dma_release_channel(chan); | ||
920 | /* Free the Rx channel too */ | ||
921 | chan = host->chan_rx; | ||
922 | if (chan) { | ||
923 | host->chan_rx = NULL; | ||
924 | dma_release_channel(chan); | ||
925 | } | ||
926 | dev_warn(&host->pdev->dev, | ||
927 | "DMA failed: %d, falling back to PIO\n", ret); | ||
928 | tmio_mmc_enable_dma(host, false); | ||
929 | } | ||
930 | |||
931 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, | ||
932 | desc, cookie); | ||
933 | } | ||
934 | |||
935 | static void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
936 | struct mmc_data *data) | ||
937 | { | ||
938 | if (data->flags & MMC_DATA_READ) { | ||
939 | if (host->chan_rx) | ||
940 | tmio_mmc_start_dma_rx(host); | ||
941 | } else { | ||
942 | if (host->chan_tx) | ||
943 | tmio_mmc_start_dma_tx(host); | ||
944 | } | ||
945 | } | ||
946 | |||
947 | static void tmio_issue_tasklet_fn(unsigned long priv) | ||
948 | { | ||
949 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; | ||
950 | struct dma_chan *chan = host->chan_tx; | ||
951 | |||
952 | dma_async_issue_pending(chan); | ||
953 | } | ||
954 | |||
955 | static void tmio_tasklet_fn(unsigned long arg) | ||
956 | { | ||
957 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; | ||
958 | unsigned long flags; | ||
959 | |||
960 | spin_lock_irqsave(&host->lock, flags); | ||
961 | |||
962 | if (!host->data) | ||
963 | goto out; | ||
964 | |||
965 | if (host->data->flags & MMC_DATA_READ) | ||
966 | dma_unmap_sg(host->chan_rx->device->dev, | ||
967 | host->sg_ptr, host->sg_len, | ||
968 | DMA_FROM_DEVICE); | ||
969 | else | ||
970 | dma_unmap_sg(host->chan_tx->device->dev, | ||
971 | host->sg_ptr, host->sg_len, | ||
972 | DMA_TO_DEVICE); | ||
973 | |||
974 | tmio_mmc_do_data_irq(host); | ||
975 | out: | ||
976 | spin_unlock_irqrestore(&host->lock, flags); | ||
977 | } | ||
978 | |||
979 | /* It might be necessary to make the filter MFD-specific */ | ||
980 | static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) | ||
981 | { | ||
982 | dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); | ||
983 | chan->private = arg; | ||
984 | return true; | ||
985 | } | ||
986 | |||
987 | static void tmio_mmc_request_dma(struct tmio_mmc_host *host, | ||
988 | struct tmio_mmc_data *pdata) | ||
989 | { | ||
990 | /* We can either use DMA for both Tx and Rx or not use it at all */ | ||
991 | if (pdata->dma) { | ||
992 | dma_cap_mask_t mask; | ||
993 | |||
994 | dma_cap_zero(mask); | ||
995 | dma_cap_set(DMA_SLAVE, mask); | ||
996 | |||
997 | host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, | ||
998 | pdata->dma->chan_priv_tx); | ||
999 | dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, | ||
1000 | host->chan_tx); | ||
1001 | |||
1002 | if (!host->chan_tx) | ||
1003 | return; | ||
1004 | |||
1005 | host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, | ||
1006 | pdata->dma->chan_priv_rx); | ||
1007 | dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, | ||
1008 | host->chan_rx); | ||
1009 | |||
1010 | if (!host->chan_rx) { | ||
1011 | dma_release_channel(host->chan_tx); | ||
1012 | host->chan_tx = NULL; | ||
1013 | return; | ||
1014 | } | ||
1015 | |||
1016 | tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host); | ||
1017 | tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host); | ||
1018 | |||
1019 | tmio_mmc_enable_dma(host, true); | ||
1020 | } | ||
1021 | } | ||
1022 | |||
1023 | static void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
1024 | { | ||
1025 | if (host->chan_tx) { | ||
1026 | struct dma_chan *chan = host->chan_tx; | ||
1027 | host->chan_tx = NULL; | ||
1028 | dma_release_channel(chan); | ||
1029 | } | ||
1030 | if (host->chan_rx) { | ||
1031 | struct dma_chan *chan = host->chan_rx; | ||
1032 | host->chan_rx = NULL; | ||
1033 | dma_release_channel(chan); | ||
1034 | } | ||
1035 | } | ||
1036 | #else | ||
1037 | static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) | ||
1038 | { | ||
1039 | } | ||
1040 | |||
1041 | static void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
1042 | struct mmc_data *data) | ||
1043 | { | ||
1044 | } | ||
1045 | |||
1046 | static void tmio_mmc_request_dma(struct tmio_mmc_host *host, | ||
1047 | struct tmio_mmc_data *pdata) | ||
1048 | { | ||
1049 | host->chan_tx = NULL; | ||
1050 | host->chan_rx = NULL; | ||
1051 | } | ||
1052 | |||
1053 | static void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
1054 | { | ||
1055 | } | ||
1056 | #endif | ||
1057 | |||
1058 | static int tmio_mmc_start_data(struct tmio_mmc_host *host, | ||
1059 | struct mmc_data *data) | ||
1060 | { | ||
1061 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
1062 | |||
1063 | pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", | ||
1064 | data->blksz, data->blocks); | ||
1065 | |||
1066 | /* Some hardware cannot perform 2 byte requests in 4 bit mode */ | ||
1067 | if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { | ||
1068 | int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; | ||
1069 | |||
1070 | if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { | ||
1071 | pr_err("%s: %d byte block unsupported in 4 bit mode\n", | ||
1072 | mmc_hostname(host->mmc), data->blksz); | ||
1073 | return -EINVAL; | ||
1074 | } | ||
1075 | } | ||
1076 | |||
1077 | tmio_mmc_init_sg(host, data); | ||
1078 | host->data = data; | ||
1079 | |||
1080 | /* Set transfer length / blocksize */ | ||
1081 | sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); | ||
1082 | sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); | ||
1083 | |||
1084 | tmio_mmc_start_dma(host, data); | ||
1085 | |||
1086 | return 0; | ||
1087 | } | ||
1088 | |||
1089 | /* Process requests from the MMC layer */ | ||
1090 | static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) | ||
1091 | { | ||
1092 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1093 | int ret; | ||
1094 | |||
1095 | if (host->mrq) | ||
1096 | pr_debug("request not null\n"); | ||
1097 | |||
1098 | host->last_req_ts = jiffies; | ||
1099 | wmb(); | ||
1100 | host->mrq = mrq; | ||
1101 | |||
1102 | if (mrq->data) { | ||
1103 | ret = tmio_mmc_start_data(host, mrq->data); | ||
1104 | if (ret) | ||
1105 | goto fail; | ||
1106 | } | ||
1107 | |||
1108 | ret = tmio_mmc_start_command(host, mrq->cmd); | ||
1109 | if (!ret) { | ||
1110 | schedule_delayed_work(&host->delayed_reset_work, | ||
1111 | msecs_to_jiffies(2000)); | ||
1112 | return; | ||
1113 | } | ||
1114 | |||
1115 | fail: | ||
1116 | host->mrq = NULL; | ||
1117 | mrq->cmd->error = ret; | ||
1118 | mmc_request_done(mmc, mrq); | ||
1119 | } | ||
1120 | |||
1121 | /* Set MMC clock / power. | ||
1122 | * Note: This controller uses a simple divider scheme, so it cannot run | ||
1123 | * an MMC card at full speed (20 MHz). The max clock is 24 MHz on SD, but | ||
1124 | * as MMC won't run that fast, it has to be clocked at 12 MHz, which is | ||
1125 | * the next slowest setting. | ||
1126 | */ | ||
1127 | static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
1128 | { | ||
1129 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1130 | |||
1131 | if (ios->clock) | ||
1132 | tmio_mmc_set_clock(host, ios->clock); | ||
1133 | |||
1134 | /* Power sequence - OFF -> ON -> UP */ | ||
1135 | switch (ios->power_mode) { | ||
1136 | case MMC_POWER_OFF: /* power down SD bus */ | ||
1137 | if (host->set_pwr) | ||
1138 | host->set_pwr(host->pdev, 0); | ||
1139 | tmio_mmc_clk_stop(host); | ||
1140 | break; | ||
1141 | case MMC_POWER_ON: /* power up SD bus */ | ||
1142 | if (host->set_pwr) | ||
1143 | host->set_pwr(host->pdev, 1); | ||
1144 | break; | ||
1145 | case MMC_POWER_UP: /* start bus clock */ | ||
1146 | tmio_mmc_clk_start(host); | ||
1147 | break; | ||
1148 | } | ||
1149 | |||
1150 | switch (ios->bus_width) { | ||
1151 | case MMC_BUS_WIDTH_1: | ||
1152 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); | ||
1153 | break; | ||
1154 | case MMC_BUS_WIDTH_4: | ||
1155 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); | ||
1156 | break; | ||
1157 | } | ||
1158 | |||
1159 | /* Let things settle. Delay taken from the WinCE driver. */ | ||
1160 | udelay(140); | ||
1161 | } | ||
1162 | |||
1163 | static int tmio_mmc_get_ro(struct mmc_host *mmc) | ||
1164 | { | ||
1165 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1166 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
1167 | |||
1168 | return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || | ||
1169 | (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1; | ||
1170 | } | ||
1171 | |||
1172 | static int tmio_mmc_get_cd(struct mmc_host *mmc) | ||
1173 | { | ||
1174 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1175 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
1176 | |||
1177 | if (!pdata->get_cd) | ||
1178 | return -ENOSYS; | ||
1179 | else | ||
1180 | return pdata->get_cd(host->pdev); | ||
1181 | } | ||
1182 | |||
1183 | static const struct mmc_host_ops tmio_mmc_ops = { | ||
1184 | .request = tmio_mmc_request, | ||
1185 | .set_ios = tmio_mmc_set_ios, | ||
1186 | .get_ro = tmio_mmc_get_ro, | ||
1187 | .get_cd = tmio_mmc_get_cd, | ||
1188 | .enable_sdio_irq = tmio_mmc_enable_sdio_irq, | ||
1189 | }; | ||
1190 | 25 | ||
1191 | #ifdef CONFIG_PM | 26 | #ifdef CONFIG_PM |
1192 | static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) | 27 | static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) |
@@ -1227,138 +62,54 @@ out: | |||
1227 | #define tmio_mmc_resume NULL | 62 | #define tmio_mmc_resume NULL |
1228 | #endif | 63 | #endif |
1229 | 64 | ||
1230 | static int __devinit tmio_mmc_probe(struct platform_device *dev) | 65 | static int __devinit tmio_mmc_probe(struct platform_device *pdev) |
1231 | { | 66 | { |
1232 | const struct mfd_cell *cell = mfd_get_cell(dev); | 67 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
1233 | struct tmio_mmc_data *pdata; | 68 | struct tmio_mmc_data *pdata; |
1234 | struct resource *res_ctl; | ||
1235 | struct tmio_mmc_host *host; | 69 | struct tmio_mmc_host *host; |
1236 | struct mmc_host *mmc; | ||
1237 | int ret = -EINVAL; | 70 | int ret = -EINVAL; |
1238 | u32 irq_mask = TMIO_MASK_CMD; | ||
1239 | 71 | ||
1240 | if (dev->num_resources != 2) | 72 | if (pdev->num_resources != 2) |
1241 | goto out; | 73 | goto out; |
1242 | 74 | ||
1243 | res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); | 75 | pdata = mfd_get_data(pdev); |
1244 | if (!res_ctl) | ||
1245 | goto out; | ||
1246 | |||
1247 | pdata = mfd_get_data(dev); | ||
1248 | if (!pdata || !pdata->hclk) | 76 | if (!pdata || !pdata->hclk) |
1249 | goto out; | 77 | goto out; |
1250 | 78 | ||
1251 | ret = -ENOMEM; | ||
1252 | |||
1253 | mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev); | ||
1254 | if (!mmc) | ||
1255 | goto out; | ||
1256 | |||
1257 | host = mmc_priv(mmc); | ||
1258 | host->mmc = mmc; | ||
1259 | host->pdev = dev; | ||
1260 | platform_set_drvdata(dev, mmc); | ||
1261 | |||
1262 | host->set_pwr = pdata->set_pwr; | ||
1263 | host->set_clk_div = pdata->set_clk_div; | ||
1264 | |||
1265 | /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ | ||
1266 | host->bus_shift = resource_size(res_ctl) >> 10; | ||
1267 | |||
1268 | host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); | ||
1269 | if (!host->ctl) | ||
1270 | goto host_free; | ||
1271 | |||
1272 | mmc->ops = &tmio_mmc_ops; | ||
1273 | mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; | ||
1274 | mmc->f_max = pdata->hclk; | ||
1275 | mmc->f_min = mmc->f_max / 512; | ||
1276 | mmc->max_segs = 32; | ||
1277 | mmc->max_blk_size = 512; | ||
1278 | mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * | ||
1279 | mmc->max_segs; | ||
1280 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | ||
1281 | mmc->max_seg_size = mmc->max_req_size; | ||
1282 | if (pdata->ocr_mask) | ||
1283 | mmc->ocr_avail = pdata->ocr_mask; | ||
1284 | else | ||
1285 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | ||
1286 | |||
1287 | /* Tell the MFD core we are ready to be enabled */ | 79 | /* Tell the MFD core we are ready to be enabled */ |
1288 | if (cell->enable) { | 80 | if (cell->enable) { |
1289 | ret = cell->enable(dev); | 81 | ret = cell->enable(pdev); |
1290 | if (ret) | 82 | if (ret) |
1291 | goto unmap_ctl; | 83 | goto out; |
1292 | } | 84 | } |
1293 | 85 | ||
1294 | tmio_mmc_clk_stop(host); | 86 | ret = tmio_mmc_host_probe(&host, pdev, pdata); |
1295 | reset(host); | ||
1296 | |||
1297 | ret = platform_get_irq(dev, 0); | ||
1298 | if (ret >= 0) | ||
1299 | host->irq = ret; | ||
1300 | else | ||
1301 | goto cell_disable; | ||
1302 | |||
1303 | disable_mmc_irqs(host, TMIO_MASK_ALL); | ||
1304 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
1305 | tmio_mmc_enable_sdio_irq(mmc, 0); | ||
1306 | |||
1307 | ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | | ||
1308 | IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); | ||
1309 | if (ret) | 87 | if (ret) |
1310 | goto cell_disable; | 88 | goto cell_disable; |
1311 | 89 | ||
1312 | spin_lock_init(&host->lock); | ||
1313 | |||
1314 | /* Init delayed work for request timeouts */ | ||
1315 | INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work); | ||
1316 | |||
1317 | /* See if we also get DMA */ | ||
1318 | tmio_mmc_request_dma(host, pdata); | ||
1319 | |||
1320 | mmc_add_host(mmc); | ||
1321 | |||
1322 | pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), | 90 | pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), |
1323 | (unsigned long)host->ctl, host->irq); | 91 | (unsigned long)host->ctl, host->irq); |
1324 | 92 | ||
1325 | /* Unmask the IRQs we want to know about */ | ||
1326 | if (!host->chan_rx) | ||
1327 | irq_mask |= TMIO_MASK_READOP; | ||
1328 | if (!host->chan_tx) | ||
1329 | irq_mask |= TMIO_MASK_WRITEOP; | ||
1330 | enable_mmc_irqs(host, irq_mask); | ||
1331 | |||
1332 | return 0; | 93 | return 0; |
1333 | 94 | ||
1334 | cell_disable: | 95 | cell_disable: |
1335 | if (cell->disable) | 96 | if (cell->disable) |
1336 | cell->disable(dev); | 97 | cell->disable(pdev); |
1337 | unmap_ctl: | ||
1338 | iounmap(host->ctl); | ||
1339 | host_free: | ||
1340 | mmc_free_host(mmc); | ||
1341 | out: | 98 | out: |
1342 | return ret; | 99 | return ret; |
1343 | } | 100 | } |
1344 | 101 | ||
1345 | static int __devexit tmio_mmc_remove(struct platform_device *dev) | 102 | static int __devexit tmio_mmc_remove(struct platform_device *pdev) |
1346 | { | 103 | { |
1347 | const struct mfd_cell *cell = mfd_get_cell(dev); | 104 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
1348 | struct mmc_host *mmc = platform_get_drvdata(dev); | 105 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
1349 | 106 | ||
1350 | platform_set_drvdata(dev, NULL); | 107 | platform_set_drvdata(pdev, NULL); |
1351 | 108 | ||
1352 | if (mmc) { | 109 | if (mmc) { |
1353 | struct tmio_mmc_host *host = mmc_priv(mmc); | 110 | tmio_mmc_host_remove(mmc_priv(mmc)); |
1354 | mmc_remove_host(mmc); | ||
1355 | cancel_delayed_work_sync(&host->delayed_reset_work); | ||
1356 | tmio_mmc_release_dma(host); | ||
1357 | free_irq(host->irq, host); | ||
1358 | if (cell->disable) | 111 | if (cell->disable) |
1359 | cell->disable(dev); | 112 | cell->disable(pdev); |
1360 | iounmap(host->ctl); | ||
1361 | mmc_free_host(mmc); | ||
1362 | } | 113 | } |
1363 | 114 | ||
1364 | return 0; | 115 | return 0; |
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h new file mode 100644 index 000000000000..099ed49a259b --- /dev/null +++ b/drivers/mmc/host/tmio_mmc.h | |||
@@ -0,0 +1,123 @@ | |||
1 | /* | ||
2 | * linux/drivers/mmc/host/tmio_mmc.h | ||
3 | * | ||
4 | * Copyright (C) 2007 Ian Molton | ||
5 | * Copyright (C) 2004 Ian Molton | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * Driver for the MMC / SD / SDIO cell found in: | ||
12 | * | ||
13 | * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 | ||
14 | */ | ||
15 | |||
16 | #ifndef TMIO_MMC_H | ||
17 | #define TMIO_MMC_H | ||
18 | |||
19 | #include <linux/highmem.h> | ||
20 | #include <linux/mmc/tmio.h> | ||
21 | #include <linux/pagemap.h> | ||
22 | |||
23 | /* Definitions for values the CTRL_SDIO_STATUS register can take. */ | ||
24 | #define TMIO_SDIO_STAT_IOIRQ 0x0001 | ||
25 | #define TMIO_SDIO_STAT_EXPUB52 0x4000 | ||
26 | #define TMIO_SDIO_STAT_EXWT 0x8000 | ||
27 | #define TMIO_SDIO_MASK_ALL 0xc007 | ||
28 | |||
29 | /* Define some IRQ masks */ | ||
30 | /* This is the mask used at reset by the chip */ | ||
31 | #define TMIO_MASK_ALL 0x837f031d | ||
32 | #define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) | ||
33 | #define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) | ||
34 | #define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ | ||
35 | TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) | ||
36 | #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) | ||
37 | |||
38 | struct tmio_mmc_data; | ||
39 | |||
40 | struct tmio_mmc_host { | ||
41 | void __iomem *ctl; | ||
42 | unsigned long bus_shift; | ||
43 | struct mmc_command *cmd; | ||
44 | struct mmc_request *mrq; | ||
45 | struct mmc_data *data; | ||
46 | struct mmc_host *mmc; | ||
47 | int irq; | ||
48 | unsigned int sdio_irq_enabled; | ||
49 | |||
50 | /* Callbacks for clock / power control */ | ||
51 | void (*set_pwr)(struct platform_device *host, int state); | ||
52 | void (*set_clk_div)(struct platform_device *host, int state); | ||
53 | |||
54 | /* pio related stuff */ | ||
55 | struct scatterlist *sg_ptr; | ||
56 | struct scatterlist *sg_orig; | ||
57 | unsigned int sg_len; | ||
58 | unsigned int sg_off; | ||
59 | |||
60 | struct platform_device *pdev; | ||
61 | struct tmio_mmc_data *pdata; | ||
62 | |||
63 | /* DMA support */ | ||
64 | bool force_pio; | ||
65 | struct dma_chan *chan_rx; | ||
66 | struct dma_chan *chan_tx; | ||
67 | struct tasklet_struct dma_complete; | ||
68 | struct tasklet_struct dma_issue; | ||
69 | struct scatterlist bounce_sg; | ||
70 | u8 *bounce_buf; | ||
71 | |||
72 | /* Track lost interrupts */ | ||
73 | struct delayed_work delayed_reset_work; | ||
74 | spinlock_t lock; | ||
75 | unsigned long last_req_ts; | ||
76 | }; | ||
77 | |||
78 | int tmio_mmc_host_probe(struct tmio_mmc_host **host, | ||
79 | struct platform_device *pdev, | ||
80 | struct tmio_mmc_data *pdata); | ||
81 | void tmio_mmc_host_remove(struct tmio_mmc_host *host); | ||
82 | void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); | ||
83 | |||
84 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); | ||
85 | void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); | ||
86 | |||
87 | static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, | ||
88 | unsigned long *flags) | ||
89 | { | ||
90 | local_irq_save(*flags); | ||
91 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; | ||
92 | } | ||
93 | |||
94 | static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, | ||
95 | unsigned long *flags, void *virt) | ||
96 | { | ||
97 | kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); | ||
98 | local_irq_restore(*flags); | ||
99 | } | ||
100 | |||
101 | #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) | ||
102 | void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); | ||
103 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); | ||
104 | void tmio_mmc_release_dma(struct tmio_mmc_host *host); | ||
105 | #else | ||
106 | static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
107 | struct mmc_data *data) | ||
108 | { | ||
109 | } | ||
110 | |||
111 | static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, | ||
112 | struct tmio_mmc_data *pdata) | ||
113 | { | ||
114 | host->chan_tx = NULL; | ||
115 | host->chan_rx = NULL; | ||
116 | } | ||
117 | |||
118 | static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
119 | { | ||
120 | } | ||
121 | #endif | ||
122 | |||
123 | #endif | ||
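The probe/remove pair declared in this header is the main interface a platform glue driver needs; a minimal, hypothetical glue (function and platform_data names are assumptions, the call order mirrors the rewritten tmio_mmc.c earlier in this patch) would look roughly like:

    /* Hypothetical sketch, not part of this patch. */
    static int my_glue_probe(struct platform_device *pdev)
    {
            struct tmio_mmc_data *pdata = pdev->dev.platform_data;
            struct tmio_mmc_host *host;
            int ret;

            ret = tmio_mmc_host_probe(&host, pdev, pdata);
            if (ret)
                    return ret;

            dev_info(&pdev->dev, "TMIO MMC glue probed, irq %d\n", host->irq);
            return 0;
    }

    static int my_glue_remove(struct platform_device *pdev)
    {
            struct mmc_host *mmc = platform_get_drvdata(pdev);

            if (mmc)
                    tmio_mmc_host_remove(mmc_priv(mmc));
            return 0;
    }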
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c new file mode 100644 index 000000000000..d3de74ab633e --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_dma.c | |||
@@ -0,0 +1,317 @@ | |||
1 | /* | ||
2 | * linux/drivers/mmc/tmio_mmc_dma.c | ||
3 | * | ||
4 | * Copyright (C) 2010-2011 Guennadi Liakhovetski | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * DMA function for TMIO MMC implementations | ||
11 | */ | ||
12 | |||
13 | #include <linux/device.h> | ||
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/mfd/tmio.h> | ||
16 | #include <linux/mmc/host.h> | ||
17 | #include <linux/mmc/tmio.h> | ||
18 | #include <linux/pagemap.h> | ||
19 | #include <linux/scatterlist.h> | ||
20 | |||
21 | #include "tmio_mmc.h" | ||
22 | |||
23 | #define TMIO_MMC_MIN_DMA_LEN 8 | ||
24 | |||
25 | static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) | ||
26 | { | ||
27 | #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) | ||
28 | /* Switch DMA mode on or off - SuperH specific? */ | ||
29 | writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift)); | ||
30 | #endif | ||
31 | } | ||
32 | |||
33 | static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | ||
34 | { | ||
35 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
36 | struct dma_async_tx_descriptor *desc = NULL; | ||
37 | struct dma_chan *chan = host->chan_rx; | ||
38 | struct tmio_mmc_data *pdata = host->pdata; | ||
39 | dma_cookie_t cookie; | ||
40 | int ret, i; | ||
41 | bool aligned = true, multiple = true; | ||
42 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
43 | |||
44 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
45 | if (sg_tmp->offset & align) | ||
46 | aligned = false; | ||
47 | if (sg_tmp->length & align) { | ||
48 | multiple = false; | ||
49 | break; | ||
50 | } | ||
51 | } | ||
52 | |||
53 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
54 | (align & PAGE_MASK))) || !multiple) { | ||
55 | ret = -EINVAL; | ||
56 | goto pio; | ||
57 | } | ||
58 | |||
59 | if (sg->length < TMIO_MMC_MIN_DMA_LEN) { | ||
60 | host->force_pio = true; | ||
61 | return; | ||
62 | } | ||
63 | |||
64 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY); | ||
65 | |||
66 | /* Only a single sg element may be unaligned; use our bounce buffer in that case */ | ||
67 | if (!aligned) { | ||
68 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
69 | host->sg_ptr = &host->bounce_sg; | ||
70 | sg = host->sg_ptr; | ||
71 | } | ||
72 | |||
73 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); | ||
74 | if (ret > 0) | ||
75 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
76 | DMA_FROM_DEVICE, DMA_CTRL_ACK); | ||
77 | |||
78 | if (desc) { | ||
79 | cookie = dmaengine_submit(desc); | ||
80 | if (cookie < 0) { | ||
81 | desc = NULL; | ||
82 | ret = cookie; | ||
83 | } | ||
84 | } | ||
85 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
86 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
87 | |||
88 | pio: | ||
89 | if (!desc) { | ||
90 | /* DMA failed, fall back to PIO */ | ||
91 | if (ret >= 0) | ||
92 | ret = -EIO; | ||
93 | host->chan_rx = NULL; | ||
94 | dma_release_channel(chan); | ||
95 | /* Free the Tx channel too */ | ||
96 | chan = host->chan_tx; | ||
97 | if (chan) { | ||
98 | host->chan_tx = NULL; | ||
99 | dma_release_channel(chan); | ||
100 | } | ||
101 | dev_warn(&host->pdev->dev, | ||
102 | "DMA failed: %d, falling back to PIO\n", ret); | ||
103 | tmio_mmc_enable_dma(host, false); | ||
104 | } | ||
105 | |||
106 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, | ||
107 | desc, cookie, host->sg_len); | ||
108 | } | ||
109 | |||
110 | static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) | ||
111 | { | ||
112 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
113 | struct dma_async_tx_descriptor *desc = NULL; | ||
114 | struct dma_chan *chan = host->chan_tx; | ||
115 | struct tmio_mmc_data *pdata = host->pdata; | ||
116 | dma_cookie_t cookie; | ||
117 | int ret, i; | ||
118 | bool aligned = true, multiple = true; | ||
119 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
120 | |||
121 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
122 | if (sg_tmp->offset & align) | ||
123 | aligned = false; | ||
124 | if (sg_tmp->length & align) { | ||
125 | multiple = false; | ||
126 | break; | ||
127 | } | ||
128 | } | ||
129 | |||
130 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
131 | (align & PAGE_MASK))) || !multiple) { | ||
132 | ret = -EINVAL; | ||
133 | goto pio; | ||
134 | } | ||
135 | |||
136 | if (sg->length < TMIO_MMC_MIN_DMA_LEN) { | ||
137 | host->force_pio = true; | ||
138 | return; | ||
139 | } | ||
140 | |||
141 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ); | ||
142 | |||
143 | /* Only a single sg element may be unaligned; use our bounce buffer in that case */ | ||
144 | if (!aligned) { | ||
145 | unsigned long flags; | ||
146 | void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); | ||
147 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
148 | memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); | ||
149 | tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); | ||
150 | host->sg_ptr = &host->bounce_sg; | ||
151 | sg = host->sg_ptr; | ||
152 | } | ||
153 | |||
154 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); | ||
155 | if (ret > 0) | ||
156 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
157 | DMA_TO_DEVICE, DMA_CTRL_ACK); | ||
158 | |||
159 | if (desc) { | ||
160 | cookie = dmaengine_submit(desc); | ||
161 | if (cookie < 0) { | ||
162 | desc = NULL; | ||
163 | ret = cookie; | ||
164 | } | ||
165 | } | ||
166 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
167 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
168 | |||
169 | pio: | ||
170 | if (!desc) { | ||
171 | /* DMA failed, fall back to PIO */ | ||
172 | if (ret >= 0) | ||
173 | ret = -EIO; | ||
174 | host->chan_tx = NULL; | ||
175 | dma_release_channel(chan); | ||
176 | /* Free the Rx channel too */ | ||
177 | chan = host->chan_rx; | ||
178 | if (chan) { | ||
179 | host->chan_rx = NULL; | ||
180 | dma_release_channel(chan); | ||
181 | } | ||
182 | dev_warn(&host->pdev->dev, | ||
183 | "DMA failed: %d, falling back to PIO\n", ret); | ||
184 | tmio_mmc_enable_dma(host, false); | ||
185 | } | ||
186 | |||
187 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, | ||
188 | desc, cookie); | ||
189 | } | ||
190 | |||
191 | void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
192 | struct mmc_data *data) | ||
193 | { | ||
194 | if (data->flags & MMC_DATA_READ) { | ||
195 | if (host->chan_rx) | ||
196 | tmio_mmc_start_dma_rx(host); | ||
197 | } else { | ||
198 | if (host->chan_tx) | ||
199 | tmio_mmc_start_dma_tx(host); | ||
200 | } | ||
201 | } | ||
202 | |||
203 | static void tmio_mmc_issue_tasklet_fn(unsigned long priv) | ||
204 | { | ||
205 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; | ||
206 | struct dma_chan *chan = NULL; | ||
207 | |||
208 | spin_lock_irq(&host->lock); | ||
209 | |||
210 | if (host && host->data) { | ||
211 | if (host->data->flags & MMC_DATA_READ) | ||
212 | chan = host->chan_rx; | ||
213 | else | ||
214 | chan = host->chan_tx; | ||
215 | } | ||
216 | |||
217 | spin_unlock_irq(&host->lock); | ||
218 | |||
219 | tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
220 | |||
221 | if (chan) | ||
222 | dma_async_issue_pending(chan); | ||
223 | } | ||
224 | |||
225 | static void tmio_mmc_tasklet_fn(unsigned long arg) | ||
226 | { | ||
227 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; | ||
228 | |||
229 | spin_lock_irq(&host->lock); | ||
230 | |||
231 | if (!host->data) | ||
232 | goto out; | ||
233 | |||
234 | if (host->data->flags & MMC_DATA_READ) | ||
235 | dma_unmap_sg(host->chan_rx->device->dev, | ||
236 | host->sg_ptr, host->sg_len, | ||
237 | DMA_FROM_DEVICE); | ||
238 | else | ||
239 | dma_unmap_sg(host->chan_tx->device->dev, | ||
240 | host->sg_ptr, host->sg_len, | ||
241 | DMA_TO_DEVICE); | ||
242 | |||
243 | tmio_mmc_do_data_irq(host); | ||
244 | out: | ||
245 | spin_unlock_irq(&host->lock); | ||
246 | } | ||
247 | |||
248 | /* It might be necessary to make the filter MFD-specific */ | ||
249 | static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) | ||
250 | { | ||
251 | dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); | ||
252 | chan->private = arg; | ||
253 | return true; | ||
254 | } | ||
255 | |||
256 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) | ||
257 | { | ||
258 | /* We can either use DMA for both Tx and Rx or not use it at all */ | ||
259 | if (pdata->dma) { | ||
260 | dma_cap_mask_t mask; | ||
261 | |||
262 | dma_cap_zero(mask); | ||
263 | dma_cap_set(DMA_SLAVE, mask); | ||
264 | |||
265 | host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, | ||
266 | pdata->dma->chan_priv_tx); | ||
267 | dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, | ||
268 | host->chan_tx); | ||
269 | |||
270 | if (!host->chan_tx) | ||
271 | return; | ||
272 | |||
273 | host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, | ||
274 | pdata->dma->chan_priv_rx); | ||
275 | dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, | ||
276 | host->chan_rx); | ||
277 | |||
278 | if (!host->chan_rx) | ||
279 | goto ereqrx; | ||
280 | |||
281 | host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); | ||
282 | if (!host->bounce_buf) | ||
283 | goto ebouncebuf; | ||
284 | |||
285 | tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); | ||
286 | tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); | ||
287 | |||
288 | tmio_mmc_enable_dma(host, true); | ||
289 | |||
290 | return; | ||
291 | ebouncebuf: | ||
292 | dma_release_channel(host->chan_rx); | ||
293 | host->chan_rx = NULL; | ||
294 | ereqrx: | ||
295 | dma_release_channel(host->chan_tx); | ||
296 | host->chan_tx = NULL; | ||
297 | return; | ||
298 | } | ||
299 | } | ||
300 | |||
301 | void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
302 | { | ||
303 | if (host->chan_tx) { | ||
304 | struct dma_chan *chan = host->chan_tx; | ||
305 | host->chan_tx = NULL; | ||
306 | dma_release_channel(chan); | ||
307 | } | ||
308 | if (host->chan_rx) { | ||
309 | struct dma_chan *chan = host->chan_rx; | ||
310 | host->chan_rx = NULL; | ||
311 | dma_release_channel(chan); | ||
312 | } | ||
313 | if (host->bounce_buf) { | ||
314 | free_pages((unsigned long)host->bounce_buf, 0); | ||
315 | host->bounce_buf = NULL; | ||
316 | } | ||
317 | } | ||
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c new file mode 100644 index 000000000000..6ae8d2f00ec7 --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_pio.c | |||
@@ -0,0 +1,897 @@ | |||
1 | /* | ||
2 | * linux/drivers/mmc/host/tmio_mmc_pio.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Guennadi Liakhovetski | ||
5 | * Copyright (C) 2007 Ian Molton | ||
6 | * Copyright (C) 2004 Ian Molton | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * Driver for the MMC / SD / SDIO IP found in: | ||
13 | * | ||
14 | * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs | ||
15 | * | ||
16 | * This driver draws mainly on scattered spec sheets, reverse engineering | ||
17 | * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit | ||
18 | * support). (Further 4 bit support from a later datasheet). | ||
19 | * | ||
20 | * TODO: | ||
21 | * Investigate using a workqueue for PIO transfers | ||
22 | * Eliminate FIXMEs | ||
23 | * SDIO support | ||
24 | * Better power management | ||
25 | * Handle MMC errors better | ||
26 | * Double buffer support | ||
27 | * | ||
28 | */ | ||
29 | |||
30 | #include <linux/delay.h> | ||
31 | #include <linux/device.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/irq.h> | ||
36 | #include <linux/mfd/tmio.h> | ||
37 | #include <linux/mmc/host.h> | ||
38 | #include <linux/mmc/tmio.h> | ||
39 | #include <linux/module.h> | ||
40 | #include <linux/pagemap.h> | ||
41 | #include <linux/platform_device.h> | ||
42 | #include <linux/scatterlist.h> | ||
43 | #include <linux/workqueue.h> | ||
44 | #include <linux/spinlock.h> | ||
45 | |||
46 | #include "tmio_mmc.h" | ||
47 | |||
48 | static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) | ||
49 | { | ||
50 | return readw(host->ctl + (addr << host->bus_shift)); | ||
51 | } | ||
52 | |||
53 | static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, | ||
54 | u16 *buf, int count) | ||
55 | { | ||
56 | readsw(host->ctl + (addr << host->bus_shift), buf, count); | ||
57 | } | ||
58 | |||
59 | static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) | ||
60 | { | ||
61 | return readw(host->ctl + (addr << host->bus_shift)) | | ||
62 | readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; | ||
63 | } | ||
64 | |||
65 | static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) | ||
66 | { | ||
67 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
68 | } | ||
69 | |||
70 | static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, | ||
71 | u16 *buf, int count) | ||
72 | { | ||
73 | writesw(host->ctl + (addr << host->bus_shift), buf, count); | ||
74 | } | ||
75 | |||
76 | static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) | ||
77 | { | ||
78 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
79 | writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); | ||
80 | } | ||
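These accessors hide the controller's register-window scaling: every nominal offset is shifted left by host->bus_shift, and 32-bit registers are built from two 16-bit halves. A minimal arithmetic sketch (the offset 0x1c is illustrative, not a CTL_* value taken from this patch):

/* With a 0x400-byte register window, bus_shift = 0x400 >> 10 = 1 (see the
 * probe below), so a 32-bit access to nominal offset 0x1c becomes:
 *   low  half: readw(host->ctl + (0x1c << 1))        -> offset 0x38
 *   high half: readw(host->ctl + ((0x1c + 2) << 1))  -> offset 0x3c
 *   value    = low | high << 16
 * With the plain 0x200-byte window, bus_shift is 0 and offsets are unscaled.
 */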
81 | |||
82 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) | ||
83 | { | ||
84 | u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ); | ||
85 | sd_ctrl_write32(host, CTL_IRQ_MASK, mask); | ||
86 | } | ||
87 | |||
88 | void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) | ||
89 | { | ||
90 | u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ); | ||
91 | sd_ctrl_write32(host, CTL_IRQ_MASK, mask); | ||
92 | } | ||
93 | |||
94 | static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) | ||
95 | { | ||
96 | sd_ctrl_write32(host, CTL_STATUS, ~i); | ||
97 | } | ||
98 | |||
99 | static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) | ||
100 | { | ||
101 | host->sg_len = data->sg_len; | ||
102 | host->sg_ptr = data->sg; | ||
103 | host->sg_orig = data->sg; | ||
104 | host->sg_off = 0; | ||
105 | } | ||
106 | |||
107 | static int tmio_mmc_next_sg(struct tmio_mmc_host *host) | ||
108 | { | ||
109 | host->sg_ptr = sg_next(host->sg_ptr); | ||
110 | host->sg_off = 0; | ||
111 | return --host->sg_len; | ||
112 | } | ||
113 | |||
114 | #ifdef CONFIG_MMC_DEBUG | ||
115 | |||
116 | #define STATUS_TO_TEXT(a, status, i) \ | ||
117 | do { \ | ||
118 | if (status & TMIO_STAT_##a) { \ | ||
119 | if (i++) \ | ||
120 | printk(" | "); \ | ||
121 | printk(#a); \ | ||
122 | } \ | ||
123 | } while (0) | ||
124 | |||
125 | static void pr_debug_status(u32 status) | ||
126 | { | ||
127 | int i = 0; | ||
128 | printk(KERN_DEBUG "status: %08x = ", status); | ||
129 | STATUS_TO_TEXT(CARD_REMOVE, status, i); | ||
130 | STATUS_TO_TEXT(CARD_INSERT, status, i); | ||
131 | STATUS_TO_TEXT(SIGSTATE, status, i); | ||
132 | STATUS_TO_TEXT(WRPROTECT, status, i); | ||
133 | STATUS_TO_TEXT(CARD_REMOVE_A, status, i); | ||
134 | STATUS_TO_TEXT(CARD_INSERT_A, status, i); | ||
135 | STATUS_TO_TEXT(SIGSTATE_A, status, i); | ||
136 | STATUS_TO_TEXT(CMD_IDX_ERR, status, i); | ||
137 | STATUS_TO_TEXT(STOPBIT_ERR, status, i); | ||
138 | STATUS_TO_TEXT(ILL_FUNC, status, i); | ||
139 | STATUS_TO_TEXT(CMD_BUSY, status, i); | ||
140 | STATUS_TO_TEXT(CMDRESPEND, status, i); | ||
141 | STATUS_TO_TEXT(DATAEND, status, i); | ||
142 | STATUS_TO_TEXT(CRCFAIL, status, i); | ||
143 | STATUS_TO_TEXT(DATATIMEOUT, status, i); | ||
144 | STATUS_TO_TEXT(CMDTIMEOUT, status, i); | ||
145 | STATUS_TO_TEXT(RXOVERFLOW, status, i); | ||
146 | STATUS_TO_TEXT(TXUNDERRUN, status, i); | ||
147 | STATUS_TO_TEXT(RXRDY, status, i); | ||
148 | STATUS_TO_TEXT(TXRQ, status, i); | ||
149 | STATUS_TO_TEXT(ILL_ACCESS, status, i); | ||
150 | printk("\n"); | ||
151 | } | ||
152 | |||
153 | #else | ||
154 | #define pr_debug_status(s) do { } while (0) | ||
155 | #endif | ||
156 | |||
157 | static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | ||
158 | { | ||
159 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
160 | |||
161 | if (enable) { | ||
162 | host->sdio_irq_enabled = 1; | ||
163 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); | ||
164 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, | ||
165 | (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); | ||
166 | } else { | ||
167 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); | ||
168 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); | ||
169 | host->sdio_irq_enabled = 0; | ||
170 | } | ||
171 | } | ||
172 | |||
173 | static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) | ||
174 | { | ||
175 | u32 clk = 0, clock; | ||
176 | |||
177 | if (new_clock) { | ||
178 | for (clock = host->mmc->f_min, clk = 0x80000080; | ||
179 | new_clock >= (clock<<1); clk >>= 1) | ||
180 | clock <<= 1; | ||
181 | clk |= 0x100; | ||
182 | } | ||
183 | |||
184 | if (host->set_clk_div) | ||
185 | host->set_clk_div(host->pdev, (clk>>22) & 1); | ||
186 | |||
187 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); | ||
188 | } | ||
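The loop above searches for the largest divider that still stays at or below the requested rate, starting from f_min and shifting the divider word right once per doubling. A worked sketch of the arithmetic only, assuming pdata->hclk = 24 MHz so that f_min = 24 MHz / 512 = 46875 Hz (as set up in the probe below); the meaning of individual bits beyond what the masks show is not asserted here:

/* For new_clock = 3 MHz the loop doubles `clock` six times
 * (46875 -> 93750 -> ... -> 3000000), shifting clk right six times:
 *   clk = 0x80000080 >> 6 = 0x02000002, then clk |= 0x100 -> 0x02000102
 *   (clk >> 22) & 1 = 0      -> passed to the set_clk_div() callback
 *   clk & 0x1ff     = 0x102  -> written to CTL_SD_CARD_CLK_CTL
 */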
189 | |||
190 | static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) | ||
191 | { | ||
192 | struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); | ||
193 | |||
194 | /* implicit BUG_ON(!res) */ | ||
195 | if (resource_size(res) > 0x100) { | ||
196 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); | ||
197 | msleep(10); | ||
198 | } | ||
199 | |||
200 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & | ||
201 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
202 | msleep(10); | ||
203 | } | ||
204 | |||
205 | static void tmio_mmc_clk_start(struct tmio_mmc_host *host) | ||
206 | { | ||
207 | struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); | ||
208 | |||
209 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | | ||
210 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
211 | msleep(10); | ||
212 | |||
213 | /* implicit BUG_ON(!res) */ | ||
214 | if (resource_size(res) > 0x100) { | ||
215 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); | ||
216 | msleep(10); | ||
217 | } | ||
218 | } | ||
219 | |||
220 | static void tmio_mmc_reset(struct tmio_mmc_host *host) | ||
221 | { | ||
222 | struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); | ||
223 | |||
224 | /* FIXME - should we set the stop clock reg here? */ | ||
225 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); | ||
226 | /* implicit BUG_ON(!res) */ | ||
227 | if (resource_size(res) > 0x100) | ||
228 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); | ||
229 | msleep(10); | ||
230 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); | ||
231 | if (resource_size(res) > 0x100) | ||
232 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); | ||
233 | msleep(10); | ||
234 | } | ||
235 | |||
236 | static void tmio_mmc_reset_work(struct work_struct *work) | ||
237 | { | ||
238 | struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, | ||
239 | delayed_reset_work.work); | ||
240 | struct mmc_request *mrq; | ||
241 | unsigned long flags; | ||
242 | |||
243 | spin_lock_irqsave(&host->lock, flags); | ||
244 | mrq = host->mrq; | ||
245 | |||
246 | /* request already finished */ | ||
247 | if (!mrq | ||
248 | || time_is_after_jiffies(host->last_req_ts + | ||
249 | msecs_to_jiffies(2000))) { | ||
250 | spin_unlock_irqrestore(&host->lock, flags); | ||
251 | return; | ||
252 | } | ||
253 | |||
254 | dev_warn(&host->pdev->dev, | ||
255 | "timeout waiting for hardware interrupt (CMD%u)\n", | ||
256 | mrq->cmd->opcode); | ||
257 | |||
258 | if (host->data) | ||
259 | host->data->error = -ETIMEDOUT; | ||
260 | else if (host->cmd) | ||
261 | host->cmd->error = -ETIMEDOUT; | ||
262 | else | ||
263 | mrq->cmd->error = -ETIMEDOUT; | ||
264 | |||
265 | host->cmd = NULL; | ||
266 | host->data = NULL; | ||
267 | host->mrq = NULL; | ||
268 | host->force_pio = false; | ||
269 | |||
270 | spin_unlock_irqrestore(&host->lock, flags); | ||
271 | |||
272 | tmio_mmc_reset(host); | ||
273 | |||
274 | mmc_request_done(host->mmc, mrq); | ||
275 | } | ||
276 | |||
277 | static void tmio_mmc_finish_request(struct tmio_mmc_host *host) | ||
278 | { | ||
279 | struct mmc_request *mrq = host->mrq; | ||
280 | |||
281 | if (!mrq) | ||
282 | return; | ||
283 | |||
284 | host->mrq = NULL; | ||
285 | host->cmd = NULL; | ||
286 | host->data = NULL; | ||
287 | host->force_pio = false; | ||
288 | |||
289 | cancel_delayed_work(&host->delayed_reset_work); | ||
290 | |||
291 | mmc_request_done(host->mmc, mrq); | ||
292 | } | ||
293 | |||
294 | /* These are the bitmasks the tmio chip requires to implement the MMC response | ||
295 | * types. Note that R1 and R6 are the same in this scheme. */ | ||
296 | #define APP_CMD 0x0040 | ||
297 | #define RESP_NONE 0x0300 | ||
298 | #define RESP_R1 0x0400 | ||
299 | #define RESP_R1B 0x0500 | ||
300 | #define RESP_R2 0x0600 | ||
301 | #define RESP_R3 0x0700 | ||
302 | #define DATA_PRESENT 0x0800 | ||
303 | #define TRANSFER_READ 0x1000 | ||
304 | #define TRANSFER_MULTI 0x2000 | ||
305 | #define SECURITY_CMD 0x4000 | ||
306 | |||
307 | static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) | ||
308 | { | ||
309 | struct mmc_data *data = host->data; | ||
310 | int c = cmd->opcode; | ||
311 | |||
312 | /* Command 12 is handled by hardware */ | ||
313 | if (cmd->opcode == 12 && !cmd->arg) { | ||
314 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); | ||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | switch (mmc_resp_type(cmd)) { | ||
319 | case MMC_RSP_NONE: c |= RESP_NONE; break; | ||
320 | case MMC_RSP_R1: c |= RESP_R1; break; | ||
321 | case MMC_RSP_R1B: c |= RESP_R1B; break; | ||
322 | case MMC_RSP_R2: c |= RESP_R2; break; | ||
323 | case MMC_RSP_R3: c |= RESP_R3; break; | ||
324 | default: | ||
325 | pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); | ||
326 | return -EINVAL; | ||
327 | } | ||
328 | |||
329 | host->cmd = cmd; | ||
330 | |||
331 | /* FIXME - this seems to be OK commented out, but the spec suggests this bit | ||
332 | * should be set when issuing app commands. | ||
333 | * if(cmd->flags & MMC_FLAG_ACMD) | ||
334 | * c |= APP_CMD; | ||
335 | */ | ||
336 | if (data) { | ||
337 | c |= DATA_PRESENT; | ||
338 | if (data->blocks > 1) { | ||
339 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); | ||
340 | c |= TRANSFER_MULTI; | ||
341 | } | ||
342 | if (data->flags & MMC_DATA_READ) | ||
343 | c |= TRANSFER_READ; | ||
344 | } | ||
345 | |||
346 | tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD); | ||
347 | |||
348 | /* Fire off the command */ | ||
349 | sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); | ||
350 | sd_ctrl_write16(host, CTL_SD_CMD, c); | ||
351 | |||
352 | return 0; | ||
353 | } | ||
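For illustration, the command word written to CTL_SD_CMD is simply the opcode OR-ed with the bits defined above; the opcodes below are standard SD commands assumed for the example, not taken from this patch:

/* CMD17 (READ_SINGLE_BLOCK), R1 response, single data block:
 *   c = 17 | RESP_R1 | DATA_PRESENT | TRANSFER_READ
 *     = 0x11 | 0x0400 | 0x0800 | 0x1000 = 0x1c11
 * CMD18 (READ_MULTIPLE_BLOCK) additionally gets TRANSFER_MULTI (0x2000),
 * and 0x100 is written to CTL_STOP_INTERNAL_ACTION so the controller can
 * issue the stop on its own, matching the CMD12 short-cut above.
 */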
354 | |||
355 | /* | ||
356 | * This chip always returns (at least?) as much data as you ask for. | ||
357 | * I'm unsure what happens if you ask for less than a block. This should be | ||
358 | * looked into to ensure that a funny length read doesn't hose the controller. | ||
359 | */ | ||
360 | static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | ||
361 | { | ||
362 | struct mmc_data *data = host->data; | ||
363 | void *sg_virt; | ||
364 | unsigned short *buf; | ||
365 | unsigned int count; | ||
366 | unsigned long flags; | ||
367 | |||
368 | if ((host->chan_tx || host->chan_rx) && !host->force_pio) { | ||
369 | pr_err("PIO IRQ in DMA mode!\n"); | ||
370 | return; | ||
371 | } else if (!data) { | ||
372 | pr_debug("Spurious PIO IRQ\n"); | ||
373 | return; | ||
374 | } | ||
375 | |||
376 | sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); | ||
377 | buf = (unsigned short *)(sg_virt + host->sg_off); | ||
378 | |||
379 | count = host->sg_ptr->length - host->sg_off; | ||
380 | if (count > data->blksz) | ||
381 | count = data->blksz; | ||
382 | |||
383 | pr_debug("count: %08x offset: %08x flags %08x\n", | ||
384 | count, host->sg_off, data->flags); | ||
385 | |||
386 | /* Transfer the data */ | ||
387 | if (data->flags & MMC_DATA_READ) | ||
388 | sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
389 | else | ||
390 | sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
391 | |||
392 | host->sg_off += count; | ||
393 | |||
394 | tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); | ||
395 | |||
396 | if (host->sg_off == host->sg_ptr->length) | ||
397 | tmio_mmc_next_sg(host); | ||
398 | |||
399 | return; | ||
400 | } | ||
401 | |||
402 | static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host) | ||
403 | { | ||
404 | if (host->sg_ptr == &host->bounce_sg) { | ||
405 | unsigned long flags; | ||
406 | void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); | ||
407 | memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); | ||
408 | tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); | ||
409 | } | ||
410 | } | ||
411 | |||
412 | /* needs to be called with host->lock held */ | ||
413 | void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) | ||
414 | { | ||
415 | struct mmc_data *data = host->data; | ||
416 | struct mmc_command *stop; | ||
417 | |||
418 | host->data = NULL; | ||
419 | |||
420 | if (!data) { | ||
421 | dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); | ||
422 | return; | ||
423 | } | ||
424 | stop = data->stop; | ||
425 | |||
426 | /* FIXME - return correct transfer count on errors */ | ||
427 | if (!data->error) | ||
428 | data->bytes_xfered = data->blocks * data->blksz; | ||
429 | else | ||
430 | data->bytes_xfered = 0; | ||
431 | |||
432 | pr_debug("Completed data request\n"); | ||
433 | |||
434 | /* | ||
435 | * FIXME: other drivers allow an optional stop command of any given type | ||
436 | * which we don't do, as the chip can auto-generate them. | ||
437 | * Perhaps we can be smarter about when to use auto CMD12 and | ||
438 | * only issue the auto request when we know this is the desired | ||
439 | * stop command, allowing fallback to the stop command the | ||
440 | * upper layers expect. For now, we do what works. | ||
441 | */ | ||
442 | |||
443 | if (data->flags & MMC_DATA_READ) { | ||
444 | if (host->chan_rx && !host->force_pio) | ||
445 | tmio_mmc_check_bounce_buffer(host); | ||
446 | dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", | ||
447 | host->mrq); | ||
448 | } else { | ||
449 | dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", | ||
450 | host->mrq); | ||
451 | } | ||
452 | |||
453 | if (stop) { | ||
454 | if (stop->opcode == 12 && !stop->arg) | ||
455 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); | ||
456 | else | ||
457 | BUG(); | ||
458 | } | ||
459 | |||
460 | tmio_mmc_finish_request(host); | ||
461 | } | ||
462 | |||
463 | static void tmio_mmc_data_irq(struct tmio_mmc_host *host) | ||
464 | { | ||
465 | struct mmc_data *data; | ||
466 | spin_lock(&host->lock); | ||
467 | data = host->data; | ||
468 | |||
469 | if (!data) | ||
470 | goto out; | ||
471 | |||
472 | if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) { | ||
473 | /* | ||
474 | * Has all data been written out yet? Testing on SuperH showed | ||
475 | * that in most cases the first interrupt already arrives with the | ||
476 | * BUSY status bit clear, but on some operations, like mount or | ||
477 | * at the beginning of a write / sync / umount, there is one | ||
478 | * DATAEND interrupt with the BUSY bit set; in these cases | ||
479 | * waiting for one more interrupt fixes the problem. | ||
480 | */ | ||
481 | if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { | ||
482 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
483 | tasklet_schedule(&host->dma_complete); | ||
484 | } | ||
485 | } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) { | ||
486 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
487 | tasklet_schedule(&host->dma_complete); | ||
488 | } else { | ||
489 | tmio_mmc_do_data_irq(host); | ||
490 | tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP); | ||
491 | } | ||
492 | out: | ||
493 | spin_unlock(&host->lock); | ||
494 | } | ||
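Putting the branches above together, the DMA write-completion path behaves roughly like this (an illustrative timeline, not additional driver code):

/* DATAEND IRQ, TMIO_STAT_CMD_BUSY set   -> keep DATAEND enabled, wait again
 * DATAEND IRQ, TMIO_STAT_CMD_BUSY clear -> mask DATAEND, schedule dma_complete
 * dma_complete tasklet                  -> dma_unmap_sg(), then
 *                                          tmio_mmc_do_data_irq() finishes up
 */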
495 | |||
496 | static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, | ||
497 | unsigned int stat) | ||
498 | { | ||
499 | struct mmc_command *cmd = host->cmd; | ||
500 | int i, addr; | ||
501 | |||
502 | spin_lock(&host->lock); | ||
503 | |||
504 | if (!host->cmd) { | ||
505 | pr_debug("Spurious CMD irq\n"); | ||
506 | goto out; | ||
507 | } | ||
508 | |||
509 | host->cmd = NULL; | ||
510 | |||
511 | /* This controller is sicker than the PXA one. Not only do we need to | ||
512 | * drop the top 8 bits of the first response word, we also need to | ||
513 | * modify the order of the response for short response command types. | ||
514 | */ | ||
515 | |||
516 | for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) | ||
517 | cmd->resp[i] = sd_ctrl_read32(host, addr); | ||
518 | |||
519 | if (cmd->flags & MMC_RSP_136) { | ||
520 | cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); | ||
521 | cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); | ||
522 | cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); | ||
523 | cmd->resp[3] <<= 8; | ||
524 | } else if (cmd->flags & MMC_RSP_R3) { | ||
525 | cmd->resp[0] = cmd->resp[3]; | ||
526 | } | ||
527 | |||
528 | if (stat & TMIO_STAT_CMDTIMEOUT) | ||
529 | cmd->error = -ETIMEDOUT; | ||
530 | else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) | ||
531 | cmd->error = -EILSEQ; | ||
532 | |||
533 | /* If there is data to handle, we enable data IRQs here, and | ||
534 | * we will ultimately finish the request in the data_end handler. | ||
535 | * If there's no data or we encountered an error, finish now. | ||
536 | */ | ||
537 | if (host->data && !cmd->error) { | ||
538 | if (host->data->flags & MMC_DATA_READ) { | ||
539 | if (host->force_pio || !host->chan_rx) | ||
540 | tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP); | ||
541 | else | ||
542 | tasklet_schedule(&host->dma_issue); | ||
543 | } else { | ||
544 | if (host->force_pio || !host->chan_tx) | ||
545 | tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP); | ||
546 | else | ||
547 | tasklet_schedule(&host->dma_issue); | ||
548 | } | ||
549 | } else { | ||
550 | tmio_mmc_finish_request(host); | ||
551 | } | ||
552 | |||
553 | out: | ||
554 | spin_unlock(&host->lock); | ||
555 | } | ||
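The 136-bit fix-up above amounts to shifting the whole four-word register image up by one byte. A numeric sketch with made-up register contents:

/* Raw reads  (resp[0..3]): 0x00aabbcc 0xddeeff00 0x11223344 0x55667788
 * After fix-up:            0xaabbccdd 0xeeff0011 0x22334455 0x66778800
 * Each word takes the top byte of the following one; resp[3]'s low byte
 * ends up zero.
 */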
556 | |||
557 | static irqreturn_t tmio_mmc_irq(int irq, void *devid) | ||
558 | { | ||
559 | struct tmio_mmc_host *host = devid; | ||
560 | struct tmio_mmc_data *pdata = host->pdata; | ||
561 | unsigned int ireg, irq_mask, status; | ||
562 | unsigned int sdio_ireg, sdio_irq_mask, sdio_status; | ||
563 | |||
564 | pr_debug("MMC IRQ begin\n"); | ||
565 | |||
566 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
567 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
568 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
569 | |||
570 | sdio_ireg = 0; | ||
571 | if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
572 | sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); | ||
573 | sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); | ||
574 | sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; | ||
575 | |||
576 | sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); | ||
577 | |||
578 | if (sdio_ireg && !host->sdio_irq_enabled) { | ||
579 | pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", | ||
580 | sdio_status, sdio_irq_mask, sdio_ireg); | ||
581 | tmio_mmc_enable_sdio_irq(host->mmc, 0); | ||
582 | goto out; | ||
583 | } | ||
584 | |||
585 | if (host->mmc->caps & MMC_CAP_SDIO_IRQ && | ||
586 | sdio_ireg & TMIO_SDIO_STAT_IOIRQ) | ||
587 | mmc_signal_sdio_irq(host->mmc); | ||
588 | |||
589 | if (sdio_ireg) | ||
590 | goto out; | ||
591 | } | ||
592 | |||
593 | pr_debug_status(status); | ||
594 | pr_debug_status(ireg); | ||
595 | |||
596 | if (!ireg) { | ||
597 | tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask); | ||
598 | |||
599 | pr_warning("tmio_mmc: Spurious irq, disabling! " | ||
600 | "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); | ||
601 | pr_debug_status(status); | ||
602 | |||
603 | goto out; | ||
604 | } | ||
605 | |||
606 | while (ireg) { | ||
607 | /* Card insert / remove attempts */ | ||
608 | if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { | ||
609 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | | ||
610 | TMIO_STAT_CARD_REMOVE); | ||
611 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
612 | } | ||
613 | |||
614 | /* CRC and other errors */ | ||
615 | /* if (ireg & TMIO_STAT_ERR_IRQ) | ||
616 | * handled |= tmio_error_irq(host, irq, stat); | ||
617 | */ | ||
618 | |||
619 | /* Command completion */ | ||
620 | if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { | ||
621 | tmio_mmc_ack_mmc_irqs(host, | ||
622 | TMIO_STAT_CMDRESPEND | | ||
623 | TMIO_STAT_CMDTIMEOUT); | ||
624 | tmio_mmc_cmd_irq(host, status); | ||
625 | } | ||
626 | |||
627 | /* Data transfer */ | ||
628 | if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { | ||
629 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); | ||
630 | tmio_mmc_pio_irq(host); | ||
631 | } | ||
632 | |||
633 | /* Data transfer completion */ | ||
634 | if (ireg & TMIO_STAT_DATAEND) { | ||
635 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
636 | tmio_mmc_data_irq(host); | ||
637 | } | ||
638 | |||
639 | /* Check status - keep going until we've handled it all */ | ||
640 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
641 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
642 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
643 | |||
644 | pr_debug("Status at end of loop: %08x\n", status); | ||
645 | pr_debug_status(status); | ||
646 | } | ||
647 | pr_debug("MMC IRQ end\n"); | ||
648 | |||
649 | out: | ||
650 | return IRQ_HANDLED; | ||
651 | } | ||
652 | |||
653 | static int tmio_mmc_start_data(struct tmio_mmc_host *host, | ||
654 | struct mmc_data *data) | ||
655 | { | ||
656 | struct tmio_mmc_data *pdata = host->pdata; | ||
657 | |||
658 | pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", | ||
659 | data->blksz, data->blocks); | ||
660 | |||
661 | /* Some hardware cannot perform 2 byte requests in 4 bit mode */ | ||
662 | if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { | ||
663 | int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; | ||
664 | |||
665 | if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { | ||
666 | pr_err("%s: %d byte block unsupported in 4 bit mode\n", | ||
667 | mmc_hostname(host->mmc), data->blksz); | ||
668 | return -EINVAL; | ||
669 | } | ||
670 | } | ||
671 | |||
672 | tmio_mmc_init_sg(host, data); | ||
673 | host->data = data; | ||
674 | |||
675 | /* Set transfer length / blocksize */ | ||
676 | sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); | ||
677 | sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); | ||
678 | |||
679 | tmio_mmc_start_dma(host, data); | ||
680 | |||
681 | return 0; | ||
682 | } | ||
683 | |||
684 | /* Process requests from the MMC layer */ | ||
685 | static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) | ||
686 | { | ||
687 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
688 | int ret; | ||
689 | |||
690 | if (host->mrq) | ||
691 | pr_debug("request not null\n"); | ||
692 | |||
693 | host->last_req_ts = jiffies; | ||
694 | wmb(); | ||
695 | host->mrq = mrq; | ||
696 | |||
697 | if (mrq->data) { | ||
698 | ret = tmio_mmc_start_data(host, mrq->data); | ||
699 | if (ret) | ||
700 | goto fail; | ||
701 | } | ||
702 | |||
703 | ret = tmio_mmc_start_command(host, mrq->cmd); | ||
704 | if (!ret) { | ||
705 | schedule_delayed_work(&host->delayed_reset_work, | ||
706 | msecs_to_jiffies(2000)); | ||
707 | return; | ||
708 | } | ||
709 | |||
710 | fail: | ||
711 | host->mrq = NULL; | ||
712 | host->force_pio = false; | ||
713 | mrq->cmd->error = ret; | ||
714 | mmc_request_done(mmc, mrq); | ||
715 | } | ||
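For context, the delayed work scheduled here pairs with tmio_mmc_finish_request() and tmio_mmc_reset_work() earlier in this file; the request lifetime, as implemented above, is roughly:

/* tmio_mmc_request()         -> record last_req_ts, start data + command,
 *                               arm delayed_reset_work for +2 s
 * normal completion          -> tmio_mmc_finish_request() cancels the work
 *                               and calls mmc_request_done()
 * no completion within ~2 s  -> tmio_mmc_reset_work() marks -ETIMEDOUT,
 *                               resets the controller, completes the request
 */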
716 | |||
717 | /* Set MMC clock / power. | ||
718 | * Note: This controller uses a simple divider scheme, therefore it cannot | ||
719 | * run an MMC card at full speed (20 MHz). The max clock is 24 MHz on SD, but | ||
720 | * as MMC won't run that fast, it has to be clocked at 12 MHz, which is the | ||
721 | * next slowest setting. | ||
722 | */ | ||
723 | static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
724 | { | ||
725 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
726 | |||
727 | if (ios->clock) | ||
728 | tmio_mmc_set_clock(host, ios->clock); | ||
729 | |||
730 | /* Power sequence - OFF -> UP -> ON */ | ||
731 | if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { | ||
732 | /* power down SD bus */ | ||
733 | if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) | ||
734 | host->set_pwr(host->pdev, 0); | ||
735 | tmio_mmc_clk_stop(host); | ||
736 | } else if (ios->power_mode == MMC_POWER_UP) { | ||
737 | /* power up SD bus */ | ||
738 | if (host->set_pwr) | ||
739 | host->set_pwr(host->pdev, 1); | ||
740 | } else { | ||
741 | /* start bus clock */ | ||
742 | tmio_mmc_clk_start(host); | ||
743 | } | ||
744 | |||
745 | switch (ios->bus_width) { | ||
746 | case MMC_BUS_WIDTH_1: | ||
747 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); | ||
748 | break; | ||
749 | case MMC_BUS_WIDTH_4: | ||
750 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); | ||
751 | break; | ||
752 | } | ||
753 | |||
754 | /* Let things settle. Delay taken from the WinCE driver */ | ||
755 | udelay(140); | ||
756 | } | ||
757 | |||
758 | static int tmio_mmc_get_ro(struct mmc_host *mmc) | ||
759 | { | ||
760 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
761 | struct tmio_mmc_data *pdata = host->pdata; | ||
762 | |||
763 | return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || | ||
764 | !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); | ||
765 | } | ||
766 | |||
767 | static int tmio_mmc_get_cd(struct mmc_host *mmc) | ||
768 | { | ||
769 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
770 | struct tmio_mmc_data *pdata = host->pdata; | ||
771 | |||
772 | if (!pdata->get_cd) | ||
773 | return -ENOSYS; | ||
774 | else | ||
775 | return pdata->get_cd(host->pdev); | ||
776 | } | ||
777 | |||
778 | static const struct mmc_host_ops tmio_mmc_ops = { | ||
779 | .request = tmio_mmc_request, | ||
780 | .set_ios = tmio_mmc_set_ios, | ||
781 | .get_ro = tmio_mmc_get_ro, | ||
782 | .get_cd = tmio_mmc_get_cd, | ||
783 | .enable_sdio_irq = tmio_mmc_enable_sdio_irq, | ||
784 | }; | ||
785 | |||
786 | int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, | ||
787 | struct platform_device *pdev, | ||
788 | struct tmio_mmc_data *pdata) | ||
789 | { | ||
790 | struct tmio_mmc_host *_host; | ||
791 | struct mmc_host *mmc; | ||
792 | struct resource *res_ctl; | ||
793 | int ret; | ||
794 | u32 irq_mask = TMIO_MASK_CMD; | ||
795 | |||
796 | res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
797 | if (!res_ctl) | ||
798 | return -EINVAL; | ||
799 | |||
800 | mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev); | ||
801 | if (!mmc) | ||
802 | return -ENOMEM; | ||
803 | |||
804 | _host = mmc_priv(mmc); | ||
805 | _host->pdata = pdata; | ||
806 | _host->mmc = mmc; | ||
807 | _host->pdev = pdev; | ||
808 | platform_set_drvdata(pdev, mmc); | ||
809 | |||
810 | _host->set_pwr = pdata->set_pwr; | ||
811 | _host->set_clk_div = pdata->set_clk_div; | ||
812 | |||
813 | /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ | ||
814 | _host->bus_shift = resource_size(res_ctl) >> 10; | ||
815 | |||
816 | _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); | ||
817 | if (!_host->ctl) { | ||
818 | ret = -ENOMEM; | ||
819 | goto host_free; | ||
820 | } | ||
821 | |||
822 | mmc->ops = &tmio_mmc_ops; | ||
823 | mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; | ||
824 | mmc->f_max = pdata->hclk; | ||
825 | mmc->f_min = mmc->f_max / 512; | ||
826 | mmc->max_segs = 32; | ||
827 | mmc->max_blk_size = 512; | ||
828 | mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * | ||
829 | mmc->max_segs; | ||
830 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | ||
831 | mmc->max_seg_size = mmc->max_req_size; | ||
832 | if (pdata->ocr_mask) | ||
833 | mmc->ocr_avail = pdata->ocr_mask; | ||
834 | else | ||
835 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | ||
836 | |||
837 | tmio_mmc_clk_stop(_host); | ||
838 | tmio_mmc_reset(_host); | ||
839 | |||
840 | ret = platform_get_irq(pdev, 0); | ||
841 | if (ret < 0) | ||
842 | goto unmap_ctl; | ||
843 | |||
844 | _host->irq = ret; | ||
845 | |||
846 | tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); | ||
847 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
848 | tmio_mmc_enable_sdio_irq(mmc, 0); | ||
849 | |||
850 | ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED | | ||
851 | IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host); | ||
852 | if (ret) | ||
853 | goto unmap_ctl; | ||
854 | |||
855 | spin_lock_init(&_host->lock); | ||
856 | |||
857 | /* Init delayed work for request timeouts */ | ||
858 | INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); | ||
859 | |||
860 | /* See if we also get DMA */ | ||
861 | tmio_mmc_request_dma(_host, pdata); | ||
862 | |||
863 | mmc_add_host(mmc); | ||
864 | |||
865 | /* Unmask the IRQs we want to know about */ | ||
866 | if (!_host->chan_rx) | ||
867 | irq_mask |= TMIO_MASK_READOP; | ||
868 | if (!_host->chan_tx) | ||
869 | irq_mask |= TMIO_MASK_WRITEOP; | ||
870 | |||
871 | tmio_mmc_enable_mmc_irqs(_host, irq_mask); | ||
872 | |||
873 | *host = _host; | ||
874 | |||
875 | return 0; | ||
876 | |||
877 | unmap_ctl: | ||
878 | iounmap(_host->ctl); | ||
879 | host_free: | ||
880 | mmc_free_host(mmc); | ||
881 | |||
882 | return ret; | ||
883 | } | ||
884 | EXPORT_SYMBOL(tmio_mmc_host_probe); | ||
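For reference, assuming the usual 4 KiB PAGE_CACHE_SIZE, the transfer limits computed in the probe above work out to:

/* max_segs      = 32
 * max_blk_size  = 512
 * max_blk_count = (4096 / 512) * 32 = 256
 * max_req_size  = 512 * 256         = 131072 bytes (128 KiB)
 * max_seg_size  = max_req_size      = 128 KiB
 */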
885 | |||
886 | void tmio_mmc_host_remove(struct tmio_mmc_host *host) | ||
887 | { | ||
888 | mmc_remove_host(host->mmc); | ||
889 | cancel_delayed_work_sync(&host->delayed_reset_work); | ||
890 | tmio_mmc_release_dma(host); | ||
891 | free_irq(host->irq, host); | ||
892 | iounmap(host->ctl); | ||
893 | mmc_free_host(host->mmc); | ||
894 | } | ||
895 | EXPORT_SYMBOL(tmio_mmc_host_remove); | ||
896 | |||
897 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 8c5b4881ccd6..4dfe2c02ea91 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c | |||
@@ -1087,14 +1087,13 @@ static int __devinit via_sd_probe(struct pci_dev *pcidev, | |||
1087 | struct mmc_host *mmc; | 1087 | struct mmc_host *mmc; |
1088 | struct via_crdr_mmc_host *sdhost; | 1088 | struct via_crdr_mmc_host *sdhost; |
1089 | u32 base, len; | 1089 | u32 base, len; |
1090 | u8 rev, gatt; | 1090 | u8 gatt; |
1091 | int ret; | 1091 | int ret; |
1092 | 1092 | ||
1093 | pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev); | ||
1094 | pr_info(DRV_NAME | 1093 | pr_info(DRV_NAME |
1095 | ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", | 1094 | ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", |
1096 | pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, | 1095 | pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, |
1097 | (int)rev); | 1096 | (int)pcidev->revision); |
1098 | 1097 | ||
1099 | ret = pci_enable_device(pcidev); | 1098 | ret = pci_enable_device(pcidev); |
1100 | if (ret) | 1099 | if (ret) |
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 22abfb39d813..68d45ba2d9b9 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
@@ -1237,8 +1237,17 @@ static int bfin_mac_enable(struct phy_device *phydev) | |||
1237 | 1237 | ||
1238 | if (phydev->interface == PHY_INTERFACE_MODE_RMII) { | 1238 | if (phydev->interface == PHY_INTERFACE_MODE_RMII) { |
1239 | opmode |= RMII; /* For Now only 100MBit are supported */ | 1239 | opmode |= RMII; /* For Now only 100MBit are supported */ |
1240 | #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2 | 1240 | #if defined(CONFIG_BF537) || defined(CONFIG_BF536) |
1241 | opmode |= TE; | 1241 | if (__SILICON_REVISION__ < 3) { |
1242 | /* | ||
1243 | * This isn't publicly documented (fun times!), but in | ||
1244 | * silicon <=0.2, the RX and TX pins are clocked together. | ||
1245 | * So in order to receive, we must enable the transmit side | ||
1246 | * as well. This will cause a spurious TX interrupt too, | ||
1247 | * but we can easily consume that. | ||
1248 | */ | ||
1249 | opmode |= TE; | ||
1250 | } | ||
1242 | #endif | 1251 | #endif |
1243 | } | 1252 | } |
1244 | 1253 | ||
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index d1865cc97313..8e6d618b5305 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -8317,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = { | |||
8317 | #endif | 8317 | #endif |
8318 | }; | 8318 | }; |
8319 | 8319 | ||
8320 | static void inline vlan_features_add(struct net_device *dev, u32 flags) | 8320 | static inline void vlan_features_add(struct net_device *dev, u32 flags) |
8321 | { | 8321 | { |
8322 | dev->vlan_features |= flags; | 8322 | dev->vlan_features |= flags; |
8323 | } | 8323 | } |
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 110eda01843c..31552959aed7 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
@@ -588,14 +588,9 @@ static void c_can_chip_config(struct net_device *dev) | |||
588 | { | 588 | { |
589 | struct c_can_priv *priv = netdev_priv(dev); | 589 | struct c_can_priv *priv = netdev_priv(dev); |
590 | 590 | ||
591 | if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) | 591 | /* enable automatic retransmission */ |
592 | /* disable automatic retransmission */ | 592 | priv->write_reg(priv, &priv->regs->control, |
593 | priv->write_reg(priv, &priv->regs->control, | 593 | CONTROL_ENABLE_AR); |
594 | CONTROL_DISABLE_AR); | ||
595 | else | ||
596 | /* enable automatic retransmission */ | ||
597 | priv->write_reg(priv, &priv->regs->control, | ||
598 | CONTROL_ENABLE_AR); | ||
599 | 594 | ||
600 | if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY & | 595 | if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY & |
601 | CAN_CTRLMODE_LOOPBACK)) { | 596 | CAN_CTRLMODE_LOOPBACK)) { |
@@ -704,7 +699,6 @@ static void c_can_do_tx(struct net_device *dev) | |||
704 | 699 | ||
705 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { | 700 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { |
706 | msg_obj_no = get_tx_echo_msg_obj(priv); | 701 | msg_obj_no = get_tx_echo_msg_obj(priv); |
707 | c_can_inval_msg_object(dev, 0, msg_obj_no); | ||
708 | val = c_can_read_reg32(priv, &priv->regs->txrqst1); | 702 | val = c_can_read_reg32(priv, &priv->regs->txrqst1); |
709 | if (!(val & (1 << msg_obj_no))) { | 703 | if (!(val & (1 << msg_obj_no))) { |
710 | can_get_echo_skb(dev, | 704 | can_get_echo_skb(dev, |
@@ -713,6 +707,7 @@ static void c_can_do_tx(struct net_device *dev) | |||
713 | &priv->regs->ifregs[0].msg_cntrl) | 707 | &priv->regs->ifregs[0].msg_cntrl) |
714 | & IF_MCONT_DLC_MASK; | 708 | & IF_MCONT_DLC_MASK; |
715 | stats->tx_packets++; | 709 | stats->tx_packets++; |
710 | c_can_inval_msg_object(dev, 0, msg_obj_no); | ||
716 | } | 711 | } |
717 | } | 712 | } |
718 | 713 | ||
@@ -1112,8 +1107,7 @@ struct net_device *alloc_c_can_dev(void) | |||
1112 | priv->can.bittiming_const = &c_can_bittiming_const; | 1107 | priv->can.bittiming_const = &c_can_bittiming_const; |
1113 | priv->can.do_set_mode = c_can_set_mode; | 1108 | priv->can.do_set_mode = c_can_set_mode; |
1114 | priv->can.do_get_berr_counter = c_can_get_berr_counter; | 1109 | priv->can.do_get_berr_counter = c_can_get_berr_counter; |
1115 | priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT | | 1110 | priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | |
1116 | CAN_CTRLMODE_LOOPBACK | | ||
1117 | CAN_CTRLMODE_LISTENONLY | | 1111 | CAN_CTRLMODE_LISTENONLY | |
1118 | CAN_CTRLMODE_BERR_REPORTING; | 1112 | CAN_CTRLMODE_BERR_REPORTING; |
1119 | 1113 | ||
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index e629b961ae2d..cc90824f2c9c 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c | |||
@@ -73,7 +73,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) | |||
73 | void __iomem *addr; | 73 | void __iomem *addr; |
74 | struct net_device *dev; | 74 | struct net_device *dev; |
75 | struct c_can_priv *priv; | 75 | struct c_can_priv *priv; |
76 | struct resource *mem, *irq; | 76 | struct resource *mem; |
77 | int irq; | ||
77 | #ifdef CONFIG_HAVE_CLK | 78 | #ifdef CONFIG_HAVE_CLK |
78 | struct clk *clk; | 79 | struct clk *clk; |
79 | 80 | ||
@@ -88,8 +89,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) | |||
88 | 89 | ||
89 | /* get the platform data */ | 90 | /* get the platform data */ |
90 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 91 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
91 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 92 | irq = platform_get_irq(pdev, 0); |
92 | if (!mem || (irq <= 0)) { | 93 | if (!mem || irq <= 0) { |
93 | ret = -ENODEV; | 94 | ret = -ENODEV; |
94 | goto exit_free_clk; | 95 | goto exit_free_clk; |
95 | } | 96 | } |
@@ -117,7 +118,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) | |||
117 | 118 | ||
118 | priv = netdev_priv(dev); | 119 | priv = netdev_priv(dev); |
119 | 120 | ||
120 | dev->irq = irq->start; | 121 | dev->irq = irq; |
121 | priv->regs = addr; | 122 | priv->regs = addr; |
122 | #ifdef CONFIG_HAVE_CLK | 123 | #ifdef CONFIG_HAVE_CLK |
123 | priv->can.clock.freq = clk_get_rate(clk); | 124 | priv->can.clock.freq = clk_get_rate(clk); |
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 4d538a4e9d55..910893143295 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c | |||
@@ -1983,14 +1983,20 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | |||
1983 | { | 1983 | { |
1984 | struct port_info *pi = netdev_priv(dev); | 1984 | struct port_info *pi = netdev_priv(dev); |
1985 | struct adapter *adapter = pi->adapter; | 1985 | struct adapter *adapter = pi->adapter; |
1986 | struct qset_params *qsp = &adapter->params.sge.qset[0]; | 1986 | struct qset_params *qsp; |
1987 | struct sge_qset *qs = &adapter->sge.qs[0]; | 1987 | struct sge_qset *qs; |
1988 | int i; | ||
1988 | 1989 | ||
1989 | if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) | 1990 | if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) |
1990 | return -EINVAL; | 1991 | return -EINVAL; |
1991 | 1992 | ||
1992 | qsp->coalesce_usecs = c->rx_coalesce_usecs; | 1993 | for (i = 0; i < pi->nqsets; i++) { |
1993 | t3_update_qset_coalesce(qs, qsp); | 1994 | qsp = &adapter->params.sge.qset[i]; |
1995 | qs = &adapter->sge.qs[i]; | ||
1996 | qsp->coalesce_usecs = c->rx_coalesce_usecs; | ||
1997 | t3_update_qset_coalesce(qs, qsp); | ||
1998 | } | ||
1999 | |||
1994 | return 0; | 2000 | return 0; |
1995 | } | 2001 | } |
1996 | 2002 | ||
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 317708113601..b7af5bab9937 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -621,9 +621,9 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) | |||
621 | /* change in wol state, update IRQ state */ | 621 | /* change in wol state, update IRQ state */ |
622 | 622 | ||
623 | if (!dm->wake_state) | 623 | if (!dm->wake_state) |
624 | set_irq_wake(dm->irq_wake, 1); | 624 | irq_set_irq_wake(dm->irq_wake, 1); |
625 | else if (dm->wake_state & !opts) | 625 | else if (dm->wake_state & !opts) |
626 | set_irq_wake(dm->irq_wake, 0); | 626 | irq_set_irq_wake(dm->irq_wake, 0); |
627 | } | 627 | } |
628 | 628 | ||
629 | dm->wake_state = opts; | 629 | dm->wake_state = opts; |
@@ -1424,13 +1424,13 @@ dm9000_probe(struct platform_device *pdev) | |||
1424 | } else { | 1424 | } else { |
1425 | 1425 | ||
1426 | /* test to see if irq is really wakeup capable */ | 1426 | /* test to see if irq is really wakeup capable */ |
1427 | ret = set_irq_wake(db->irq_wake, 1); | 1427 | ret = irq_set_irq_wake(db->irq_wake, 1); |
1428 | if (ret) { | 1428 | if (ret) { |
1429 | dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", | 1429 | dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", |
1430 | db->irq_wake, ret); | 1430 | db->irq_wake, ret); |
1431 | ret = 0; | 1431 | ret = 0; |
1432 | } else { | 1432 | } else { |
1433 | set_irq_wake(db->irq_wake, 0); | 1433 | irq_set_irq_wake(db->irq_wake, 0); |
1434 | db->wake_supported = 1; | 1434 | db->wake_supported = 1; |
1435 | } | 1435 | } |
1436 | } | 1436 | } |
diff --git a/drivers/net/jme.c b/drivers/net/jme.c index f690474f4409..994c80939c7a 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c | |||
@@ -273,7 +273,7 @@ jme_clear_pm(struct jme_adapter *jme) | |||
273 | { | 273 | { |
274 | jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); | 274 | jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); |
275 | pci_set_power_state(jme->pdev, PCI_D0); | 275 | pci_set_power_state(jme->pdev, PCI_D0); |
276 | pci_enable_wake(jme->pdev, PCI_D0, false); | 276 | device_set_wakeup_enable(&jme->pdev->dev, false); |
277 | } | 277 | } |
278 | 278 | ||
279 | static int | 279 | static int |
@@ -2538,6 +2538,8 @@ jme_set_wol(struct net_device *netdev, | |||
2538 | 2538 | ||
2539 | jwrite32(jme, JME_PMCS, jme->reg_pmcs); | 2539 | jwrite32(jme, JME_PMCS, jme->reg_pmcs); |
2540 | 2540 | ||
2541 | device_set_wakeup_enable(&jme->pdev->dev, jme->reg_pmcs); | ||
2542 | |||
2541 | return 0; | 2543 | return 0; |
2542 | } | 2544 | } |
2543 | 2545 | ||
@@ -3172,9 +3174,9 @@ jme_shutdown(struct pci_dev *pdev) | |||
3172 | } | 3174 | } |
3173 | 3175 | ||
3174 | #ifdef CONFIG_PM | 3176 | #ifdef CONFIG_PM |
3175 | static int | 3177 | static int jme_suspend(struct device *dev) |
3176 | jme_suspend(struct pci_dev *pdev, pm_message_t state) | ||
3177 | { | 3178 | { |
3179 | struct pci_dev *pdev = to_pci_dev(dev); | ||
3178 | struct net_device *netdev = pci_get_drvdata(pdev); | 3180 | struct net_device *netdev = pci_get_drvdata(pdev); |
3179 | struct jme_adapter *jme = netdev_priv(netdev); | 3181 | struct jme_adapter *jme = netdev_priv(netdev); |
3180 | 3182 | ||
@@ -3206,22 +3208,18 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3206 | tasklet_hi_enable(&jme->rxclean_task); | 3208 | tasklet_hi_enable(&jme->rxclean_task); |
3207 | tasklet_hi_enable(&jme->rxempty_task); | 3209 | tasklet_hi_enable(&jme->rxempty_task); |
3208 | 3210 | ||
3209 | pci_save_state(pdev); | ||
3210 | jme_powersave_phy(jme); | 3211 | jme_powersave_phy(jme); |
3211 | pci_enable_wake(jme->pdev, PCI_D3hot, true); | ||
3212 | pci_set_power_state(pdev, PCI_D3hot); | ||
3213 | 3212 | ||
3214 | return 0; | 3213 | return 0; |
3215 | } | 3214 | } |
3216 | 3215 | ||
3217 | static int | 3216 | static int jme_resume(struct device *dev) |
3218 | jme_resume(struct pci_dev *pdev) | ||
3219 | { | 3217 | { |
3218 | struct pci_dev *pdev = to_pci_dev(dev); | ||
3220 | struct net_device *netdev = pci_get_drvdata(pdev); | 3219 | struct net_device *netdev = pci_get_drvdata(pdev); |
3221 | struct jme_adapter *jme = netdev_priv(netdev); | 3220 | struct jme_adapter *jme = netdev_priv(netdev); |
3222 | 3221 | ||
3223 | jme_clear_pm(jme); | 3222 | jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); |
3224 | pci_restore_state(pdev); | ||
3225 | 3223 | ||
3226 | jme_phy_on(jme); | 3224 | jme_phy_on(jme); |
3227 | if (test_bit(JME_FLAG_SSET, &jme->flags)) | 3225 | if (test_bit(JME_FLAG_SSET, &jme->flags)) |
@@ -3238,6 +3236,13 @@ jme_resume(struct pci_dev *pdev) | |||
3238 | 3236 | ||
3239 | return 0; | 3237 | return 0; |
3240 | } | 3238 | } |
3239 | |||
3240 | static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume); | ||
3241 | #define JME_PM_OPS (&jme_pm_ops) | ||
3242 | |||
3243 | #else | ||
3244 | |||
3245 | #define JME_PM_OPS NULL | ||
3241 | #endif | 3246 | #endif |
3242 | 3247 | ||
3243 | static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { | 3248 | static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { |
@@ -3251,11 +3256,8 @@ static struct pci_driver jme_driver = { | |||
3251 | .id_table = jme_pci_tbl, | 3256 | .id_table = jme_pci_tbl, |
3252 | .probe = jme_init_one, | 3257 | .probe = jme_init_one, |
3253 | .remove = __devexit_p(jme_remove_one), | 3258 | .remove = __devexit_p(jme_remove_one), |
3254 | #ifdef CONFIG_PM | ||
3255 | .suspend = jme_suspend, | ||
3256 | .resume = jme_resume, | ||
3257 | #endif /* CONFIG_PM */ | ||
3258 | .shutdown = jme_shutdown, | 3259 | .shutdown = jme_shutdown, |
3260 | .driver.pm = JME_PM_OPS, | ||
3259 | }; | 3261 | }; |
3260 | 3262 | ||
3261 | static int __init | 3263 | static int __init |
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c index 540a8dcbcc46..7f7d5708a658 100644 --- a/drivers/net/ksz884x.c +++ b/drivers/net/ksz884x.c | |||
@@ -4898,7 +4898,7 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev) | |||
4898 | goto unlock; | 4898 | goto unlock; |
4899 | } | 4899 | } |
4900 | skb_copy_and_csum_dev(org_skb, skb->data); | 4900 | skb_copy_and_csum_dev(org_skb, skb->data); |
4901 | org_skb->ip_summed = 0; | 4901 | org_skb->ip_summed = CHECKSUM_NONE; |
4902 | skb->len = org_skb->len; | 4902 | skb->len = org_skb->len; |
4903 | copy_old_skb(org_skb, skb); | 4903 | copy_old_skb(org_skb, skb); |
4904 | } | 4904 | } |
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 5762ebde4455..4f158baa0246 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -742,6 +742,9 @@ int mlx4_en_start_port(struct net_device *dev) | |||
742 | 0, MLX4_PROT_ETH)) | 742 | 0, MLX4_PROT_ETH)) |
743 | mlx4_warn(mdev, "Failed Attaching Broadcast\n"); | 743 | mlx4_warn(mdev, "Failed Attaching Broadcast\n"); |
744 | 744 | ||
745 | /* Must redo promiscuous mode setup. */ | ||
746 | priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); | ||
747 | |||
745 | /* Schedule multicast task to populate multicast list */ | 748 | /* Schedule multicast task to populate multicast list */ |
746 | queue_work(mdev->workqueue, &priv->mcast_task); | 749 | queue_work(mdev->workqueue, &priv->mcast_task); |
747 | 750 | ||
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 1f4e8680a96a..673dc600c891 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -1312,17 +1312,26 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev, | |||
1312 | * page into an skb */ | 1312 | * page into an skb */ |
1313 | 1313 | ||
1314 | static inline int | 1314 | static inline int |
1315 | myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, | 1315 | myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum, |
1316 | int bytes, int len, __wsum csum) | 1316 | int lro_enabled) |
1317 | { | 1317 | { |
1318 | struct myri10ge_priv *mgp = ss->mgp; | 1318 | struct myri10ge_priv *mgp = ss->mgp; |
1319 | struct sk_buff *skb; | 1319 | struct sk_buff *skb; |
1320 | struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; | 1320 | struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; |
1321 | int i, idx, hlen, remainder; | 1321 | struct myri10ge_rx_buf *rx; |
1322 | int i, idx, hlen, remainder, bytes; | ||
1322 | struct pci_dev *pdev = mgp->pdev; | 1323 | struct pci_dev *pdev = mgp->pdev; |
1323 | struct net_device *dev = mgp->dev; | 1324 | struct net_device *dev = mgp->dev; |
1324 | u8 *va; | 1325 | u8 *va; |
1325 | 1326 | ||
1327 | if (len <= mgp->small_bytes) { | ||
1328 | rx = &ss->rx_small; | ||
1329 | bytes = mgp->small_bytes; | ||
1330 | } else { | ||
1331 | rx = &ss->rx_big; | ||
1332 | bytes = mgp->big_bytes; | ||
1333 | } | ||
1334 | |||
1326 | len += MXGEFW_PAD; | 1335 | len += MXGEFW_PAD; |
1327 | idx = rx->cnt & rx->mask; | 1336 | idx = rx->cnt & rx->mask; |
1328 | va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; | 1337 | va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; |
@@ -1341,7 +1350,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, | |||
1341 | remainder -= MYRI10GE_ALLOC_SIZE; | 1350 | remainder -= MYRI10GE_ALLOC_SIZE; |
1342 | } | 1351 | } |
1343 | 1352 | ||
1344 | if (dev->features & NETIF_F_LRO) { | 1353 | if (lro_enabled) { |
1345 | rx_frags[0].page_offset += MXGEFW_PAD; | 1354 | rx_frags[0].page_offset += MXGEFW_PAD; |
1346 | rx_frags[0].size -= MXGEFW_PAD; | 1355 | rx_frags[0].size -= MXGEFW_PAD; |
1347 | len -= MXGEFW_PAD; | 1356 | len -= MXGEFW_PAD; |
@@ -1463,7 +1472,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) | |||
1463 | { | 1472 | { |
1464 | struct myri10ge_rx_done *rx_done = &ss->rx_done; | 1473 | struct myri10ge_rx_done *rx_done = &ss->rx_done; |
1465 | struct myri10ge_priv *mgp = ss->mgp; | 1474 | struct myri10ge_priv *mgp = ss->mgp; |
1466 | struct net_device *netdev = mgp->dev; | 1475 | |
1467 | unsigned long rx_bytes = 0; | 1476 | unsigned long rx_bytes = 0; |
1468 | unsigned long rx_packets = 0; | 1477 | unsigned long rx_packets = 0; |
1469 | unsigned long rx_ok; | 1478 | unsigned long rx_ok; |
@@ -1474,18 +1483,18 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) | |||
1474 | u16 length; | 1483 | u16 length; |
1475 | __wsum checksum; | 1484 | __wsum checksum; |
1476 | 1485 | ||
1486 | /* | ||
1487 | * Prevent the compiler from generating more than one ->features memory | ||
1488 | * access, to avoid a theoretical race condition with functions that | ||
1489 | * change the NETIF_F_LRO flag at runtime. | ||
1490 | */ | ||
1491 | bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO; | ||
1492 | |||
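The ACCESS_ONCE() used just above is what enforces the single read the comment asks for; in kernels of this era it is defined in <linux/compiler.h> as a volatile cast, roughly:

/* #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 * so lro_enabled is loaded once and reused both per packet and for the
 * final lro_flush_all() decision below.
 */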
1477 | while (rx_done->entry[idx].length != 0 && work_done < budget) { | 1493 | while (rx_done->entry[idx].length != 0 && work_done < budget) { |
1478 | length = ntohs(rx_done->entry[idx].length); | 1494 | length = ntohs(rx_done->entry[idx].length); |
1479 | rx_done->entry[idx].length = 0; | 1495 | rx_done->entry[idx].length = 0; |
1480 | checksum = csum_unfold(rx_done->entry[idx].checksum); | 1496 | checksum = csum_unfold(rx_done->entry[idx].checksum); |
1481 | if (length <= mgp->small_bytes) | 1497 | rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled); |
1482 | rx_ok = myri10ge_rx_done(ss, &ss->rx_small, | ||
1483 | mgp->small_bytes, | ||
1484 | length, checksum); | ||
1485 | else | ||
1486 | rx_ok = myri10ge_rx_done(ss, &ss->rx_big, | ||
1487 | mgp->big_bytes, | ||
1488 | length, checksum); | ||
1489 | rx_packets += rx_ok; | 1498 | rx_packets += rx_ok; |
1490 | rx_bytes += rx_ok * (unsigned long)length; | 1499 | rx_bytes += rx_ok * (unsigned long)length; |
1491 | cnt++; | 1500 | cnt++; |
@@ -1497,7 +1506,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) | |||
1497 | ss->stats.rx_packets += rx_packets; | 1506 | ss->stats.rx_packets += rx_packets; |
1498 | ss->stats.rx_bytes += rx_bytes; | 1507 | ss->stats.rx_bytes += rx_bytes; |
1499 | 1508 | ||
1500 | if (netdev->features & NETIF_F_LRO) | 1509 | if (lro_enabled) |
1501 | lro_flush_all(&rx_done->lro_mgr); | 1510 | lro_flush_all(&rx_done->lro_mgr); |
1502 | 1511 | ||
1503 | /* restock receive rings if needed */ | 1512 | /* restock receive rings if needed */ |
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 653d308e0f5d..3bdcc803ec68 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c | |||
@@ -871,7 +871,7 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data) | |||
871 | struct netxen_adapter *adapter = netdev_priv(netdev); | 871 | struct netxen_adapter *adapter = netdev_priv(netdev); |
872 | int hw_lro; | 872 | int hw_lro; |
873 | 873 | ||
874 | if (data & ~ETH_FLAG_LRO) | 874 | if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) |
875 | return -EINVAL; | 875 | return -EINVAL; |
876 | 876 | ||
877 | if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) | 877 | if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) |
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c index 4c14510e2a87..45b2755d6cba 100644 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/qlcnic/qlcnic_ethtool.c | |||
@@ -1003,7 +1003,7 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data) | |||
1003 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 1003 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
1004 | int hw_lro; | 1004 | int hw_lro; |
1005 | 1005 | ||
1006 | if (data & ~ETH_FLAG_LRO) | 1006 | if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) |
1007 | return -EINVAL; | 1007 | return -EINVAL; |
1008 | 1008 | ||
1009 | if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) | 1009 | if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 2ad6364103ea..356e74d20b80 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -6726,7 +6726,7 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data) | |||
6726 | int rc = 0; | 6726 | int rc = 0; |
6727 | int changed = 0; | 6727 | int changed = 0; |
6728 | 6728 | ||
6729 | if (data & ~ETH_FLAG_LRO) | 6729 | if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO)) |
6730 | return -EINVAL; | 6730 | return -EINVAL; |
6731 | 6731 | ||
6732 | if (data & ETH_FLAG_LRO) { | 6732 | if (data & ETH_FLAG_LRO) { |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index ebec88882c3b..73c942d85f07 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -48,9 +48,9 @@ | |||
48 | #include <net/ip.h> | 48 | #include <net/ip.h> |
49 | 49 | ||
50 | #include <asm/system.h> | 50 | #include <asm/system.h> |
51 | #include <asm/io.h> | 51 | #include <linux/io.h> |
52 | #include <asm/byteorder.h> | 52 | #include <asm/byteorder.h> |
53 | #include <asm/uaccess.h> | 53 | #include <linux/uaccess.h> |
54 | 54 | ||
55 | #ifdef CONFIG_SPARC | 55 | #ifdef CONFIG_SPARC |
56 | #include <asm/idprom.h> | 56 | #include <asm/idprom.h> |
@@ -13118,7 +13118,7 @@ done: | |||
13118 | 13118 | ||
13119 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); | 13119 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); |
13120 | 13120 | ||
13121 | static void inline vlan_features_add(struct net_device *dev, unsigned long flags) | 13121 | static inline void vlan_features_add(struct net_device *dev, unsigned long flags) |
13122 | { | 13122 | { |
13123 | dev->vlan_features |= flags; | 13123 | dev->vlan_features |= flags; |
13124 | } | 13124 | } |
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 81254be85b92..51f2ef142a5b 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c | |||
@@ -304,8 +304,8 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) | |||
304 | u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; | 304 | u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; |
305 | unsigned long flags; | 305 | unsigned long flags; |
306 | 306 | ||
307 | if (data & ~ETH_FLAG_LRO) | 307 | if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) |
308 | return -EOPNOTSUPP; | 308 | return -EINVAL; |
309 | 309 | ||
310 | if (lro_requested ^ lro_present) { | 310 | if (lro_requested ^ lro_present) { |
311 | /* toggle the LRO feature*/ | 311 | /* toggle the LRO feature*/ |
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c index 1dd3a21b3a43..c5eb034107fd 100644 --- a/drivers/net/vxge/vxge-ethtool.c +++ b/drivers/net/vxge/vxge-ethtool.c | |||
@@ -1117,8 +1117,8 @@ static int vxge_set_flags(struct net_device *dev, u32 data) | |||
1117 | struct vxgedev *vdev = netdev_priv(dev); | 1117 | struct vxgedev *vdev = netdev_priv(dev); |
1118 | enum vxge_hw_status status; | 1118 | enum vxge_hw_status status; |
1119 | 1119 | ||
1120 | if (data & ~ETH_FLAG_RXHASH) | 1120 | if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH)) |
1121 | return -EOPNOTSUPP; | 1121 | return -EINVAL; |
1122 | 1122 | ||
1123 | if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) | 1123 | if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) |
1124 | return 0; | 1124 | return 0; |
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index 18d24b7b1e34..7ecc0bda57b3 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c | |||
@@ -649,8 +649,7 @@ static int __devinit p54spi_probe(struct spi_device *spi) | |||
649 | goto err_free_common; | 649 | goto err_free_common; |
650 | } | 650 | } |
651 | 651 | ||
652 | set_irq_type(gpio_to_irq(p54spi_gpio_irq), | 652 | irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING); |
653 | IRQ_TYPE_EDGE_RISING); | ||
654 | 653 | ||
655 | disable_irq(gpio_to_irq(p54spi_gpio_irq)); | 654 | disable_irq(gpio_to_irq(p54spi_gpio_irq)); |
656 | 655 | ||
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c index d550b5e68d3c..f51a0241a440 100644 --- a/drivers/net/wireless/wl1251/sdio.c +++ b/drivers/net/wireless/wl1251/sdio.c | |||
@@ -265,7 +265,7 @@ static int wl1251_sdio_probe(struct sdio_func *func, | |||
265 | goto disable; | 265 | goto disable; |
266 | } | 266 | } |
267 | 267 | ||
268 | set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); | 268 | irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); |
269 | disable_irq(wl->irq); | 269 | disable_irq(wl->irq); |
270 | 270 | ||
271 | wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; | 271 | wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; |
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c index ac872b38960f..af6448c4d3e2 100644 --- a/drivers/net/wireless/wl1251/spi.c +++ b/drivers/net/wireless/wl1251/spi.c | |||
@@ -286,7 +286,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi) | |||
286 | goto out_free; | 286 | goto out_free; |
287 | } | 287 | } |
288 | 288 | ||
289 | set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); | 289 | irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); |
290 | 290 | ||
291 | disable_irq(wl->irq); | 291 | disable_irq(wl->irq); |
292 | 292 | ||
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c index deeec32a5803..103095bbe8c0 100644 --- a/drivers/parisc/eisa.c +++ b/drivers/parisc/eisa.c | |||
@@ -340,7 +340,7 @@ static int __init eisa_probe(struct parisc_device *dev) | |||
340 | /* Reserve IRQ2 */ | 340 | /* Reserve IRQ2 */ |
341 | setup_irq(2, &irq2_action); | 341 | setup_irq(2, &irq2_action); |
342 | for (i = 0; i < 16; i++) { | 342 | for (i = 0; i < 16; i++) { |
343 | set_irq_chip_and_handler(i, &eisa_interrupt_type, | 343 | irq_set_chip_and_handler(i, &eisa_interrupt_type, |
344 | handle_simple_irq); | 344 | handle_simple_irq); |
345 | } | 345 | } |
346 | 346 | ||
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c index ef31080cf591..1bab5a2cd359 100644 --- a/drivers/parisc/gsc.c +++ b/drivers/parisc/gsc.c | |||
@@ -152,8 +152,8 @@ int gsc_assign_irq(struct irq_chip *type, void *data) | |||
152 | if (irq > GSC_IRQ_MAX) | 152 | if (irq > GSC_IRQ_MAX) |
153 | return NO_IRQ; | 153 | return NO_IRQ; |
154 | 154 | ||
155 | set_irq_chip_and_handler(irq, type, handle_simple_irq); | 155 | irq_set_chip_and_handler(irq, type, handle_simple_irq); |
156 | set_irq_chip_data(irq, data); | 156 | irq_set_chip_data(irq, data); |
157 | 157 | ||
158 | return irq++; | 158 | return irq++; |
159 | } | 159 | } |
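The parisc hunks above are straight renames to the irq_set_* helpers; the underlying pattern is to bind a chip and flow handler per interrupt and optionally stash a controller cookie as chip data. A condensed sketch with a hypothetical base and chip:

#include <linux/irq.h>

#define FOO_IRQ_BASE	64	/* hypothetical base, for illustration only */

static void foo_setup_irqs(struct irq_chip *chip, void *cookie)
{
	int i;

	for (i = 0; i < 16; i++) {
		irq_set_chip_and_handler(FOO_IRQ_BASE + i, chip,
					 handle_simple_irq);
		irq_set_chip_data(FOO_IRQ_BASE + i, cookie);
	}
}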
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index a4d8ff66a639..e3b76d409dee 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c | |||
@@ -355,7 +355,8 @@ int superio_fixup_irq(struct pci_dev *pcidev) | |||
355 | #endif | 355 | #endif |
356 | 356 | ||
357 | for (i = 0; i < 16; i++) { | 357 | for (i = 0; i < 16; i++) { |
358 | set_irq_chip_and_handler(i, &superio_interrupt_type, handle_simple_irq); | 358 | irq_set_chip_and_handler(i, &superio_interrupt_type, |
359 | handle_simple_irq); | ||
359 | } | 360 | } |
360 | 361 | ||
361 | /* | 362 | /* |
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 09933eb9126b..12e02bf92c4a 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c | |||
@@ -1226,7 +1226,7 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) | |||
1226 | 1226 | ||
1227 | void dmar_msi_unmask(struct irq_data *data) | 1227 | void dmar_msi_unmask(struct irq_data *data) |
1228 | { | 1228 | { |
1229 | struct intel_iommu *iommu = irq_data_get_irq_data(data); | 1229 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); |
1230 | unsigned long flag; | 1230 | unsigned long flag; |
1231 | 1231 | ||
1232 | /* unmask it */ | 1232 | /* unmask it */ |
@@ -1240,7 +1240,7 @@ void dmar_msi_unmask(struct irq_data *data) | |||
1240 | void dmar_msi_mask(struct irq_data *data) | 1240 | void dmar_msi_mask(struct irq_data *data) |
1241 | { | 1241 | { |
1242 | unsigned long flag; | 1242 | unsigned long flag; |
1243 | struct intel_iommu *iommu = irq_data_get_irq_data(data); | 1243 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); |
1244 | 1244 | ||
1245 | /* mask it */ | 1245 | /* mask it */ |
1246 | spin_lock_irqsave(&iommu->register_lock, flag); | 1246 | spin_lock_irqsave(&iommu->register_lock, flag); |
@@ -1252,7 +1252,7 @@ void dmar_msi_mask(struct irq_data *data) | |||
1252 | 1252 | ||
1253 | void dmar_msi_write(int irq, struct msi_msg *msg) | 1253 | void dmar_msi_write(int irq, struct msi_msg *msg) |
1254 | { | 1254 | { |
1255 | struct intel_iommu *iommu = get_irq_data(irq); | 1255 | struct intel_iommu *iommu = irq_get_handler_data(irq); |
1256 | unsigned long flag; | 1256 | unsigned long flag; |
1257 | 1257 | ||
1258 | spin_lock_irqsave(&iommu->register_lock, flag); | 1258 | spin_lock_irqsave(&iommu->register_lock, flag); |
@@ -1264,7 +1264,7 @@ void dmar_msi_write(int irq, struct msi_msg *msg) | |||
1264 | 1264 | ||
1265 | void dmar_msi_read(int irq, struct msi_msg *msg) | 1265 | void dmar_msi_read(int irq, struct msi_msg *msg) |
1266 | { | 1266 | { |
1267 | struct intel_iommu *iommu = get_irq_data(irq); | 1267 | struct intel_iommu *iommu = irq_get_handler_data(irq); |
1268 | unsigned long flag; | 1268 | unsigned long flag; |
1269 | 1269 | ||
1270 | spin_lock_irqsave(&iommu->register_lock, flag); | 1270 | spin_lock_irqsave(&iommu->register_lock, flag); |
@@ -1382,12 +1382,12 @@ int dmar_set_interrupt(struct intel_iommu *iommu) | |||
1382 | return -EINVAL; | 1382 | return -EINVAL; |
1383 | } | 1383 | } |
1384 | 1384 | ||
1385 | set_irq_data(irq, iommu); | 1385 | irq_set_handler_data(irq, iommu); |
1386 | iommu->irq = irq; | 1386 | iommu->irq = irq; |
1387 | 1387 | ||
1388 | ret = arch_setup_dmar_msi(irq); | 1388 | ret = arch_setup_dmar_msi(irq); |
1389 | if (ret) { | 1389 | if (ret) { |
1390 | set_irq_data(irq, NULL); | 1390 | irq_set_handler_data(irq, NULL); |
1391 | iommu->irq = 0; | 1391 | iommu->irq = 0; |
1392 | destroy_irq(irq); | 1392 | destroy_irq(irq); |
1393 | return ret; | 1393 | return ret; |
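The dmar.c conversion above (and the htirq.c one that follows) keeps the same shape throughout: a per-interrupt cookie is stored with irq_set_handler_data() and read back either by IRQ number (irq_get_handler_data()) or from an irq_data pointer (irq_data_get_irq_handler_data()). A minimal sketch with a hypothetical foo_ctx:

#include <linux/io.h>
#include <linux/irq.h>

struct foo_ctx {
	void __iomem *regs;	/* hypothetical device registers */
};

static void foo_attach(unsigned int irq, struct foo_ctx *ctx)
{
	irq_set_handler_data(irq, ctx);		/* was set_irq_data() */
}

static void foo_mask(struct irq_data *data)
{
	struct foo_ctx *ctx = irq_data_get_irq_handler_data(data);

	writel(0, ctx->regs);	/* placeholder: mask the source in hardware */
}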
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index 834842aa5bbf..db057b6fe0c8 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c | |||
@@ -34,7 +34,7 @@ struct ht_irq_cfg { | |||
34 | 34 | ||
35 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) | 35 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) |
36 | { | 36 | { |
37 | struct ht_irq_cfg *cfg = get_irq_data(irq); | 37 | struct ht_irq_cfg *cfg = irq_get_handler_data(irq); |
38 | unsigned long flags; | 38 | unsigned long flags; |
39 | spin_lock_irqsave(&ht_irq_lock, flags); | 39 | spin_lock_irqsave(&ht_irq_lock, flags); |
40 | if (cfg->msg.address_lo != msg->address_lo) { | 40 | if (cfg->msg.address_lo != msg->address_lo) { |
@@ -53,13 +53,13 @@ void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) | |||
53 | 53 | ||
54 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) | 54 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) |
55 | { | 55 | { |
56 | struct ht_irq_cfg *cfg = get_irq_data(irq); | 56 | struct ht_irq_cfg *cfg = irq_get_handler_data(irq); |
57 | *msg = cfg->msg; | 57 | *msg = cfg->msg; |
58 | } | 58 | } |
59 | 59 | ||
60 | void mask_ht_irq(struct irq_data *data) | 60 | void mask_ht_irq(struct irq_data *data) |
61 | { | 61 | { |
62 | struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); | 62 | struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data); |
63 | struct ht_irq_msg msg = cfg->msg; | 63 | struct ht_irq_msg msg = cfg->msg; |
64 | 64 | ||
65 | msg.address_lo |= 1; | 65 | msg.address_lo |= 1; |
@@ -68,7 +68,7 @@ void mask_ht_irq(struct irq_data *data) | |||
68 | 68 | ||
69 | void unmask_ht_irq(struct irq_data *data) | 69 | void unmask_ht_irq(struct irq_data *data) |
70 | { | 70 | { |
71 | struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); | 71 | struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data); |
72 | struct ht_irq_msg msg = cfg->msg; | 72 | struct ht_irq_msg msg = cfg->msg; |
73 | 73 | ||
74 | msg.address_lo &= ~1; | 74 | msg.address_lo &= ~1; |
@@ -126,7 +126,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) | |||
126 | kfree(cfg); | 126 | kfree(cfg); |
127 | return -EBUSY; | 127 | return -EBUSY; |
128 | } | 128 | } |
129 | set_irq_data(irq, cfg); | 129 | irq_set_handler_data(irq, cfg); |
130 | 130 | ||
131 | if (arch_setup_ht_irq(irq, dev) < 0) { | 131 | if (arch_setup_ht_irq(irq, dev) < 0) { |
132 | ht_destroy_irq(irq); | 132 | ht_destroy_irq(irq); |
@@ -162,9 +162,9 @@ void ht_destroy_irq(unsigned int irq) | |||
162 | { | 162 | { |
163 | struct ht_irq_cfg *cfg; | 163 | struct ht_irq_cfg *cfg; |
164 | 164 | ||
165 | cfg = get_irq_data(irq); | 165 | cfg = irq_get_handler_data(irq); |
166 | set_irq_chip(irq, NULL); | 166 | irq_set_chip(irq, NULL); |
167 | set_irq_data(irq, NULL); | 167 | irq_set_handler_data(irq, NULL); |
168 | destroy_irq(irq); | 168 | destroy_irq(irq); |
169 | 169 | ||
170 | kfree(cfg); | 170 | kfree(cfg); |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index a4115f1afe1f..7da3bef60d87 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -1206,7 +1206,7 @@ void free_dmar_iommu(struct intel_iommu *iommu) | |||
1206 | iommu_disable_translation(iommu); | 1206 | iommu_disable_translation(iommu); |
1207 | 1207 | ||
1208 | if (iommu->irq) { | 1208 | if (iommu->irq) { |
1209 | set_irq_data(iommu->irq, NULL); | 1209 | irq_set_handler_data(iommu->irq, NULL); |
1210 | /* This will mask the irq */ | 1210 | /* This will mask the irq */ |
1211 | free_irq(iommu->irq, iommu); | 1211 | free_irq(iommu->irq, iommu); |
1212 | destroy_irq(iommu->irq); | 1212 | destroy_irq(iommu->irq); |
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index ec87cd66f3eb..a22557b20283 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
@@ -50,7 +50,7 @@ static DEFINE_SPINLOCK(irq_2_ir_lock); | |||
50 | 50 | ||
51 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | 51 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) |
52 | { | 52 | { |
53 | struct irq_cfg *cfg = get_irq_chip_data(irq); | 53 | struct irq_cfg *cfg = irq_get_chip_data(irq); |
54 | return cfg ? &cfg->irq_2_iommu : NULL; | 54 | return cfg ? &cfg->irq_2_iommu : NULL; |
55 | } | 55 | } |
56 | 56 | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 44b0aeee83e5..2f10328bf661 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -236,7 +236,7 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | |||
236 | 236 | ||
237 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) | 237 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) |
238 | { | 238 | { |
239 | struct msi_desc *entry = get_irq_msi(irq); | 239 | struct msi_desc *entry = irq_get_msi_desc(irq); |
240 | 240 | ||
241 | __read_msi_msg(entry, msg); | 241 | __read_msi_msg(entry, msg); |
242 | } | 242 | } |
@@ -253,7 +253,7 @@ void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | |||
253 | 253 | ||
254 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) | 254 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) |
255 | { | 255 | { |
256 | struct msi_desc *entry = get_irq_msi(irq); | 256 | struct msi_desc *entry = irq_get_msi_desc(irq); |
257 | 257 | ||
258 | __get_cached_msi_msg(entry, msg); | 258 | __get_cached_msi_msg(entry, msg); |
259 | } | 259 | } |
@@ -297,7 +297,7 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | |||
297 | 297 | ||
298 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) | 298 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) |
299 | { | 299 | { |
300 | struct msi_desc *entry = get_irq_msi(irq); | 300 | struct msi_desc *entry = irq_get_msi_desc(irq); |
301 | 301 | ||
302 | __write_msi_msg(entry, msg); | 302 | __write_msi_msg(entry, msg); |
303 | } | 303 | } |
@@ -354,7 +354,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev) | |||
354 | if (!dev->msi_enabled) | 354 | if (!dev->msi_enabled) |
355 | return; | 355 | return; |
356 | 356 | ||
357 | entry = get_irq_msi(dev->irq); | 357 | entry = irq_get_msi_desc(dev->irq); |
358 | pos = entry->msi_attrib.pos; | 358 | pos = entry->msi_attrib.pos; |
359 | 359 | ||
360 | pci_intx_for_msi(dev, 0); | 360 | pci_intx_for_msi(dev, 0); |
@@ -519,7 +519,7 @@ static void msix_program_entries(struct pci_dev *dev, | |||
519 | PCI_MSIX_ENTRY_VECTOR_CTRL; | 519 | PCI_MSIX_ENTRY_VECTOR_CTRL; |
520 | 520 | ||
521 | entries[i].vector = entry->irq; | 521 | entries[i].vector = entry->irq; |
522 | set_irq_msi(entry->irq, entry); | 522 | irq_set_msi_desc(entry->irq, entry); |
523 | entry->masked = readl(entry->mask_base + offset); | 523 | entry->masked = readl(entry->mask_base + offset); |
524 | msix_mask_irq(entry, 1); | 524 | msix_mask_irq(entry, 1); |
525 | i++; | 525 | i++; |
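Likewise in the msi.c hunks just above, the per-IRQ MSI descriptor is now fetched with irq_get_msi_desc() and installed with irq_set_msi_desc(); the old get_irq_msi()/set_irq_msi() names go away. A brief sketch (foo_dump_msg is hypothetical):

#include <linux/irq.h>
#include <linux/msi.h>

/* Read back the cached MSI message for an IRQ via the renamed accessor. */
static void foo_dump_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);	/* was get_irq_msi() */

	__get_cached_msi_msg(entry, msg);
}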
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c index eae9cbe37a3e..49221395101e 100644 --- a/drivers/pcmcia/bfin_cf_pcmcia.c +++ b/drivers/pcmcia/bfin_cf_pcmcia.c | |||
@@ -235,7 +235,7 @@ static int __devinit bfin_cf_probe(struct platform_device *pdev) | |||
235 | cf->irq = irq; | 235 | cf->irq = irq; |
236 | cf->socket.pci_irq = irq; | 236 | cf->socket.pci_irq = irq; |
237 | 237 | ||
238 | set_irq_type(irq, IRQF_TRIGGER_LOW); | 238 | irq_set_irq_type(irq, IRQF_TRIGGER_LOW); |
239 | 239 | ||
240 | io_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 240 | io_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
241 | attr_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 241 | attr_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 27575e6378a1..01757f18a208 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c | |||
@@ -181,7 +181,7 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock) | |||
181 | /* all other (older) Db1x00 boards use a GPIO to show | 181 | /* all other (older) Db1x00 boards use a GPIO to show |
182 | * card detection status: use both-edge triggers. | 182 | * card detection status: use both-edge triggers. |
183 | */ | 183 | */ |
184 | set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH); | 184 | irq_set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH); |
185 | ret = request_irq(sock->insert_irq, db1000_pcmcia_cdirq, | 185 | ret = request_irq(sock->insert_irq, db1000_pcmcia_cdirq, |
186 | 0, "pcmcia_carddetect", sock); | 186 | 0, "pcmcia_carddetect", sock); |
187 | 187 | ||
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c index a52039564e74..443cb7fc872d 100644 --- a/drivers/pcmcia/pxa2xx_colibri.c +++ b/drivers/pcmcia/pxa2xx_colibri.c | |||
@@ -34,14 +34,24 @@ | |||
34 | #define COLIBRI320_DETECT_GPIO 81 | 34 | #define COLIBRI320_DETECT_GPIO 81 |
35 | #define COLIBRI320_READY_GPIO 29 | 35 | #define COLIBRI320_READY_GPIO 29 |
36 | 36 | ||
37 | static struct { | 37 | enum { |
38 | int reset_gpio; | 38 | DETECT = 0, |
39 | int ppen_gpio; | 39 | READY = 1, |
40 | int bvd1_gpio; | 40 | BVD1 = 2, |
41 | int bvd2_gpio; | 41 | BVD2 = 3, |
42 | int detect_gpio; | 42 | PPEN = 4, |
43 | int ready_gpio; | 43 | RESET = 5, |
44 | } colibri_pcmcia_gpio; | 44 | }; |
45 | |||
46 | /* Contents of this array are configured on-the-fly in init function */ | ||
47 | static struct gpio colibri_pcmcia_gpios[] = { | ||
48 | { 0, GPIOF_IN, "PCMCIA Detect" }, | ||
49 | { 0, GPIOF_IN, "PCMCIA Ready" }, | ||
50 | { 0, GPIOF_IN, "PCMCIA BVD1" }, | ||
51 | { 0, GPIOF_IN, "PCMCIA BVD2" }, | ||
52 | { 0, GPIOF_INIT_LOW, "PCMCIA PPEN" }, | ||
53 | { 0, GPIOF_INIT_HIGH,"PCMCIA Reset" }, | ||
54 | }; | ||
45 | 55 | ||
46 | static struct pcmcia_irqs colibri_irqs[] = { | 56 | static struct pcmcia_irqs colibri_irqs[] = { |
47 | { | 57 | { |
@@ -54,88 +64,42 @@ static int colibri_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
54 | { | 64 | { |
55 | int ret; | 65 | int ret; |
56 | 66 | ||
57 | ret = gpio_request(colibri_pcmcia_gpio.detect_gpio, "DETECT"); | 67 | ret = gpio_request_array(colibri_pcmcia_gpios, |
68 | ARRAY_SIZE(colibri_pcmcia_gpios)); | ||
58 | if (ret) | 69 | if (ret) |
59 | goto err1; | 70 | goto err1; |
60 | ret = gpio_direction_input(colibri_pcmcia_gpio.detect_gpio); | ||
61 | if (ret) | ||
62 | goto err2; | ||
63 | |||
64 | ret = gpio_request(colibri_pcmcia_gpio.ready_gpio, "READY"); | ||
65 | if (ret) | ||
66 | goto err2; | ||
67 | ret = gpio_direction_input(colibri_pcmcia_gpio.ready_gpio); | ||
68 | if (ret) | ||
69 | goto err3; | ||
70 | 71 | ||
71 | ret = gpio_request(colibri_pcmcia_gpio.bvd1_gpio, "BVD1"); | 72 | colibri_irqs[0].irq = gpio_to_irq(colibri_pcmcia_gpios[DETECT].gpio); |
72 | if (ret) | 73 | skt->socket.pci_irq = gpio_to_irq(colibri_pcmcia_gpios[READY].gpio); |
73 | goto err3; | ||
74 | ret = gpio_direction_input(colibri_pcmcia_gpio.bvd1_gpio); | ||
75 | if (ret) | ||
76 | goto err4; | ||
77 | 74 | ||
78 | ret = gpio_request(colibri_pcmcia_gpio.bvd2_gpio, "BVD2"); | 75 | ret = soc_pcmcia_request_irqs(skt, colibri_irqs, |
79 | if (ret) | 76 | ARRAY_SIZE(colibri_irqs)); |
80 | goto err4; | ||
81 | ret = gpio_direction_input(colibri_pcmcia_gpio.bvd2_gpio); | ||
82 | if (ret) | ||
83 | goto err5; | ||
84 | |||
85 | ret = gpio_request(colibri_pcmcia_gpio.ppen_gpio, "PPEN"); | ||
86 | if (ret) | ||
87 | goto err5; | ||
88 | ret = gpio_direction_output(colibri_pcmcia_gpio.ppen_gpio, 0); | ||
89 | if (ret) | ||
90 | goto err6; | ||
91 | |||
92 | ret = gpio_request(colibri_pcmcia_gpio.reset_gpio, "RESET"); | ||
93 | if (ret) | ||
94 | goto err6; | ||
95 | ret = gpio_direction_output(colibri_pcmcia_gpio.reset_gpio, 1); | ||
96 | if (ret) | 77 | if (ret) |
97 | goto err7; | 78 | goto err2; |
98 | |||
99 | colibri_irqs[0].irq = gpio_to_irq(colibri_pcmcia_gpio.detect_gpio); | ||
100 | skt->socket.pci_irq = gpio_to_irq(colibri_pcmcia_gpio.ready_gpio); | ||
101 | 79 | ||
102 | return soc_pcmcia_request_irqs(skt, colibri_irqs, | 80 | return ret; |
103 | ARRAY_SIZE(colibri_irqs)); | ||
104 | 81 | ||
105 | err7: | ||
106 | gpio_free(colibri_pcmcia_gpio.detect_gpio); | ||
107 | err6: | ||
108 | gpio_free(colibri_pcmcia_gpio.ready_gpio); | ||
109 | err5: | ||
110 | gpio_free(colibri_pcmcia_gpio.bvd1_gpio); | ||
111 | err4: | ||
112 | gpio_free(colibri_pcmcia_gpio.bvd2_gpio); | ||
113 | err3: | ||
114 | gpio_free(colibri_pcmcia_gpio.reset_gpio); | ||
115 | err2: | 82 | err2: |
116 | gpio_free(colibri_pcmcia_gpio.ppen_gpio); | 83 | gpio_free_array(colibri_pcmcia_gpios, |
84 | ARRAY_SIZE(colibri_pcmcia_gpios)); | ||
117 | err1: | 85 | err1: |
118 | return ret; | 86 | return ret; |
119 | } | 87 | } |
120 | 88 | ||
121 | static void colibri_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) | 89 | static void colibri_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) |
122 | { | 90 | { |
123 | gpio_free(colibri_pcmcia_gpio.detect_gpio); | 91 | gpio_free_array(colibri_pcmcia_gpios, |
124 | gpio_free(colibri_pcmcia_gpio.ready_gpio); | 92 | ARRAY_SIZE(colibri_pcmcia_gpios)); |
125 | gpio_free(colibri_pcmcia_gpio.bvd1_gpio); | ||
126 | gpio_free(colibri_pcmcia_gpio.bvd2_gpio); | ||
127 | gpio_free(colibri_pcmcia_gpio.reset_gpio); | ||
128 | gpio_free(colibri_pcmcia_gpio.ppen_gpio); | ||
129 | } | 93 | } |
130 | 94 | ||
131 | static void colibri_pcmcia_socket_state(struct soc_pcmcia_socket *skt, | 95 | static void colibri_pcmcia_socket_state(struct soc_pcmcia_socket *skt, |
132 | struct pcmcia_state *state) | 96 | struct pcmcia_state *state) |
133 | { | 97 | { |
134 | 98 | ||
135 | state->detect = !!gpio_get_value(colibri_pcmcia_gpio.detect_gpio); | 99 | state->detect = !!gpio_get_value(colibri_pcmcia_gpios[DETECT].gpio); |
136 | state->ready = !!gpio_get_value(colibri_pcmcia_gpio.ready_gpio); | 100 | state->ready = !!gpio_get_value(colibri_pcmcia_gpios[READY].gpio); |
137 | state->bvd1 = !!gpio_get_value(colibri_pcmcia_gpio.bvd1_gpio); | 101 | state->bvd1 = !!gpio_get_value(colibri_pcmcia_gpios[BVD1].gpio); |
138 | state->bvd2 = !!gpio_get_value(colibri_pcmcia_gpio.bvd2_gpio); | 102 | state->bvd2 = !!gpio_get_value(colibri_pcmcia_gpios[BVD2].gpio); |
139 | state->wrprot = 0; | 103 | state->wrprot = 0; |
140 | state->vs_3v = 1; | 104 | state->vs_3v = 1; |
141 | state->vs_Xv = 0; | 105 | state->vs_Xv = 0; |
@@ -145,9 +109,10 @@ static int | |||
145 | colibri_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, | 109 | colibri_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, |
146 | const socket_state_t *state) | 110 | const socket_state_t *state) |
147 | { | 111 | { |
148 | gpio_set_value(colibri_pcmcia_gpio.ppen_gpio, | 112 | gpio_set_value(colibri_pcmcia_gpios[PPEN].gpio, |
149 | !(state->Vcc == 33 && state->Vpp < 50)); | 113 | !(state->Vcc == 33 && state->Vpp < 50)); |
150 | gpio_set_value(colibri_pcmcia_gpio.reset_gpio, state->flags & SS_RESET); | 114 | gpio_set_value(colibri_pcmcia_gpios[RESET].gpio, |
115 | state->flags & SS_RESET); | ||
151 | return 0; | 116 | return 0; |
152 | } | 117 | } |
153 | 118 | ||
@@ -190,20 +155,20 @@ static int __init colibri_pcmcia_init(void) | |||
190 | 155 | ||
191 | /* Colibri PXA270 */ | 156 | /* Colibri PXA270 */ |
192 | if (machine_is_colibri()) { | 157 | if (machine_is_colibri()) { |
193 | colibri_pcmcia_gpio.reset_gpio = COLIBRI270_RESET_GPIO; | 158 | colibri_pcmcia_gpios[RESET].gpio = COLIBRI270_RESET_GPIO; |
194 | colibri_pcmcia_gpio.ppen_gpio = COLIBRI270_PPEN_GPIO; | 159 | colibri_pcmcia_gpios[PPEN].gpio = COLIBRI270_PPEN_GPIO; |
195 | colibri_pcmcia_gpio.bvd1_gpio = COLIBRI270_BVD1_GPIO; | 160 | colibri_pcmcia_gpios[BVD1].gpio = COLIBRI270_BVD1_GPIO; |
196 | colibri_pcmcia_gpio.bvd2_gpio = COLIBRI270_BVD2_GPIO; | 161 | colibri_pcmcia_gpios[BVD2].gpio = COLIBRI270_BVD2_GPIO; |
197 | colibri_pcmcia_gpio.detect_gpio = COLIBRI270_DETECT_GPIO; | 162 | colibri_pcmcia_gpios[DETECT].gpio = COLIBRI270_DETECT_GPIO; |
198 | colibri_pcmcia_gpio.ready_gpio = COLIBRI270_READY_GPIO; | 163 | colibri_pcmcia_gpios[READY].gpio = COLIBRI270_READY_GPIO; |
199 | /* Colibri PXA320 */ | 164 | /* Colibri PXA320 */ |
200 | } else if (machine_is_colibri320()) { | 165 | } else if (machine_is_colibri320()) { |
201 | colibri_pcmcia_gpio.reset_gpio = COLIBRI320_RESET_GPIO; | 166 | colibri_pcmcia_gpios[RESET].gpio = COLIBRI320_RESET_GPIO; |
202 | colibri_pcmcia_gpio.ppen_gpio = COLIBRI320_PPEN_GPIO; | 167 | colibri_pcmcia_gpios[PPEN].gpio = COLIBRI320_PPEN_GPIO; |
203 | colibri_pcmcia_gpio.bvd1_gpio = COLIBRI320_BVD1_GPIO; | 168 | colibri_pcmcia_gpios[BVD1].gpio = COLIBRI320_BVD1_GPIO; |
204 | colibri_pcmcia_gpio.bvd2_gpio = COLIBRI320_BVD2_GPIO; | 169 | colibri_pcmcia_gpios[BVD2].gpio = COLIBRI320_BVD2_GPIO; |
205 | colibri_pcmcia_gpio.detect_gpio = COLIBRI320_DETECT_GPIO; | 170 | colibri_pcmcia_gpios[DETECT].gpio = COLIBRI320_DETECT_GPIO; |
206 | colibri_pcmcia_gpio.ready_gpio = COLIBRI320_READY_GPIO; | 171 | colibri_pcmcia_gpios[READY].gpio = COLIBRI320_READY_GPIO; |
207 | } | 172 | } |
208 | 173 | ||
209 | ret = platform_device_add_data(colibri_pcmcia_device, | 174 | ret = platform_device_add_data(colibri_pcmcia_device, |
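The colibri conversion above follows the same struct-gpio-table pattern used by the palmld, palmtc, palmtx and vpac270 hunks below: declare each line once with its direction and initial level, then claim and release the whole set in bulk. A self-contained sketch with made-up GPIO numbers and labels:

#include <linux/gpio.h>

static struct gpio foo_gpios[] = {
	{ 10, GPIOF_IN,        "Card Detect"  },	/* input */
	{ 11, GPIOF_INIT_LOW,  "Power Enable" },	/* output, driven low */
	{ 12, GPIOF_INIT_HIGH, "Reset"        },	/* output, driven high */
};

static int foo_hw_init(void)
{
	/* Requests every line and applies its direction/level in one call;
	 * on failure the lines already claimed are released internally. */
	return gpio_request_array(foo_gpios, ARRAY_SIZE(foo_gpios));
}

static void foo_hw_exit(void)
{
	gpio_free_array(foo_gpios, ARRAY_SIZE(foo_gpios));
}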
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/drivers/pcmcia/pxa2xx_palmld.c index 6fb6f7f0672e..69f73670949a 100644 --- a/drivers/pcmcia/pxa2xx_palmld.c +++ b/drivers/pcmcia/pxa2xx_palmld.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Driver for Palm LifeDrive PCMCIA | 4 | * Driver for Palm LifeDrive PCMCIA |
5 | * | 5 | * |
6 | * Copyright (C) 2006 Alex Osborne <ato@meshy.org> | 6 | * Copyright (C) 2006 Alex Osborne <ato@meshy.org> |
7 | * Copyright (C) 2007-2008 Marek Vasut <marek.vasut@gmail.com> | 7 | * Copyright (C) 2007-2011 Marek Vasut <marek.vasut@gmail.com> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
@@ -20,49 +20,27 @@ | |||
20 | #include <mach/palmld.h> | 20 | #include <mach/palmld.h> |
21 | #include "soc_common.h" | 21 | #include "soc_common.h" |
22 | 22 | ||
23 | static struct gpio palmld_pcmcia_gpios[] = { | ||
24 | { GPIO_NR_PALMLD_PCMCIA_POWER, GPIOF_INIT_LOW, "PCMCIA Power" }, | ||
25 | { GPIO_NR_PALMLD_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" }, | ||
26 | { GPIO_NR_PALMLD_PCMCIA_READY, GPIOF_IN, "PCMCIA Ready" }, | ||
27 | }; | ||
28 | |||
23 | static int palmld_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | 29 | static int palmld_pcmcia_hw_init(struct soc_pcmcia_socket *skt) |
24 | { | 30 | { |
25 | int ret; | 31 | int ret; |
26 | 32 | ||
27 | ret = gpio_request(GPIO_NR_PALMLD_PCMCIA_POWER, "PCMCIA PWR"); | 33 | ret = gpio_request_array(palmld_pcmcia_gpios, |
28 | if (ret) | 34 | ARRAY_SIZE(palmld_pcmcia_gpios)); |
29 | goto err1; | ||
30 | ret = gpio_direction_output(GPIO_NR_PALMLD_PCMCIA_POWER, 0); | ||
31 | if (ret) | ||
32 | goto err2; | ||
33 | |||
34 | ret = gpio_request(GPIO_NR_PALMLD_PCMCIA_RESET, "PCMCIA RST"); | ||
35 | if (ret) | ||
36 | goto err2; | ||
37 | ret = gpio_direction_output(GPIO_NR_PALMLD_PCMCIA_RESET, 1); | ||
38 | if (ret) | ||
39 | goto err3; | ||
40 | |||
41 | ret = gpio_request(GPIO_NR_PALMLD_PCMCIA_READY, "PCMCIA RDY"); | ||
42 | if (ret) | ||
43 | goto err3; | ||
44 | ret = gpio_direction_input(GPIO_NR_PALMLD_PCMCIA_READY); | ||
45 | if (ret) | ||
46 | goto err4; | ||
47 | 35 | ||
48 | skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY); | 36 | skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY); |
49 | return 0; | ||
50 | 37 | ||
51 | err4: | ||
52 | gpio_free(GPIO_NR_PALMLD_PCMCIA_READY); | ||
53 | err3: | ||
54 | gpio_free(GPIO_NR_PALMLD_PCMCIA_RESET); | ||
55 | err2: | ||
56 | gpio_free(GPIO_NR_PALMLD_PCMCIA_POWER); | ||
57 | err1: | ||
58 | return ret; | 38 | return ret; |
59 | } | 39 | } |
60 | 40 | ||
61 | static void palmld_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) | 41 | static void palmld_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) |
62 | { | 42 | { |
63 | gpio_free(GPIO_NR_PALMLD_PCMCIA_READY); | 43 | gpio_free_array(palmld_pcmcia_gpios, ARRAY_SIZE(palmld_pcmcia_gpios)); |
64 | gpio_free(GPIO_NR_PALMLD_PCMCIA_RESET); | ||
65 | gpio_free(GPIO_NR_PALMLD_PCMCIA_POWER); | ||
66 | } | 44 | } |
67 | 45 | ||
68 | static void palmld_pcmcia_socket_state(struct soc_pcmcia_socket *skt, | 46 | static void palmld_pcmcia_socket_state(struct soc_pcmcia_socket *skt, |
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c index 459a232d66be..d0ad6a76bbde 100644 --- a/drivers/pcmcia/pxa2xx_palmtc.c +++ b/drivers/pcmcia/pxa2xx_palmtc.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Driver for Palm Tungsten|C PCMCIA | 4 | * Driver for Palm Tungsten|C PCMCIA |
5 | * | 5 | * |
6 | * Copyright (C) 2008 Alex Osborne <ato@meshy.org> | 6 | * Copyright (C) 2008 Alex Osborne <ato@meshy.org> |
7 | * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com> | 7 | * Copyright (C) 2009-2011 Marek Vasut <marek.vasut@gmail.com> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
@@ -21,79 +21,30 @@ | |||
21 | #include <mach/palmtc.h> | 21 | #include <mach/palmtc.h> |
22 | #include "soc_common.h" | 22 | #include "soc_common.h" |
23 | 23 | ||
24 | static struct gpio palmtc_pcmcia_gpios[] = { | ||
25 | { GPIO_NR_PALMTC_PCMCIA_POWER1, GPIOF_INIT_LOW, "PCMCIA Power 1" }, | ||
26 | { GPIO_NR_PALMTC_PCMCIA_POWER2, GPIOF_INIT_LOW, "PCMCIA Power 2" }, | ||
27 | { GPIO_NR_PALMTC_PCMCIA_POWER3, GPIOF_INIT_LOW, "PCMCIA Power 3" }, | ||
28 | { GPIO_NR_PALMTC_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" }, | ||
29 | { GPIO_NR_PALMTC_PCMCIA_READY, GPIOF_IN, "PCMCIA Ready" }, | ||
30 | { GPIO_NR_PALMTC_PCMCIA_PWRREADY, GPIOF_IN, "PCMCIA Power Ready" }, | ||
31 | }; | ||
32 | |||
24 | static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | 33 | static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt) |
25 | { | 34 | { |
26 | int ret; | 35 | int ret; |
27 | 36 | ||
28 | ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER1, "PCMCIA PWR1"); | 37 | ret = gpio_request_array(palmtc_pcmcia_gpios, |
29 | if (ret) | 38 | ARRAY_SIZE(palmtc_pcmcia_gpios)); |
30 | goto err1; | ||
31 | ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER1, 0); | ||
32 | if (ret) | ||
33 | goto err2; | ||
34 | |||
35 | ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER2, "PCMCIA PWR2"); | ||
36 | if (ret) | ||
37 | goto err2; | ||
38 | ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER2, 0); | ||
39 | if (ret) | ||
40 | goto err3; | ||
41 | |||
42 | ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER3, "PCMCIA PWR3"); | ||
43 | if (ret) | ||
44 | goto err3; | ||
45 | ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER3, 0); | ||
46 | if (ret) | ||
47 | goto err4; | ||
48 | |||
49 | ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_RESET, "PCMCIA RST"); | ||
50 | if (ret) | ||
51 | goto err4; | ||
52 | ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_RESET, 1); | ||
53 | if (ret) | ||
54 | goto err5; | ||
55 | |||
56 | ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_READY, "PCMCIA RDY"); | ||
57 | if (ret) | ||
58 | goto err5; | ||
59 | ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_READY); | ||
60 | if (ret) | ||
61 | goto err6; | ||
62 | |||
63 | ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_PWRREADY, "PCMCIA PWRRDY"); | ||
64 | if (ret) | ||
65 | goto err6; | ||
66 | ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_PWRREADY); | ||
67 | if (ret) | ||
68 | goto err7; | ||
69 | 39 | ||
70 | skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY); | 40 | skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY); |
71 | return 0; | ||
72 | 41 | ||
73 | err7: | ||
74 | gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY); | ||
75 | err6: | ||
76 | gpio_free(GPIO_NR_PALMTC_PCMCIA_READY); | ||
77 | err5: | ||
78 | gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET); | ||
79 | err4: | ||
80 | gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3); | ||
81 | err3: | ||
82 | gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2); | ||
83 | err2: | ||
84 | gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1); | ||
85 | err1: | ||
86 | return ret; | 42 | return ret; |
87 | } | 43 | } |
88 | 44 | ||
89 | static void palmtc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) | 45 | static void palmtc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) |
90 | { | 46 | { |
91 | gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY); | 47 | gpio_free_array(palmtc_pcmcia_gpios, ARRAY_SIZE(palmtc_pcmcia_gpios)); |
92 | gpio_free(GPIO_NR_PALMTC_PCMCIA_READY); | ||
93 | gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET); | ||
94 | gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3); | ||
95 | gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2); | ||
96 | gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1); | ||
97 | } | 48 | } |
98 | 49 | ||
99 | static void palmtc_pcmcia_socket_state(struct soc_pcmcia_socket *skt, | 50 | static void palmtc_pcmcia_socket_state(struct soc_pcmcia_socket *skt, |
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c index b07b247a399f..1a2580450402 100644 --- a/drivers/pcmcia/pxa2xx_palmtx.c +++ b/drivers/pcmcia/pxa2xx_palmtx.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Driver for Palm T|X PCMCIA | 4 | * Driver for Palm T|X PCMCIA |
5 | * | 5 | * |
6 | * Copyright (C) 2007-2008 Marek Vasut <marek.vasut@gmail.com> | 6 | * Copyright (C) 2007-2011 Marek Vasut <marek.vasut@gmail.com> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
@@ -13,67 +13,34 @@ | |||
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/gpio.h> | ||
16 | 17 | ||
17 | #include <asm/mach-types.h> | 18 | #include <asm/mach-types.h> |
18 | |||
19 | #include <mach/gpio.h> | ||
20 | #include <mach/palmtx.h> | 19 | #include <mach/palmtx.h> |
21 | |||
22 | #include "soc_common.h" | 20 | #include "soc_common.h" |
23 | 21 | ||
22 | static struct gpio palmtx_pcmcia_gpios[] = { | ||
23 | { GPIO_NR_PALMTX_PCMCIA_POWER1, GPIOF_INIT_LOW, "PCMCIA Power 1" }, | ||
24 | { GPIO_NR_PALMTX_PCMCIA_POWER2, GPIOF_INIT_LOW, "PCMCIA Power 2" }, | ||
25 | { GPIO_NR_PALMTX_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" }, | ||
26 | { GPIO_NR_PALMTX_PCMCIA_READY, GPIOF_IN, "PCMCIA Ready" }, | ||
27 | }; | ||
28 | |||
24 | static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | 29 | static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt) |
25 | { | 30 | { |
26 | int ret; | 31 | int ret; |
27 | 32 | ||
28 | ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER1, "PCMCIA PWR1"); | 33 | ret = gpio_request_array(palmtx_pcmcia_gpios, |
29 | if (ret) | 34 | ARRAY_SIZE(palmtx_pcmcia_gpios)); |
30 | goto err1; | ||
31 | ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER1, 0); | ||
32 | if (ret) | ||
33 | goto err2; | ||
34 | |||
35 | ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER2, "PCMCIA PWR2"); | ||
36 | if (ret) | ||
37 | goto err2; | ||
38 | ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER2, 0); | ||
39 | if (ret) | ||
40 | goto err3; | ||
41 | |||
42 | ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_RESET, "PCMCIA RST"); | ||
43 | if (ret) | ||
44 | goto err3; | ||
45 | ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_RESET, 1); | ||
46 | if (ret) | ||
47 | goto err4; | ||
48 | |||
49 | ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_READY, "PCMCIA RDY"); | ||
50 | if (ret) | ||
51 | goto err4; | ||
52 | ret = gpio_direction_input(GPIO_NR_PALMTX_PCMCIA_READY); | ||
53 | if (ret) | ||
54 | goto err5; | ||
55 | 35 | ||
56 | skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY); | 36 | skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY); |
57 | return 0; | ||
58 | 37 | ||
59 | err5: | ||
60 | gpio_free(GPIO_NR_PALMTX_PCMCIA_READY); | ||
61 | err4: | ||
62 | gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET); | ||
63 | err3: | ||
64 | gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2); | ||
65 | err2: | ||
66 | gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1); | ||
67 | err1: | ||
68 | return ret; | 38 | return ret; |
69 | } | 39 | } |
70 | 40 | ||
71 | static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) | 41 | static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) |
72 | { | 42 | { |
73 | gpio_free(GPIO_NR_PALMTX_PCMCIA_READY); | 43 | gpio_free_array(palmtx_pcmcia_gpios, ARRAY_SIZE(palmtx_pcmcia_gpios)); |
74 | gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET); | ||
75 | gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2); | ||
76 | gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1); | ||
77 | } | 44 | } |
78 | 45 | ||
79 | static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt, | 46 | static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt, |
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c index 55627eccee8e..435002dfc3ca 100644 --- a/drivers/pcmcia/pxa2xx_vpac270.c +++ b/drivers/pcmcia/pxa2xx_vpac270.c | |||
@@ -3,8 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Driver for Voipac PXA270 PCMCIA and CF sockets | 4 | * Driver for Voipac PXA270 PCMCIA and CF sockets |
5 | * | 5 | * |
6 | * Copyright (C) 2010 | 6 | * Copyright (C) 2010-2011 Marek Vasut <marek.vasut@gmail.com> |
7 | * Marek Vasut <marek.vasut@gmail.com> | ||
8 | * | 7 | * |
9 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
@@ -22,6 +21,19 @@ | |||
22 | 21 | ||
23 | #include "soc_common.h" | 22 | #include "soc_common.h" |
24 | 23 | ||
24 | static struct gpio vpac270_pcmcia_gpios[] = { | ||
25 | { GPIO84_VPAC270_PCMCIA_CD, GPIOF_IN, "PCMCIA Card Detect" }, | ||
26 | { GPIO35_VPAC270_PCMCIA_RDY, GPIOF_IN, "PCMCIA Ready" }, | ||
27 | { GPIO107_VPAC270_PCMCIA_PPEN, GPIOF_INIT_LOW, "PCMCIA PPEN" }, | ||
28 | { GPIO11_VPAC270_PCMCIA_RESET, GPIOF_INIT_LOW, "PCMCIA Reset" }, | ||
29 | }; | ||
30 | |||
31 | static struct gpio vpac270_cf_gpios[] = { | ||
32 | { GPIO17_VPAC270_CF_CD, GPIOF_IN, "CF Card Detect" }, | ||
33 | { GPIO12_VPAC270_CF_RDY, GPIOF_IN, "CF Ready" }, | ||
34 | { GPIO16_VPAC270_CF_RESET, GPIOF_INIT_LOW, "CF Reset" }, | ||
35 | }; | ||
36 | |||
25 | static struct pcmcia_irqs cd_irqs[] = { | 37 | static struct pcmcia_irqs cd_irqs[] = { |
26 | { | 38 | { |
27 | .sock = 0, | 39 | .sock = 0, |
@@ -40,96 +52,34 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
40 | int ret; | 52 | int ret; |
41 | 53 | ||
42 | if (skt->nr == 0) { | 54 | if (skt->nr == 0) { |
43 | ret = gpio_request(GPIO84_VPAC270_PCMCIA_CD, "PCMCIA CD"); | 55 | ret = gpio_request_array(vpac270_pcmcia_gpios, |
44 | if (ret) | 56 | ARRAY_SIZE(vpac270_pcmcia_gpios)); |
45 | goto err1; | ||
46 | ret = gpio_direction_input(GPIO84_VPAC270_PCMCIA_CD); | ||
47 | if (ret) | ||
48 | goto err2; | ||
49 | |||
50 | ret = gpio_request(GPIO35_VPAC270_PCMCIA_RDY, "PCMCIA RDY"); | ||
51 | if (ret) | ||
52 | goto err2; | ||
53 | ret = gpio_direction_input(GPIO35_VPAC270_PCMCIA_RDY); | ||
54 | if (ret) | ||
55 | goto err3; | ||
56 | |||
57 | ret = gpio_request(GPIO107_VPAC270_PCMCIA_PPEN, "PCMCIA PPEN"); | ||
58 | if (ret) | ||
59 | goto err3; | ||
60 | ret = gpio_direction_output(GPIO107_VPAC270_PCMCIA_PPEN, 0); | ||
61 | if (ret) | ||
62 | goto err4; | ||
63 | |||
64 | ret = gpio_request(GPIO11_VPAC270_PCMCIA_RESET, "PCMCIA RESET"); | ||
65 | if (ret) | ||
66 | goto err4; | ||
67 | ret = gpio_direction_output(GPIO11_VPAC270_PCMCIA_RESET, 0); | ||
68 | if (ret) | ||
69 | goto err5; | ||
70 | 57 | ||
71 | skt->socket.pci_irq = gpio_to_irq(GPIO35_VPAC270_PCMCIA_RDY); | 58 | skt->socket.pci_irq = gpio_to_irq(GPIO35_VPAC270_PCMCIA_RDY); |
72 | 59 | ||
73 | return soc_pcmcia_request_irqs(skt, &cd_irqs[0], 1); | 60 | if (!ret) |
74 | 61 | ret = soc_pcmcia_request_irqs(skt, &cd_irqs[0], 1); | |
75 | err5: | ||
76 | gpio_free(GPIO11_VPAC270_PCMCIA_RESET); | ||
77 | err4: | ||
78 | gpio_free(GPIO107_VPAC270_PCMCIA_PPEN); | ||
79 | err3: | ||
80 | gpio_free(GPIO35_VPAC270_PCMCIA_RDY); | ||
81 | err2: | ||
82 | gpio_free(GPIO84_VPAC270_PCMCIA_CD); | ||
83 | err1: | ||
84 | return ret; | ||
85 | |||
86 | } else { | 62 | } else { |
87 | ret = gpio_request(GPIO17_VPAC270_CF_CD, "CF CD"); | 63 | ret = gpio_request_array(vpac270_cf_gpios, |
88 | if (ret) | 64 | ARRAY_SIZE(vpac270_cf_gpios)); |
89 | goto err6; | ||
90 | ret = gpio_direction_input(GPIO17_VPAC270_CF_CD); | ||
91 | if (ret) | ||
92 | goto err7; | ||
93 | |||
94 | ret = gpio_request(GPIO12_VPAC270_CF_RDY, "CF RDY"); | ||
95 | if (ret) | ||
96 | goto err7; | ||
97 | ret = gpio_direction_input(GPIO12_VPAC270_CF_RDY); | ||
98 | if (ret) | ||
99 | goto err8; | ||
100 | |||
101 | ret = gpio_request(GPIO16_VPAC270_CF_RESET, "CF RESET"); | ||
102 | if (ret) | ||
103 | goto err8; | ||
104 | ret = gpio_direction_output(GPIO16_VPAC270_CF_RESET, 0); | ||
105 | if (ret) | ||
106 | goto err9; | ||
107 | 65 | ||
108 | skt->socket.pci_irq = gpio_to_irq(GPIO12_VPAC270_CF_RDY); | 66 | skt->socket.pci_irq = gpio_to_irq(GPIO12_VPAC270_CF_RDY); |
109 | 67 | ||
110 | return soc_pcmcia_request_irqs(skt, &cd_irqs[1], 1); | 68 | if (!ret) |
111 | 69 | ret = soc_pcmcia_request_irqs(skt, &cd_irqs[1], 1); | |
112 | err9: | ||
113 | gpio_free(GPIO16_VPAC270_CF_RESET); | ||
114 | err8: | ||
115 | gpio_free(GPIO12_VPAC270_CF_RDY); | ||
116 | err7: | ||
117 | gpio_free(GPIO17_VPAC270_CF_CD); | ||
118 | err6: | ||
119 | return ret; | ||
120 | |||
121 | } | 70 | } |
71 | |||
72 | return ret; | ||
122 | } | 73 | } |
123 | 74 | ||
124 | static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) | 75 | static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) |
125 | { | 76 | { |
126 | gpio_free(GPIO11_VPAC270_PCMCIA_RESET); | 77 | if (skt->nr == 0) |
127 | gpio_free(GPIO107_VPAC270_PCMCIA_PPEN); | 78 | gpio_request_array(vpac270_pcmcia_gpios, |
128 | gpio_free(GPIO35_VPAC270_PCMCIA_RDY); | 79 | ARRAY_SIZE(vpac270_pcmcia_gpios)); |
129 | gpio_free(GPIO84_VPAC270_PCMCIA_CD); | 80 | else |
130 | gpio_free(GPIO16_VPAC270_CF_RESET); | 81 | gpio_request_array(vpac270_cf_gpios, |
131 | gpio_free(GPIO12_VPAC270_CF_RDY); | 82 | ARRAY_SIZE(vpac270_cf_gpios)); |
132 | gpio_free(GPIO17_VPAC270_CF_CD); | ||
133 | } | 83 | } |
134 | 84 | ||
135 | static void vpac270_pcmcia_socket_state(struct soc_pcmcia_socket *skt, | 85 | static void vpac270_pcmcia_socket_state(struct soc_pcmcia_socket *skt, |
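One detail worth flagging in the vpac270 hunk above: the rewritten hw_shutdown calls gpio_request_array() where gpio_free_array() is presumably intended, so the lines would stay claimed after shutdown. Assuming freeing is the intent, the function would read (using the driver's own tables from this diff):

static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
	if (skt->nr == 0)
		gpio_free_array(vpac270_pcmcia_gpios,
				ARRAY_SIZE(vpac270_pcmcia_gpios));
	else
		gpio_free_array(vpac270_cf_gpios,
				ARRAY_SIZE(vpac270_cf_gpios));
}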
diff --git a/drivers/pcmcia/sa1100_nanoengine.c b/drivers/pcmcia/sa1100_nanoengine.c index 3d2652e2f5ae..93b9c9ba57c3 100644 --- a/drivers/pcmcia/sa1100_nanoengine.c +++ b/drivers/pcmcia/sa1100_nanoengine.c | |||
@@ -86,7 +86,7 @@ static int nanoengine_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
86 | GPDR &= ~nano_skts[i].input_pins; | 86 | GPDR &= ~nano_skts[i].input_pins; |
87 | GPDR |= nano_skts[i].output_pins; | 87 | GPDR |= nano_skts[i].output_pins; |
88 | GPCR = nano_skts[i].clear_outputs; | 88 | GPCR = nano_skts[i].clear_outputs; |
89 | set_irq_type(nano_skts[i].transition_pins, IRQ_TYPE_EDGE_BOTH); | 89 | irq_set_irq_type(nano_skts[i].transition_pins, IRQ_TYPE_EDGE_BOTH); |
90 | skt->socket.pci_irq = nano_skts[i].pci_irq; | 90 | skt->socket.pci_irq = nano_skts[i].pci_irq; |
91 | 91 | ||
92 | return soc_pcmcia_request_irqs(skt, | 92 | return soc_pcmcia_request_irqs(skt, |
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 5a9a392eacdf..768f9572a8c8 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c | |||
@@ -155,11 +155,11 @@ static int soc_common_pcmcia_config_skt( | |||
155 | */ | 155 | */ |
156 | if (skt->irq_state != 1 && state->io_irq) { | 156 | if (skt->irq_state != 1 && state->io_irq) { |
157 | skt->irq_state = 1; | 157 | skt->irq_state = 1; |
158 | set_irq_type(skt->socket.pci_irq, | 158 | irq_set_irq_type(skt->socket.pci_irq, |
159 | IRQ_TYPE_EDGE_FALLING); | 159 | IRQ_TYPE_EDGE_FALLING); |
160 | } else if (skt->irq_state == 1 && state->io_irq == 0) { | 160 | } else if (skt->irq_state == 1 && state->io_irq == 0) { |
161 | skt->irq_state = 0; | 161 | skt->irq_state = 0; |
162 | set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE); | 162 | irq_set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE); |
163 | } | 163 | } |
164 | 164 | ||
165 | skt->cs_state = *state; | 165 | skt->cs_state = *state; |
@@ -537,7 +537,7 @@ int soc_pcmcia_request_irqs(struct soc_pcmcia_socket *skt, | |||
537 | IRQF_DISABLED, irqs[i].str, skt); | 537 | IRQF_DISABLED, irqs[i].str, skt); |
538 | if (res) | 538 | if (res) |
539 | break; | 539 | break; |
540 | set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); | 540 | irq_set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); |
541 | } | 541 | } |
542 | 542 | ||
543 | if (res) { | 543 | if (res) { |
@@ -570,7 +570,7 @@ void soc_pcmcia_disable_irqs(struct soc_pcmcia_socket *skt, | |||
570 | 570 | ||
571 | for (i = 0; i < nr; i++) | 571 | for (i = 0; i < nr; i++) |
572 | if (irqs[i].sock == skt->nr) | 572 | if (irqs[i].sock == skt->nr) |
573 | set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); | 573 | irq_set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); |
574 | } | 574 | } |
575 | EXPORT_SYMBOL(soc_pcmcia_disable_irqs); | 575 | EXPORT_SYMBOL(soc_pcmcia_disable_irqs); |
576 | 576 | ||
@@ -581,8 +581,8 @@ void soc_pcmcia_enable_irqs(struct soc_pcmcia_socket *skt, | |||
581 | 581 | ||
582 | for (i = 0; i < nr; i++) | 582 | for (i = 0; i < nr; i++) |
583 | if (irqs[i].sock == skt->nr) { | 583 | if (irqs[i].sock == skt->nr) { |
584 | set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_RISING); | 584 | irq_set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_RISING); |
585 | set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_BOTH); | 585 | irq_set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_BOTH); |
586 | } | 586 | } |
587 | } | 587 | } |
588 | EXPORT_SYMBOL(soc_pcmcia_enable_irqs); | 588 | EXPORT_SYMBOL(soc_pcmcia_enable_irqs); |
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c index 3b67a1b6a197..379f4218857d 100644 --- a/drivers/pcmcia/xxs1500_ss.c +++ b/drivers/pcmcia/xxs1500_ss.c | |||
@@ -274,7 +274,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev) | |||
274 | * edge detector. | 274 | * edge detector. |
275 | */ | 275 | */ |
276 | irq = gpio_to_irq(GPIO_CDA); | 276 | irq = gpio_to_irq(GPIO_CDA); |
277 | set_irq_type(irq, IRQ_TYPE_EDGE_BOTH); | 277 | irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH); |
278 | ret = request_irq(irq, cdirq, 0, "pcmcia_carddetect", sock); | 278 | ret = request_irq(irq, cdirq, 0, "pcmcia_carddetect", sock); |
279 | if (ret) { | 279 | if (ret) { |
280 | dev_err(&pdev->dev, "cannot setup cd irq\n"); | 280 | dev_err(&pdev->dev, "cannot setup cd irq\n"); |
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c index 61433d492862..d653104b59cb 100644 --- a/drivers/platform/x86/intel_pmic_gpio.c +++ b/drivers/platform/x86/intel_pmic_gpio.c | |||
@@ -257,9 +257,11 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) | |||
257 | } | 257 | } |
258 | 258 | ||
259 | for (i = 0; i < 8; i++) { | 259 | for (i = 0; i < 8; i++) { |
260 | set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, | 260 | irq_set_chip_and_handler_name(i + pg->irq_base, |
261 | handle_simple_irq, "demux"); | 261 | &pmic_irqchip, |
262 | set_irq_chip_data(i + pg->irq_base, pg); | 262 | handle_simple_irq, |
263 | "demux"); | ||
264 | irq_set_chip_data(i + pg->irq_base, pg); | ||
263 | } | 265 | } |
264 | return 0; | 266 | return 0; |
265 | err: | 267 | err: |
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c index 2a9ab89f83b8..e5ced3a4c1ed 100644 --- a/drivers/power/z2_battery.c +++ b/drivers/power/z2_battery.c | |||
@@ -215,8 +215,8 @@ static int __devinit z2_batt_probe(struct i2c_client *client, | |||
215 | if (ret) | 215 | if (ret) |
216 | goto err2; | 216 | goto err2; |
217 | 217 | ||
218 | set_irq_type(gpio_to_irq(info->charge_gpio), | 218 | irq_set_irq_type(gpio_to_irq(info->charge_gpio), |
219 | IRQ_TYPE_EDGE_BOTH); | 219 | IRQ_TYPE_EDGE_BOTH); |
220 | ret = request_irq(gpio_to_irq(info->charge_gpio), | 220 | ret = request_irq(gpio_to_irq(info->charge_gpio), |
221 | z2_charge_switch_irq, IRQF_DISABLED, | 221 | z2_charge_switch_irq, IRQF_DISABLED, |
222 | "AC Detect", charger); | 222 | "AC Detect", charger); |
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index e55dc1ac83ab..6ac55fd48413 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c | |||
@@ -782,11 +782,11 @@ static void sh_rtc_set_irq_wake(struct device *dev, int enabled) | |||
782 | struct platform_device *pdev = to_platform_device(dev); | 782 | struct platform_device *pdev = to_platform_device(dev); |
783 | struct sh_rtc *rtc = platform_get_drvdata(pdev); | 783 | struct sh_rtc *rtc = platform_get_drvdata(pdev); |
784 | 784 | ||
785 | set_irq_wake(rtc->periodic_irq, enabled); | 785 | irq_set_irq_wake(rtc->periodic_irq, enabled); |
786 | 786 | ||
787 | if (rtc->carry_irq > 0) { | 787 | if (rtc->carry_irq > 0) { |
788 | set_irq_wake(rtc->carry_irq, enabled); | 788 | irq_set_irq_wake(rtc->carry_irq, enabled); |
789 | set_irq_wake(rtc->alarm_irq, enabled); | 789 | irq_set_irq_wake(rtc->alarm_irq, enabled); |
790 | } | 790 | } |
791 | } | 791 | } |
792 | 792 | ||
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 5833afbf08d7..c6ca115c71df 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c | |||
@@ -63,7 +63,7 @@ void intc_set_prio_level(unsigned int irq, unsigned int level) | |||
63 | 63 | ||
64 | static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) | 64 | static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) |
65 | { | 65 | { |
66 | generic_handle_irq((unsigned int)get_irq_data(irq)); | 66 | generic_handle_irq((unsigned int)irq_get_handler_data(irq)); |
67 | } | 67 | } |
68 | 68 | ||
69 | static void __init intc_register_irq(struct intc_desc *desc, | 69 | static void __init intc_register_irq(struct intc_desc *desc, |
@@ -116,9 +116,9 @@ static void __init intc_register_irq(struct intc_desc *desc, | |||
116 | irq_data = irq_get_irq_data(irq); | 116 | irq_data = irq_get_irq_data(irq); |
117 | 117 | ||
118 | disable_irq_nosync(irq); | 118 | disable_irq_nosync(irq); |
119 | set_irq_chip_and_handler_name(irq, &d->chip, | 119 | irq_set_chip_and_handler_name(irq, &d->chip, handle_level_irq, |
120 | handle_level_irq, "level"); | 120 | "level"); |
121 | set_irq_chip_data(irq, (void *)data[primary]); | 121 | irq_set_chip_data(irq, (void *)data[primary]); |
122 | 122 | ||
123 | /* | 123 | /* |
124 | * set priority level | 124 | * set priority level |
@@ -340,9 +340,9 @@ int __init register_intc_controller(struct intc_desc *desc) | |||
340 | vect2->enum_id = 0; | 340 | vect2->enum_id = 0; |
341 | 341 | ||
342 | /* redirect this interrupts to the first one */ | 342 | /* redirect this interrupts to the first one */ |
343 | set_irq_chip(irq2, &dummy_irq_chip); | 343 | irq_set_chip(irq2, &dummy_irq_chip); |
344 | set_irq_chained_handler(irq2, intc_redirect_irq); | 344 | irq_set_chained_handler(irq2, intc_redirect_irq); |
345 | set_irq_data(irq2, (void *)irq); | 345 | irq_set_handler_data(irq2, (void *)irq); |
346 | } | 346 | } |
347 | } | 347 | } |
348 | 348 | ||
@@ -387,19 +387,16 @@ static int intc_suspend(void) | |||
387 | /* enable wakeup irqs belonging to this intc controller */ | 387 | /* enable wakeup irqs belonging to this intc controller */ |
388 | for_each_active_irq(irq) { | 388 | for_each_active_irq(irq) { |
389 | struct irq_data *data; | 389 | struct irq_data *data; |
390 | struct irq_desc *desc; | ||
391 | struct irq_chip *chip; | 390 | struct irq_chip *chip; |
392 | 391 | ||
393 | data = irq_get_irq_data(irq); | 392 | data = irq_get_irq_data(irq); |
394 | chip = irq_data_get_irq_chip(data); | 393 | chip = irq_data_get_irq_chip(data); |
395 | if (chip != &d->chip) | 394 | if (chip != &d->chip) |
396 | continue; | 395 | continue; |
397 | desc = irq_to_desc(irq); | 396 | if (irqd_is_wakeup_set(data)) |
398 | if ((desc->status & IRQ_WAKEUP)) | ||
399 | chip->irq_enable(data); | 397 | chip->irq_enable(data); |
400 | } | 398 | } |
401 | } | 399 | } |
402 | |||
403 | return 0; | 400 | return 0; |
404 | } | 401 | } |
405 | 402 | ||
@@ -412,7 +409,6 @@ static void intc_resume(void) | |||
412 | 409 | ||
413 | for_each_active_irq(irq) { | 410 | for_each_active_irq(irq) { |
414 | struct irq_data *data; | 411 | struct irq_data *data; |
415 | struct irq_desc *desc; | ||
416 | struct irq_chip *chip; | 412 | struct irq_chip *chip; |
417 | 413 | ||
418 | data = irq_get_irq_data(irq); | 414 | data = irq_get_irq_data(irq); |
@@ -423,8 +419,7 @@ static void intc_resume(void) | |||
423 | */ | 419 | */ |
424 | if (chip != &d->chip) | 420 | if (chip != &d->chip) |
425 | continue; | 421 | continue; |
426 | desc = irq_to_desc(irq); | 422 | if (irqd_irq_disabled(data)) |
427 | if (desc->status & IRQ_DISABLED) | ||
428 | chip->irq_disable(data); | 423 | chip->irq_disable(data); |
429 | else | 424 | else |
430 | chip->irq_enable(data); | 425 | chip->irq_enable(data); |
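The intc suspend/resume hunks above also drop the open-coded desc->status checks in favour of the irqd_* accessors on struct irq_data. A condensed sketch of the resume-side test, with foo_chip standing in for the controller's chip:

#include <linux/irq.h>

static void foo_resume_one(unsigned int irq, struct irq_chip *foo_chip)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (chip != foo_chip)
		return;

	if (irqd_irq_disabled(data))	/* was: desc->status & IRQ_DISABLED */
		chip->irq_disable(data);
	else
		chip->irq_enable(data);
}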
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h index df36a421e675..5b934851efa8 100644 --- a/drivers/sh/intc/internals.h +++ b/drivers/sh/intc/internals.h | |||
@@ -86,7 +86,7 @@ enum { MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */ | |||
86 | 86 | ||
87 | static inline struct intc_desc_int *get_intc_desc(unsigned int irq) | 87 | static inline struct intc_desc_int *get_intc_desc(unsigned int irq) |
88 | { | 88 | { |
89 | struct irq_chip *chip = get_irq_chip(irq); | 89 | struct irq_chip *chip = irq_get_chip(irq); |
90 | 90 | ||
91 | return container_of(chip, struct intc_desc_int, chip); | 91 | return container_of(chip, struct intc_desc_int, chip); |
92 | } | 92 | } |
@@ -103,7 +103,7 @@ static inline void activate_irq(int irq) | |||
103 | set_irq_flags(irq, IRQF_VALID); | 103 | set_irq_flags(irq, IRQF_VALID); |
104 | #else | 104 | #else |
105 | /* same effect on other architectures */ | 105 | /* same effect on other architectures */ |
106 | set_irq_noprobe(irq); | 106 | irq_set_noprobe(irq); |
107 | #endif | 107 | #endif |
108 | } | 108 | } |
109 | 109 | ||
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c index 4e0ff7181164..ce5f81d7cc6b 100644 --- a/drivers/sh/intc/virq.c +++ b/drivers/sh/intc/virq.c | |||
@@ -110,7 +110,7 @@ static void intc_virq_handler(unsigned int irq, struct irq_desc *desc) | |||
110 | { | 110 | { |
111 | struct irq_data *data = irq_get_irq_data(irq); | 111 | struct irq_data *data = irq_get_irq_data(irq); |
112 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 112 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
113 | struct intc_virq_list *entry, *vlist = irq_data_get_irq_data(data); | 113 | struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data); |
114 | struct intc_desc_int *d = get_intc_desc(irq); | 114 | struct intc_desc_int *d = get_intc_desc(irq); |
115 | 115 | ||
116 | chip->irq_mask_ack(data); | 116 | chip->irq_mask_ack(data); |
@@ -118,7 +118,7 @@ static void intc_virq_handler(unsigned int irq, struct irq_desc *desc) | |||
118 | for_each_virq(entry, vlist) { | 118 | for_each_virq(entry, vlist) { |
119 | unsigned long addr, handle; | 119 | unsigned long addr, handle; |
120 | 120 | ||
121 | handle = (unsigned long)get_irq_data(entry->irq); | 121 | handle = (unsigned long)irq_get_handler_data(entry->irq); |
122 | addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); | 122 | addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); |
123 | 123 | ||
124 | if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) | 124 | if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) |
@@ -229,13 +229,13 @@ restart: | |||
229 | 229 | ||
230 | intc_irq_xlate_set(irq, entry->enum_id, d); | 230 | intc_irq_xlate_set(irq, entry->enum_id, d); |
231 | 231 | ||
232 | set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq), | 232 | irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq), |
233 | handle_simple_irq, "virq"); | 233 | handle_simple_irq, "virq"); |
234 | set_irq_chip_data(irq, get_irq_chip_data(entry->pirq)); | 234 | irq_set_chip_data(irq, irq_get_chip_data(entry->pirq)); |
235 | 235 | ||
236 | set_irq_data(irq, (void *)entry->handle); | 236 | irq_set_handler_data(irq, (void *)entry->handle); |
237 | 237 | ||
238 | set_irq_chained_handler(entry->pirq, intc_virq_handler); | 238 | irq_set_chained_handler(entry->pirq, intc_virq_handler); |
239 | add_virq_to_pirq(entry->pirq, irq); | 239 | add_virq_to_pirq(entry->pirq, irq); |
240 | 240 | ||
241 | radix_tree_tag_clear(&d->tree, entry->enum_id, | 241 | radix_tree_tag_clear(&d->tree, entry->enum_id, |
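In the virq.c hunks the per-IRQ private pointer moves from set_irq_data()/get_irq_data() to irq_set_handler_data()/irq_get_handler_data(), and chip plus flow handler are wired with irq_set_chip_and_handler_name(). A sketch of chaining a virtual IRQ under the new names; the function and cookie names are hypothetical:

/* Sketch, not the driver's code: attach handler data and a chained
 * demux handler with the renamed helpers. */
#include <linux/irq.h>

static void my_setup_virq(unsigned int virq, unsigned int parent_irq,
			  void *cookie,
			  void (*my_demux)(unsigned int, struct irq_desc *))
{
	irq_set_chip_and_handler_name(virq, irq_get_chip(parent_irq),
				      handle_simple_irq, "virq");
	irq_set_chip_data(virq, irq_get_chip_data(parent_irq));
	irq_set_handler_data(virq, cookie);	/* was set_irq_data() */

	irq_set_chained_handler(parent_irq, my_demux);
}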
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c index e3556ff43bb9..ac5bbc8722e5 100644 --- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c +++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c | |||
@@ -341,7 +341,7 @@ int bcmsdh_register_oob_intr(void *dhdp) | |||
341 | if (error) | 341 | if (error) |
342 | return -ENODEV; | 342 | return -ENODEV; |
343 | 343 | ||
344 | set_irq_wake(sdhcinfo->oob_irq, 1); | 344 | irq_set_irq_wake(sdhcinfo->oob_irq, 1); |
345 | sdhcinfo->oob_irq_registered = true; | 345 | sdhcinfo->oob_irq_registered = true; |
346 | } | 346 | } |
347 | 347 | ||
@@ -352,7 +352,7 @@ void bcmsdh_unregister_oob_intr(void) | |||
352 | { | 352 | { |
353 | SDLX_MSG(("%s: Enter\n", __func__)); | 353 | SDLX_MSG(("%s: Enter\n", __func__)); |
354 | 354 | ||
355 | set_irq_wake(sdhcinfo->oob_irq, 0); | 355 | irq_set_irq_wake(sdhcinfo->oob_irq, 0); |
356 | disable_irq(sdhcinfo->oob_irq); /* just in case.. */ | 356 | disable_irq(sdhcinfo->oob_irq); /* just in case.. */ |
357 | free_irq(sdhcinfo->oob_irq, NULL); | 357 | free_irq(sdhcinfo->oob_irq, NULL); |
358 | sdhcinfo->oob_irq_registered = false; | 358 | sdhcinfo->oob_irq_registered = false; |
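The brcmfmac change is the same rename applied to the wake API: set_irq_wake() becomes irq_set_irq_wake(). As an aside, most drivers reach this path through the enable_irq_wake()/disable_irq_wake() wrappers; a minimal sketch of arming and tearing down a wake-capable out-of-band interrupt (names are illustrative):

/* Sketch: enable_irq_wake()/disable_irq_wake() wrap irq_set_irq_wake(). */
#include <linux/interrupt.h>

static int my_arm_oob_irq(unsigned int oob_irq)
{
	return enable_irq_wake(oob_irq);	/* irq_set_irq_wake(irq, 1) */
}

static void my_disarm_oob_irq(unsigned int oob_irq)
{
	disable_irq_wake(oob_irq);		/* irq_set_irq_wake(irq, 0) */
	disable_irq(oob_irq);
	free_irq(oob_irq, NULL);
}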
diff --git a/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c b/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c index ea9b733c3926..21cdb0637beb 100644 --- a/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c +++ b/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c | |||
@@ -597,7 +597,7 @@ static int cy_as_hal_configure_interrupts(void *dev_p) | |||
597 | int result; | 597 | int result; |
598 | int irq_pin = AST_INT; | 598 | int irq_pin = AST_INT; |
599 | 599 | ||
600 | set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW); | 600 | irq_set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW); |
601 | 601 | ||
602 | /* | 602 | /* |
603 | * for shared IRQS must provide non NULL device ptr | 603 | * for shared IRQS must provide non NULL device ptr |
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index c35f1a73bc8b..52fdf60bdbe2 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c | |||
@@ -178,7 +178,7 @@ static int __init xen_hvc_init(void) | |||
178 | if (xencons_irq < 0) | 178 | if (xencons_irq < 0) |
179 | xencons_irq = 0; /* NO_IRQ */ | 179 | xencons_irq = 0; /* NO_IRQ */ |
180 | else | 180 | else |
181 | set_irq_noprobe(xencons_irq); | 181 | irq_set_noprobe(xencons_irq); |
182 | 182 | ||
183 | hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256); | 183 | hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256); |
184 | if (IS_ERR(hp)) | 184 | if (IS_ERR(hp)) |
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c index 2e7fc9cee9cc..b906f11f7c1a 100644 --- a/drivers/tty/serial/msm_serial_hs.c +++ b/drivers/tty/serial/msm_serial_hs.c | |||
@@ -1644,7 +1644,7 @@ static int __devinit msm_hs_probe(struct platform_device *pdev) | |||
1644 | if (unlikely(uport->irq < 0)) | 1644 | if (unlikely(uport->irq < 0)) |
1645 | return -ENXIO; | 1645 | return -ENXIO; |
1646 | 1646 | ||
1647 | if (unlikely(set_irq_wake(uport->irq, 1))) | 1647 | if (unlikely(irq_set_irq_wake(uport->irq, 1))) |
1648 | return -ENXIO; | 1648 | return -ENXIO; |
1649 | 1649 | ||
1650 | if (pdata == NULL || pdata->rx_wakeup_irq < 0) | 1650 | if (pdata == NULL || pdata->rx_wakeup_irq < 0) |
@@ -1658,7 +1658,7 @@ static int __devinit msm_hs_probe(struct platform_device *pdev) | |||
1658 | if (unlikely(msm_uport->rx_wakeup.irq < 0)) | 1658 | if (unlikely(msm_uport->rx_wakeup.irq < 0)) |
1659 | return -ENXIO; | 1659 | return -ENXIO; |
1660 | 1660 | ||
1661 | if (unlikely(set_irq_wake(msm_uport->rx_wakeup.irq, 1))) | 1661 | if (unlikely(irq_set_irq_wake(msm_uport->rx_wakeup.irq, 1))) |
1662 | return -ENXIO; | 1662 | return -ENXIO; |
1663 | } | 1663 | } |
1664 | 1664 | ||
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c index b37f92cb71bc..444b60aa15e9 100644 --- a/drivers/usb/gadget/pxa25x_udc.c +++ b/drivers/usb/gadget/pxa25x_udc.c | |||
@@ -139,24 +139,6 @@ static const char ep0name [] = "ep0"; | |||
139 | static void pxa25x_ep_fifo_flush (struct usb_ep *ep); | 139 | static void pxa25x_ep_fifo_flush (struct usb_ep *ep); |
140 | static void nuke (struct pxa25x_ep *, int status); | 140 | static void nuke (struct pxa25x_ep *, int status); |
141 | 141 | ||
142 | /* one GPIO should be used to detect VBUS from the host */ | ||
143 | static int is_vbus_present(void) | ||
144 | { | ||
145 | struct pxa2xx_udc_mach_info *mach = the_controller->mach; | ||
146 | |||
147 | if (gpio_is_valid(mach->gpio_vbus)) { | ||
148 | int value = gpio_get_value(mach->gpio_vbus); | ||
149 | |||
150 | if (mach->gpio_vbus_inverted) | ||
151 | return !value; | ||
152 | else | ||
153 | return !!value; | ||
154 | } | ||
155 | if (mach->udc_is_connected) | ||
156 | return mach->udc_is_connected(); | ||
157 | return 1; | ||
158 | } | ||
159 | |||
160 | /* one GPIO should control a D+ pullup, so host sees this device (or not) */ | 142 | /* one GPIO should control a D+ pullup, so host sees this device (or not) */ |
161 | static void pullup_off(void) | 143 | static void pullup_off(void) |
162 | { | 144 | { |
@@ -1055,7 +1037,7 @@ udc_seq_show(struct seq_file *m, void *_d) | |||
1055 | "%s version: %s\nGadget driver: %s\nHost %s\n\n", | 1037 | "%s version: %s\nGadget driver: %s\nHost %s\n\n", |
1056 | driver_name, DRIVER_VERSION SIZE_STR "(pio)", | 1038 | driver_name, DRIVER_VERSION SIZE_STR "(pio)", |
1057 | dev->driver ? dev->driver->driver.name : "(none)", | 1039 | dev->driver ? dev->driver->driver.name : "(none)", |
1058 | is_vbus_present() ? "full speed" : "disconnected"); | 1040 | dev->gadget.speed == USB_SPEED_FULL ? "full speed" : "disconnected"); |
1059 | 1041 | ||
1060 | /* registers for device and ep0 */ | 1042 | /* registers for device and ep0 */ |
1061 | seq_printf(m, | 1043 | seq_printf(m, |
@@ -1094,7 +1076,7 @@ udc_seq_show(struct seq_file *m, void *_d) | |||
1094 | (tmp & UDCCFR_ACM) ? " acm" : ""); | 1076 | (tmp & UDCCFR_ACM) ? " acm" : ""); |
1095 | } | 1077 | } |
1096 | 1078 | ||
1097 | if (!is_vbus_present() || !dev->driver) | 1079 | if (dev->gadget.speed != USB_SPEED_FULL || !dev->driver) |
1098 | goto done; | 1080 | goto done; |
1099 | 1081 | ||
1100 | seq_printf(m, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n", | 1082 | seq_printf(m, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n", |
@@ -1435,14 +1417,6 @@ lubbock_vbus_irq(int irq, void *_dev) | |||
1435 | 1417 | ||
1436 | #endif | 1418 | #endif |
1437 | 1419 | ||
1438 | static irqreturn_t udc_vbus_irq(int irq, void *_dev) | ||
1439 | { | ||
1440 | struct pxa25x_udc *dev = _dev; | ||
1441 | |||
1442 | pxa25x_udc_vbus_session(&dev->gadget, is_vbus_present()); | ||
1443 | return IRQ_HANDLED; | ||
1444 | } | ||
1445 | |||
1446 | 1420 | ||
1447 | /*-------------------------------------------------------------------------*/ | 1421 | /*-------------------------------------------------------------------------*/ |
1448 | 1422 | ||
@@ -1766,12 +1740,9 @@ pxa25x_udc_irq(int irq, void *_dev) | |||
1766 | if (unlikely(udccr & UDCCR_SUSIR)) { | 1740 | if (unlikely(udccr & UDCCR_SUSIR)) { |
1767 | udc_ack_int_UDCCR(UDCCR_SUSIR); | 1741 | udc_ack_int_UDCCR(UDCCR_SUSIR); |
1768 | handled = 1; | 1742 | handled = 1; |
1769 | DBG(DBG_VERBOSE, "USB suspend%s\n", is_vbus_present() | 1743 | DBG(DBG_VERBOSE, "USB suspend\n"); |
1770 | ? "" : "+disconnect"); | ||
1771 | 1744 | ||
1772 | if (!is_vbus_present()) | 1745 | if (dev->gadget.speed != USB_SPEED_UNKNOWN |
1773 | stop_activity(dev, dev->driver); | ||
1774 | else if (dev->gadget.speed != USB_SPEED_UNKNOWN | ||
1775 | && dev->driver | 1746 | && dev->driver |
1776 | && dev->driver->suspend) | 1747 | && dev->driver->suspend) |
1777 | dev->driver->suspend(&dev->gadget); | 1748 | dev->driver->suspend(&dev->gadget); |
@@ -1786,8 +1757,7 @@ pxa25x_udc_irq(int irq, void *_dev) | |||
1786 | 1757 | ||
1787 | if (dev->gadget.speed != USB_SPEED_UNKNOWN | 1758 | if (dev->gadget.speed != USB_SPEED_UNKNOWN |
1788 | && dev->driver | 1759 | && dev->driver |
1789 | && dev->driver->resume | 1760 | && dev->driver->resume) |
1790 | && is_vbus_present()) | ||
1791 | dev->driver->resume(&dev->gadget); | 1761 | dev->driver->resume(&dev->gadget); |
1792 | } | 1762 | } |
1793 | 1763 | ||
@@ -2137,7 +2107,7 @@ static struct pxa25x_udc memory = { | |||
2137 | static int __init pxa25x_udc_probe(struct platform_device *pdev) | 2107 | static int __init pxa25x_udc_probe(struct platform_device *pdev) |
2138 | { | 2108 | { |
2139 | struct pxa25x_udc *dev = &memory; | 2109 | struct pxa25x_udc *dev = &memory; |
2140 | int retval, vbus_irq, irq; | 2110 | int retval, irq; |
2141 | u32 chiprev; | 2111 | u32 chiprev; |
2142 | 2112 | ||
2143 | /* insist on Intel/ARM/XScale */ | 2113 | /* insist on Intel/ARM/XScale */ |
@@ -2199,19 +2169,6 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev) | |||
2199 | 2169 | ||
2200 | dev->transceiver = otg_get_transceiver(); | 2170 | dev->transceiver = otg_get_transceiver(); |
2201 | 2171 | ||
2202 | if (gpio_is_valid(dev->mach->gpio_vbus)) { | ||
2203 | if ((retval = gpio_request(dev->mach->gpio_vbus, | ||
2204 | "pxa25x_udc GPIO VBUS"))) { | ||
2205 | dev_dbg(&pdev->dev, | ||
2206 | "can't get vbus gpio %d, err: %d\n", | ||
2207 | dev->mach->gpio_vbus, retval); | ||
2208 | goto err_gpio_vbus; | ||
2209 | } | ||
2210 | gpio_direction_input(dev->mach->gpio_vbus); | ||
2211 | vbus_irq = gpio_to_irq(dev->mach->gpio_vbus); | ||
2212 | } else | ||
2213 | vbus_irq = 0; | ||
2214 | |||
2215 | if (gpio_is_valid(dev->mach->gpio_pullup)) { | 2172 | if (gpio_is_valid(dev->mach->gpio_pullup)) { |
2216 | if ((retval = gpio_request(dev->mach->gpio_pullup, | 2173 | if ((retval = gpio_request(dev->mach->gpio_pullup, |
2217 | "pca25x_udc GPIO PULLUP"))) { | 2174 | "pca25x_udc GPIO PULLUP"))) { |
@@ -2237,7 +2194,7 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev) | |||
2237 | udc_disable(dev); | 2194 | udc_disable(dev); |
2238 | udc_reinit(dev); | 2195 | udc_reinit(dev); |
2239 | 2196 | ||
2240 | dev->vbus = !!is_vbus_present(); | 2197 | dev->vbus = 0; |
2241 | 2198 | ||
2242 | /* irq setup after old hardware state is cleaned up */ | 2199 | /* irq setup after old hardware state is cleaned up */ |
2243 | retval = request_irq(irq, pxa25x_udc_irq, | 2200 | retval = request_irq(irq, pxa25x_udc_irq, |
@@ -2273,22 +2230,10 @@ lubbock_fail0: | |||
2273 | } | 2230 | } |
2274 | } else | 2231 | } else |
2275 | #endif | 2232 | #endif |
2276 | if (vbus_irq) { | ||
2277 | retval = request_irq(vbus_irq, udc_vbus_irq, | ||
2278 | IRQF_DISABLED | IRQF_SAMPLE_RANDOM | | ||
2279 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | ||
2280 | driver_name, dev); | ||
2281 | if (retval != 0) { | ||
2282 | pr_err("%s: can't get irq %i, err %d\n", | ||
2283 | driver_name, vbus_irq, retval); | ||
2284 | goto err_vbus_irq; | ||
2285 | } | ||
2286 | } | ||
2287 | create_debug_files(dev); | 2233 | create_debug_files(dev); |
2288 | 2234 | ||
2289 | return 0; | 2235 | return 0; |
2290 | 2236 | ||
2291 | err_vbus_irq: | ||
2292 | #ifdef CONFIG_ARCH_LUBBOCK | 2237 | #ifdef CONFIG_ARCH_LUBBOCK |
2293 | free_irq(LUBBOCK_USB_DISC_IRQ, dev); | 2238 | free_irq(LUBBOCK_USB_DISC_IRQ, dev); |
2294 | err_irq_lub: | 2239 | err_irq_lub: |
@@ -2298,9 +2243,6 @@ lubbock_fail0: | |||
2298 | if (gpio_is_valid(dev->mach->gpio_pullup)) | 2243 | if (gpio_is_valid(dev->mach->gpio_pullup)) |
2299 | gpio_free(dev->mach->gpio_pullup); | 2244 | gpio_free(dev->mach->gpio_pullup); |
2300 | err_gpio_pullup: | 2245 | err_gpio_pullup: |
2301 | if (gpio_is_valid(dev->mach->gpio_vbus)) | ||
2302 | gpio_free(dev->mach->gpio_vbus); | ||
2303 | err_gpio_vbus: | ||
2304 | if (dev->transceiver) { | 2246 | if (dev->transceiver) { |
2305 | otg_put_transceiver(dev->transceiver); | 2247 | otg_put_transceiver(dev->transceiver); |
2306 | dev->transceiver = NULL; | 2248 | dev->transceiver = NULL; |
@@ -2337,10 +2279,6 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev) | |||
2337 | free_irq(LUBBOCK_USB_IRQ, dev); | 2279 | free_irq(LUBBOCK_USB_IRQ, dev); |
2338 | } | 2280 | } |
2339 | #endif | 2281 | #endif |
2340 | if (gpio_is_valid(dev->mach->gpio_vbus)) { | ||
2341 | free_irq(gpio_to_irq(dev->mach->gpio_vbus), dev); | ||
2342 | gpio_free(dev->mach->gpio_vbus); | ||
2343 | } | ||
2344 | if (gpio_is_valid(dev->mach->gpio_pullup)) | 2282 | if (gpio_is_valid(dev->mach->gpio_pullup)) |
2345 | gpio_free(dev->mach->gpio_pullup); | 2283 | gpio_free(dev->mach->gpio_pullup); |
2346 | 2284 | ||
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 38193f4e980e..44e4deb362e1 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c | |||
@@ -3832,7 +3832,7 @@ static int oxu_drv_probe(struct platform_device *pdev) | |||
3832 | return -EBUSY; | 3832 | return -EBUSY; |
3833 | } | 3833 | } |
3834 | 3834 | ||
3835 | ret = set_irq_type(irq, IRQF_TRIGGER_FALLING); | 3835 | ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING); |
3836 | if (ret) { | 3836 | if (ret) { |
3837 | dev_err(&pdev->dev, "error setting irq type\n"); | 3837 | dev_err(&pdev->dev, "error setting irq type\n"); |
3838 | ret = -EFAULT; | 3838 | ret = -EFAULT; |
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 2ba3b070ed0b..c47aac4a1f98 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c | |||
@@ -943,7 +943,7 @@ static void tusb_musb_enable(struct musb *musb) | |||
943 | musb_writel(tbase, TUSB_INT_CTRL_CONF, | 943 | musb_writel(tbase, TUSB_INT_CTRL_CONF, |
944 | TUSB_INT_CTRL_CONF_INT_RELCYC(0)); | 944 | TUSB_INT_CTRL_CONF_INT_RELCYC(0)); |
945 | 945 | ||
946 | set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); | 946 | irq_set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); |
947 | 947 | ||
948 | /* maybe force into the Default-A OTG state machine */ | 948 | /* maybe force into the Default-A OTG state machine */ |
949 | if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) | 949 | if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) |
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 825b665245bb..a2e5b5100ab4 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c | |||
@@ -627,7 +627,12 @@ static void overlay1fb_enable(struct pxafb_layer *ofb) | |||
627 | 627 | ||
628 | static void overlay1fb_disable(struct pxafb_layer *ofb) | 628 | static void overlay1fb_disable(struct pxafb_layer *ofb) |
629 | { | 629 | { |
630 | uint32_t lccr5 = lcd_readl(ofb->fbi, LCCR5); | 630 | uint32_t lccr5; |
631 | |||
632 | if (!(lcd_readl(ofb->fbi, OVL1C1) & OVLxC1_OEN)) | ||
633 | return; | ||
634 | |||
635 | lccr5 = lcd_readl(ofb->fbi, LCCR5); | ||
631 | 636 | ||
632 | lcd_writel(ofb->fbi, OVL1C1, ofb->control[0] & ~OVLxC1_OEN); | 637 | lcd_writel(ofb->fbi, OVL1C1, ofb->control[0] & ~OVLxC1_OEN); |
633 | 638 | ||
@@ -685,7 +690,12 @@ static void overlay2fb_enable(struct pxafb_layer *ofb) | |||
685 | 690 | ||
686 | static void overlay2fb_disable(struct pxafb_layer *ofb) | 691 | static void overlay2fb_disable(struct pxafb_layer *ofb) |
687 | { | 692 | { |
688 | uint32_t lccr5 = lcd_readl(ofb->fbi, LCCR5); | 693 | uint32_t lccr5; |
694 | |||
695 | if (!(lcd_readl(ofb->fbi, OVL2C1) & OVLxC1_OEN)) | ||
696 | return; | ||
697 | |||
698 | lccr5 = lcd_readl(ofb->fbi, LCCR5); | ||
689 | 699 | ||
690 | lcd_writel(ofb->fbi, OVL2C1, ofb->control[0] & ~OVLxC1_OEN); | 700 | lcd_writel(ofb->fbi, OVL2C1, ofb->control[0] & ~OVLxC1_OEN); |
691 | 701 | ||
@@ -720,12 +730,10 @@ static int overlayfb_open(struct fb_info *info, int user) | |||
720 | if (user == 0) | 730 | if (user == 0) |
721 | return -ENODEV; | 731 | return -ENODEV; |
722 | 732 | ||
723 | /* allow only one user at a time */ | 733 | if (ofb->usage++ == 0) |
724 | if (atomic_inc_and_test(&ofb->usage)) | 734 | /* unblank the base framebuffer */ |
725 | return -EBUSY; | 735 | fb_blank(&ofb->fbi->fb, FB_BLANK_UNBLANK); |
726 | 736 | ||
727 | /* unblank the base framebuffer */ | ||
728 | fb_blank(&ofb->fbi->fb, FB_BLANK_UNBLANK); | ||
729 | return 0; | 737 | return 0; |
730 | } | 738 | } |
731 | 739 | ||
@@ -733,12 +741,15 @@ static int overlayfb_release(struct fb_info *info, int user) | |||
733 | { | 741 | { |
734 | struct pxafb_layer *ofb = (struct pxafb_layer*) info; | 742 | struct pxafb_layer *ofb = (struct pxafb_layer*) info; |
735 | 743 | ||
736 | atomic_dec(&ofb->usage); | 744 | if (ofb->usage == 1) { |
737 | ofb->ops->disable(ofb); | 745 | ofb->ops->disable(ofb); |
746 | ofb->fb.var.height = -1; | ||
747 | ofb->fb.var.width = -1; | ||
748 | ofb->fb.var.xres = ofb->fb.var.xres_virtual = 0; | ||
749 | ofb->fb.var.yres = ofb->fb.var.yres_virtual = 0; | ||
738 | 750 | ||
739 | free_pages_exact(ofb->video_mem, ofb->video_mem_size); | 751 | ofb->usage--; |
740 | ofb->video_mem = NULL; | 752 | } |
741 | ofb->video_mem_size = 0; | ||
742 | return 0; | 753 | return 0; |
743 | } | 754 | } |
744 | 755 | ||
@@ -750,7 +761,7 @@ static int overlayfb_check_var(struct fb_var_screeninfo *var, | |||
750 | int xpos, ypos, pfor, bpp; | 761 | int xpos, ypos, pfor, bpp; |
751 | 762 | ||
752 | xpos = NONSTD_TO_XPOS(var->nonstd); | 763 | xpos = NONSTD_TO_XPOS(var->nonstd); |
753 | ypos = NONSTD_TO_XPOS(var->nonstd); | 764 | ypos = NONSTD_TO_YPOS(var->nonstd); |
754 | pfor = NONSTD_TO_PFOR(var->nonstd); | 765 | pfor = NONSTD_TO_PFOR(var->nonstd); |
755 | 766 | ||
756 | bpp = pxafb_var_to_bpp(var); | 767 | bpp = pxafb_var_to_bpp(var); |
@@ -794,7 +805,7 @@ static int overlayfb_check_var(struct fb_var_screeninfo *var, | |||
794 | return 0; | 805 | return 0; |
795 | } | 806 | } |
796 | 807 | ||
797 | static int overlayfb_map_video_memory(struct pxafb_layer *ofb) | 808 | static int overlayfb_check_video_memory(struct pxafb_layer *ofb) |
798 | { | 809 | { |
799 | struct fb_var_screeninfo *var = &ofb->fb.var; | 810 | struct fb_var_screeninfo *var = &ofb->fb.var; |
800 | int pfor = NONSTD_TO_PFOR(var->nonstd); | 811 | int pfor = NONSTD_TO_PFOR(var->nonstd); |
@@ -812,27 +823,11 @@ static int overlayfb_map_video_memory(struct pxafb_layer *ofb) | |||
812 | 823 | ||
813 | size = PAGE_ALIGN(ofb->fb.fix.line_length * var->yres_virtual); | 824 | size = PAGE_ALIGN(ofb->fb.fix.line_length * var->yres_virtual); |
814 | 825 | ||
815 | /* don't re-allocate if the original video memory is enough */ | ||
816 | if (ofb->video_mem) { | 826 | if (ofb->video_mem) { |
817 | if (ofb->video_mem_size >= size) | 827 | if (ofb->video_mem_size >= size) |
818 | return 0; | 828 | return 0; |
819 | |||
820 | free_pages_exact(ofb->video_mem, ofb->video_mem_size); | ||
821 | } | 829 | } |
822 | 830 | return -EINVAL; | |
823 | ofb->video_mem = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); | ||
824 | if (ofb->video_mem == NULL) | ||
825 | return -ENOMEM; | ||
826 | |||
827 | ofb->video_mem_phys = virt_to_phys(ofb->video_mem); | ||
828 | ofb->video_mem_size = size; | ||
829 | |||
830 | mutex_lock(&ofb->fb.mm_lock); | ||
831 | ofb->fb.fix.smem_start = ofb->video_mem_phys; | ||
832 | ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual; | ||
833 | mutex_unlock(&ofb->fb.mm_lock); | ||
834 | ofb->fb.screen_base = ofb->video_mem; | ||
835 | return 0; | ||
836 | } | 831 | } |
837 | 832 | ||
838 | static int overlayfb_set_par(struct fb_info *info) | 833 | static int overlayfb_set_par(struct fb_info *info) |
@@ -841,13 +836,13 @@ static int overlayfb_set_par(struct fb_info *info) | |||
841 | struct fb_var_screeninfo *var = &info->var; | 836 | struct fb_var_screeninfo *var = &info->var; |
842 | int xpos, ypos, pfor, bpp, ret; | 837 | int xpos, ypos, pfor, bpp, ret; |
843 | 838 | ||
844 | ret = overlayfb_map_video_memory(ofb); | 839 | ret = overlayfb_check_video_memory(ofb); |
845 | if (ret) | 840 | if (ret) |
846 | return ret; | 841 | return ret; |
847 | 842 | ||
848 | bpp = pxafb_var_to_bpp(var); | 843 | bpp = pxafb_var_to_bpp(var); |
849 | xpos = NONSTD_TO_XPOS(var->nonstd); | 844 | xpos = NONSTD_TO_XPOS(var->nonstd); |
850 | ypos = NONSTD_TO_XPOS(var->nonstd); | 845 | ypos = NONSTD_TO_YPOS(var->nonstd); |
851 | pfor = NONSTD_TO_PFOR(var->nonstd); | 846 | pfor = NONSTD_TO_PFOR(var->nonstd); |
852 | 847 | ||
853 | ofb->control[0] = OVLxC1_PPL(var->xres) | OVLxC1_LPO(var->yres) | | 848 | ofb->control[0] = OVLxC1_PPL(var->xres) | OVLxC1_LPO(var->yres) | |
@@ -891,7 +886,7 @@ static void __devinit init_pxafb_overlay(struct pxafb_info *fbi, | |||
891 | 886 | ||
892 | ofb->id = id; | 887 | ofb->id = id; |
893 | ofb->ops = &ofb_ops[id]; | 888 | ofb->ops = &ofb_ops[id]; |
894 | atomic_set(&ofb->usage, 0); | 889 | ofb->usage = 0; |
895 | ofb->fbi = fbi; | 890 | ofb->fbi = fbi; |
896 | init_completion(&ofb->branch_done); | 891 | init_completion(&ofb->branch_done); |
897 | } | 892 | } |
@@ -904,29 +899,60 @@ static inline int pxafb_overlay_supported(void) | |||
904 | return 0; | 899 | return 0; |
905 | } | 900 | } |
906 | 901 | ||
907 | static int __devinit pxafb_overlay_init(struct pxafb_info *fbi) | 902 | static int __devinit pxafb_overlay_map_video_memory(struct pxafb_info *pxafb, |
903 | struct pxafb_layer *ofb) | ||
904 | { | ||
905 | /* We assume that user will use at most video_mem_size for overlay fb, | ||
906 | * anyway, it's useless to use 16bpp main plane and 24bpp overlay | ||
907 | */ | ||
908 | ofb->video_mem = alloc_pages_exact(PAGE_ALIGN(pxafb->video_mem_size), | ||
909 | GFP_KERNEL | __GFP_ZERO); | ||
910 | if (ofb->video_mem == NULL) | ||
911 | return -ENOMEM; | ||
912 | |||
913 | ofb->video_mem_phys = virt_to_phys(ofb->video_mem); | ||
914 | ofb->video_mem_size = PAGE_ALIGN(pxafb->video_mem_size); | ||
915 | |||
916 | mutex_lock(&ofb->fb.mm_lock); | ||
917 | ofb->fb.fix.smem_start = ofb->video_mem_phys; | ||
918 | ofb->fb.fix.smem_len = pxafb->video_mem_size; | ||
919 | mutex_unlock(&ofb->fb.mm_lock); | ||
920 | |||
921 | ofb->fb.screen_base = ofb->video_mem; | ||
922 | |||
923 | return 0; | ||
924 | } | ||
925 | |||
926 | static void __devinit pxafb_overlay_init(struct pxafb_info *fbi) | ||
908 | { | 927 | { |
909 | int i, ret; | 928 | int i, ret; |
910 | 929 | ||
911 | if (!pxafb_overlay_supported()) | 930 | if (!pxafb_overlay_supported()) |
912 | return 0; | 931 | return; |
913 | 932 | ||
914 | for (i = 0; i < 2; i++) { | 933 | for (i = 0; i < 2; i++) { |
915 | init_pxafb_overlay(fbi, &fbi->overlay[i], i); | 934 | struct pxafb_layer *ofb = &fbi->overlay[i]; |
916 | ret = register_framebuffer(&fbi->overlay[i].fb); | 935 | init_pxafb_overlay(fbi, ofb, i); |
936 | ret = register_framebuffer(&ofb->fb); | ||
917 | if (ret) { | 937 | if (ret) { |
918 | dev_err(fbi->dev, "failed to register overlay %d\n", i); | 938 | dev_err(fbi->dev, "failed to register overlay %d\n", i); |
919 | return ret; | 939 | continue; |
920 | } | 940 | } |
941 | ret = pxafb_overlay_map_video_memory(fbi, ofb); | ||
942 | if (ret) { | ||
943 | dev_err(fbi->dev, | ||
944 | "failed to map video memory for overlay %d\n", | ||
945 | i); | ||
946 | unregister_framebuffer(&ofb->fb); | ||
947 | continue; | ||
948 | } | ||
949 | ofb->registered = 1; | ||
921 | } | 950 | } |
922 | 951 | ||
923 | /* mask all IU/BS/EOF/SOF interrupts */ | 952 | /* mask all IU/BS/EOF/SOF interrupts */ |
924 | lcd_writel(fbi, LCCR5, ~0); | 953 | lcd_writel(fbi, LCCR5, ~0); |
925 | 954 | ||
926 | /* place overlay(s) on top of base */ | ||
927 | fbi->lccr0 |= LCCR0_OUC; | ||
928 | pr_info("PXA Overlay driver loaded successfully!\n"); | 955 | pr_info("PXA Overlay driver loaded successfully!\n"); |
929 | return 0; | ||
930 | } | 956 | } |
931 | 957 | ||
932 | static void __devexit pxafb_overlay_exit(struct pxafb_info *fbi) | 958 | static void __devexit pxafb_overlay_exit(struct pxafb_info *fbi) |
@@ -936,8 +962,15 @@ static void __devexit pxafb_overlay_exit(struct pxafb_info *fbi) | |||
936 | if (!pxafb_overlay_supported()) | 962 | if (!pxafb_overlay_supported()) |
937 | return; | 963 | return; |
938 | 964 | ||
939 | for (i = 0; i < 2; i++) | 965 | for (i = 0; i < 2; i++) { |
940 | unregister_framebuffer(&fbi->overlay[i].fb); | 966 | struct pxafb_layer *ofb = &fbi->overlay[i]; |
967 | if (ofb->registered) { | ||
968 | if (ofb->video_mem) | ||
969 | free_pages_exact(ofb->video_mem, | ||
970 | ofb->video_mem_size); | ||
971 | unregister_framebuffer(&ofb->fb); | ||
972 | } | ||
973 | } | ||
941 | } | 974 | } |
942 | #else | 975 | #else |
943 | static inline void pxafb_overlay_init(struct pxafb_info *fbi) {} | 976 | static inline void pxafb_overlay_init(struct pxafb_info *fbi) {} |
@@ -1368,7 +1401,8 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, | |||
1368 | (lcd_readl(fbi, LCCR3) != fbi->reg_lccr3) || | 1401 | (lcd_readl(fbi, LCCR3) != fbi->reg_lccr3) || |
1369 | (lcd_readl(fbi, LCCR4) != fbi->reg_lccr4) || | 1402 | (lcd_readl(fbi, LCCR4) != fbi->reg_lccr4) || |
1370 | (lcd_readl(fbi, FDADR0) != fbi->fdadr[0]) || | 1403 | (lcd_readl(fbi, FDADR0) != fbi->fdadr[0]) || |
1371 | (lcd_readl(fbi, FDADR1) != fbi->fdadr[1])) | 1404 | ((fbi->lccr0 & LCCR0_SDS) && |
1405 | (lcd_readl(fbi, FDADR1) != fbi->fdadr[1]))) | ||
1372 | pxafb_schedule_work(fbi, C_REENABLE); | 1406 | pxafb_schedule_work(fbi, C_REENABLE); |
1373 | 1407 | ||
1374 | return 0; | 1408 | return 0; |
@@ -1420,7 +1454,8 @@ static void pxafb_enable_controller(struct pxafb_info *fbi) | |||
1420 | lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB); | 1454 | lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB); |
1421 | 1455 | ||
1422 | lcd_writel(fbi, FDADR0, fbi->fdadr[0]); | 1456 | lcd_writel(fbi, FDADR0, fbi->fdadr[0]); |
1423 | lcd_writel(fbi, FDADR1, fbi->fdadr[1]); | 1457 | if (fbi->lccr0 & LCCR0_SDS) |
1458 | lcd_writel(fbi, FDADR1, fbi->fdadr[1]); | ||
1424 | lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB); | 1459 | lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB); |
1425 | } | 1460 | } |
1426 | 1461 | ||
@@ -1613,7 +1648,8 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data) | |||
1613 | 1648 | ||
1614 | switch (val) { | 1649 | switch (val) { |
1615 | case CPUFREQ_PRECHANGE: | 1650 | case CPUFREQ_PRECHANGE: |
1616 | set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE); | 1651 | if (!fbi->overlay[0].usage && !fbi->overlay[1].usage) |
1652 | set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE); | ||
1617 | break; | 1653 | break; |
1618 | 1654 | ||
1619 | case CPUFREQ_POSTCHANGE: | 1655 | case CPUFREQ_POSTCHANGE: |
@@ -1806,6 +1842,12 @@ static struct pxafb_info * __devinit pxafb_init_fbinfo(struct device *dev) | |||
1806 | 1842 | ||
1807 | pxafb_decode_mach_info(fbi, inf); | 1843 | pxafb_decode_mach_info(fbi, inf); |
1808 | 1844 | ||
1845 | #ifdef CONFIG_FB_PXA_OVERLAY | ||
1846 | /* place overlay(s) on top of base */ | ||
1847 | if (pxafb_overlay_supported()) | ||
1848 | fbi->lccr0 |= LCCR0_OUC; | ||
1849 | #endif | ||
1850 | |||
1809 | init_waitqueue_head(&fbi->ctrlr_wait); | 1851 | init_waitqueue_head(&fbi->ctrlr_wait); |
1810 | INIT_WORK(&fbi->task, pxafb_task); | 1852 | INIT_WORK(&fbi->task, pxafb_task); |
1811 | mutex_init(&fbi->ctrlr_lock); | 1853 | mutex_init(&fbi->ctrlr_lock); |
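The pxafb overlay rework stops allocating video memory on demand in set_par(): each overlay is mapped once at init with alloc_pages_exact() (sized from the base plane's video_mem_size) and freed only at overlay teardown, with a plain usage counter replacing the atomic. A short sketch of the alloc/free pairing; the helper names and the unsigned long phys type are illustrative:

/* Sketch of the alloc_pages_exact()/free_pages_exact() pairing; "size"
 * would be PAGE_ALIGN() of the base plane's video_mem_size. */
#include <linux/gfp.h>
#include <linux/io.h>

static void *my_alloc_overlay_mem(size_t size, unsigned long *phys)
{
	void *mem = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);

	if (!mem)
		return NULL;
	*phys = virt_to_phys(mem);	/* contiguous pages, so this is valid */
	return mem;
}

static void my_free_overlay_mem(void *mem, size_t size)
{
	if (mem)
		free_pages_exact(mem, size);
}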
diff --git a/drivers/video/pxafb.h b/drivers/video/pxafb.h index 2353521c5c8c..26ba9fa3f737 100644 --- a/drivers/video/pxafb.h +++ b/drivers/video/pxafb.h | |||
@@ -92,7 +92,8 @@ struct pxafb_layer_ops { | |||
92 | struct pxafb_layer { | 92 | struct pxafb_layer { |
93 | struct fb_info fb; | 93 | struct fb_info fb; |
94 | int id; | 94 | int id; |
95 | atomic_t usage; | 95 | int registered; |
96 | uint32_t usage; | ||
96 | uint32_t control[2]; | 97 | uint32_t control[2]; |
97 | 98 | ||
98 | struct pxafb_layer_ops *ops; | 99 | struct pxafb_layer_ops *ops; |
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c index 95921b77cf86..2f4fa02744a5 100644 --- a/drivers/w1/masters/ds1wm.c +++ b/drivers/w1/masters/ds1wm.c | |||
@@ -368,9 +368,9 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
368 | ds1wm_data->active_high = plat->active_high; | 368 | ds1wm_data->active_high = plat->active_high; |
369 | 369 | ||
370 | if (res->flags & IORESOURCE_IRQ_HIGHEDGE) | 370 | if (res->flags & IORESOURCE_IRQ_HIGHEDGE) |
371 | set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); | 371 | irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); |
372 | if (res->flags & IORESOURCE_IRQ_LOWEDGE) | 372 | if (res->flags & IORESOURCE_IRQ_LOWEDGE) |
373 | set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); | 373 | irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); |
374 | 374 | ||
375 | ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED, | 375 | ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED, |
376 | "ds1wm", ds1wm_data); | 376 | "ds1wm", ds1wm_data); |
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c index 596ba604e78d..51b5551b4e3f 100644 --- a/drivers/watchdog/davinci_wdt.c +++ b/drivers/watchdog/davinci_wdt.c | |||
@@ -202,7 +202,6 @@ static struct miscdevice davinci_wdt_miscdev = { | |||
202 | static int __devinit davinci_wdt_probe(struct platform_device *pdev) | 202 | static int __devinit davinci_wdt_probe(struct platform_device *pdev) |
203 | { | 203 | { |
204 | int ret = 0, size; | 204 | int ret = 0, size; |
205 | struct resource *res; | ||
206 | struct device *dev = &pdev->dev; | 205 | struct device *dev = &pdev->dev; |
207 | 206 | ||
208 | wdt_clk = clk_get(dev, NULL); | 207 | wdt_clk = clk_get(dev, NULL); |
@@ -216,31 +215,31 @@ static int __devinit davinci_wdt_probe(struct platform_device *pdev) | |||
216 | 215 | ||
217 | dev_info(dev, "heartbeat %d sec\n", heartbeat); | 216 | dev_info(dev, "heartbeat %d sec\n", heartbeat); |
218 | 217 | ||
219 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 218 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
220 | if (res == NULL) { | 219 | if (wdt_mem == NULL) { |
221 | dev_err(dev, "failed to get memory region resource\n"); | 220 | dev_err(dev, "failed to get memory region resource\n"); |
222 | return -ENOENT; | 221 | return -ENOENT; |
223 | } | 222 | } |
224 | 223 | ||
225 | size = resource_size(res); | 224 | size = resource_size(wdt_mem); |
226 | wdt_mem = request_mem_region(res->start, size, pdev->name); | 225 | if (!request_mem_region(wdt_mem->start, size, pdev->name)) { |
227 | |||
228 | if (wdt_mem == NULL) { | ||
229 | dev_err(dev, "failed to get memory region\n"); | 226 | dev_err(dev, "failed to get memory region\n"); |
230 | return -ENOENT; | 227 | return -ENOENT; |
231 | } | 228 | } |
232 | 229 | ||
233 | wdt_base = ioremap(res->start, size); | 230 | wdt_base = ioremap(wdt_mem->start, size); |
234 | if (!wdt_base) { | 231 | if (!wdt_base) { |
235 | dev_err(dev, "failed to map memory region\n"); | 232 | dev_err(dev, "failed to map memory region\n"); |
233 | release_mem_region(wdt_mem->start, size); | ||
234 | wdt_mem = NULL; | ||
236 | return -ENOMEM; | 235 | return -ENOMEM; |
237 | } | 236 | } |
238 | 237 | ||
239 | ret = misc_register(&davinci_wdt_miscdev); | 238 | ret = misc_register(&davinci_wdt_miscdev); |
240 | if (ret < 0) { | 239 | if (ret < 0) { |
241 | dev_err(dev, "cannot register misc device\n"); | 240 | dev_err(dev, "cannot register misc device\n"); |
242 | release_resource(wdt_mem); | 241 | release_mem_region(wdt_mem->start, size); |
243 | kfree(wdt_mem); | 242 | wdt_mem = NULL; |
244 | } else { | 243 | } else { |
245 | set_bit(WDT_DEVICE_INITED, &wdt_status); | 244 | set_bit(WDT_DEVICE_INITED, &wdt_status); |
246 | } | 245 | } |
@@ -253,8 +252,7 @@ static int __devexit davinci_wdt_remove(struct platform_device *pdev) | |||
253 | { | 252 | { |
254 | misc_deregister(&davinci_wdt_miscdev); | 253 | misc_deregister(&davinci_wdt_miscdev); |
255 | if (wdt_mem) { | 254 | if (wdt_mem) { |
256 | release_resource(wdt_mem); | 255 | release_mem_region(wdt_mem->start, resource_size(wdt_mem)); |
257 | kfree(wdt_mem); | ||
258 | wdt_mem = NULL; | 256 | wdt_mem = NULL; |
259 | } | 257 | } |
260 | 258 | ||
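The davinci_wdt change (and the max63xx, pnx4008 and s3c2410 changes that follow) keeps the platform resource pointer itself in wdt_mem and pairs request_mem_region() with release_mem_region(), instead of storing the resource returned by request_mem_region() and tearing it down with release_resource()/kfree(). A sketch of the resulting probe/remove pattern with illustrative names:

/* Sketch of the request_mem_region()/release_mem_region() pattern. */
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/errno.h>

static struct resource *my_mem;		/* platform resource, not owned */
static void __iomem *my_base;

static int my_probe(struct platform_device *pdev)
{
	resource_size_t size;

	my_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!my_mem)
		return -ENOENT;

	size = resource_size(my_mem);
	if (!request_mem_region(my_mem->start, size, pdev->name))
		return -EBUSY;

	my_base = ioremap(my_mem->start, size);
	if (!my_base) {
		release_mem_region(my_mem->start, size);
		my_mem = NULL;
		return -ENOMEM;
	}
	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	iounmap(my_base);
	release_mem_region(my_mem->start, resource_size(my_mem));
	my_mem = NULL;
	return 0;
}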
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index 7a82ce5a6337..73ba2fd8e591 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c | |||
@@ -270,7 +270,6 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) | |||
270 | { | 270 | { |
271 | int ret = 0; | 271 | int ret = 0; |
272 | int size; | 272 | int size; |
273 | struct resource *res; | ||
274 | struct device *dev = &pdev->dev; | 273 | struct device *dev = &pdev->dev; |
275 | struct max63xx_timeout *table; | 274 | struct max63xx_timeout *table; |
276 | 275 | ||
@@ -294,21 +293,19 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) | |||
294 | 293 | ||
295 | max63xx_pdev = pdev; | 294 | max63xx_pdev = pdev; |
296 | 295 | ||
297 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 296 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
298 | if (res == NULL) { | 297 | if (wdt_mem == NULL) { |
299 | dev_err(dev, "failed to get memory region resource\n"); | 298 | dev_err(dev, "failed to get memory region resource\n"); |
300 | return -ENOENT; | 299 | return -ENOENT; |
301 | } | 300 | } |
302 | 301 | ||
303 | size = resource_size(res); | 302 | size = resource_size(wdt_mem); |
304 | wdt_mem = request_mem_region(res->start, size, pdev->name); | 303 | if (!request_mem_region(wdt_mem->start, size, pdev->name)) { |
305 | |||
306 | if (wdt_mem == NULL) { | ||
307 | dev_err(dev, "failed to get memory region\n"); | 304 | dev_err(dev, "failed to get memory region\n"); |
308 | return -ENOENT; | 305 | return -ENOENT; |
309 | } | 306 | } |
310 | 307 | ||
311 | wdt_base = ioremap(res->start, size); | 308 | wdt_base = ioremap(wdt_mem->start, size); |
312 | if (!wdt_base) { | 309 | if (!wdt_base) { |
313 | dev_err(dev, "failed to map memory region\n"); | 310 | dev_err(dev, "failed to map memory region\n"); |
314 | ret = -ENOMEM; | 311 | ret = -ENOMEM; |
@@ -326,8 +323,8 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) | |||
326 | out_unmap: | 323 | out_unmap: |
327 | iounmap(wdt_base); | 324 | iounmap(wdt_base); |
328 | out_request: | 325 | out_request: |
329 | release_resource(wdt_mem); | 326 | release_mem_region(wdt_mem->start, size); |
330 | kfree(wdt_mem); | 327 | wdt_mem = NULL; |
331 | 328 | ||
332 | return ret; | 329 | return ret; |
333 | } | 330 | } |
@@ -336,8 +333,7 @@ static int __devexit max63xx_wdt_remove(struct platform_device *pdev) | |||
336 | { | 333 | { |
337 | misc_deregister(&max63xx_wdt_miscdev); | 334 | misc_deregister(&max63xx_wdt_miscdev); |
338 | if (wdt_mem) { | 335 | if (wdt_mem) { |
339 | release_resource(wdt_mem); | 336 | release_mem_region(wdt_mem->start, resource_size(wdt_mem)); |
340 | kfree(wdt_mem); | ||
341 | wdt_mem = NULL; | 337 | wdt_mem = NULL; |
342 | } | 338 | } |
343 | 339 | ||
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c index 267377a5a83e..afa78a54711e 100644 --- a/drivers/watchdog/nv_tco.c +++ b/drivers/watchdog/nv_tco.c | |||
@@ -302,7 +302,7 @@ MODULE_DEVICE_TABLE(pci, tco_pci_tbl); | |||
302 | * Init & exit routines | 302 | * Init & exit routines |
303 | */ | 303 | */ |
304 | 304 | ||
305 | static unsigned char __init nv_tco_getdevice(void) | 305 | static unsigned char __devinit nv_tco_getdevice(void) |
306 | { | 306 | { |
307 | struct pci_dev *dev = NULL; | 307 | struct pci_dev *dev = NULL; |
308 | u32 val; | 308 | u32 val; |
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index c7cf4cbf8ab3..614933225560 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c | |||
@@ -254,7 +254,6 @@ static struct miscdevice pnx4008_wdt_miscdev = { | |||
254 | static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) | 254 | static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) |
255 | { | 255 | { |
256 | int ret = 0, size; | 256 | int ret = 0, size; |
257 | struct resource *res; | ||
258 | 257 | ||
259 | if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) | 258 | if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) |
260 | heartbeat = DEFAULT_HEARTBEAT; | 259 | heartbeat = DEFAULT_HEARTBEAT; |
@@ -262,42 +261,42 @@ static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) | |||
262 | printk(KERN_INFO MODULE_NAME | 261 | printk(KERN_INFO MODULE_NAME |
263 | "PNX4008 Watchdog Timer: heartbeat %d sec\n", heartbeat); | 262 | "PNX4008 Watchdog Timer: heartbeat %d sec\n", heartbeat); |
264 | 263 | ||
265 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 264 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
266 | if (res == NULL) { | 265 | if (wdt_mem == NULL) { |
267 | printk(KERN_INFO MODULE_NAME | 266 | printk(KERN_INFO MODULE_NAME |
268 | "failed to get memory region resouce\n"); | 267 | "failed to get memory region resouce\n"); |
269 | return -ENOENT; | 268 | return -ENOENT; |
270 | } | 269 | } |
271 | 270 | ||
272 | size = resource_size(res); | 271 | size = resource_size(wdt_mem); |
273 | wdt_mem = request_mem_region(res->start, size, pdev->name); | ||
274 | 272 | ||
275 | if (wdt_mem == NULL) { | 273 | if (!request_mem_region(wdt_mem->start, size, pdev->name)) { |
276 | printk(KERN_INFO MODULE_NAME "failed to get memory region\n"); | 274 | printk(KERN_INFO MODULE_NAME "failed to get memory region\n"); |
277 | return -ENOENT; | 275 | return -ENOENT; |
278 | } | 276 | } |
279 | wdt_base = (void __iomem *)IO_ADDRESS(res->start); | 277 | wdt_base = (void __iomem *)IO_ADDRESS(wdt_mem->start); |
280 | 278 | ||
281 | wdt_clk = clk_get(&pdev->dev, NULL); | 279 | wdt_clk = clk_get(&pdev->dev, NULL); |
282 | if (IS_ERR(wdt_clk)) { | 280 | if (IS_ERR(wdt_clk)) { |
283 | ret = PTR_ERR(wdt_clk); | 281 | ret = PTR_ERR(wdt_clk); |
284 | release_resource(wdt_mem); | 282 | release_mem_region(wdt_mem->start, size); |
285 | kfree(wdt_mem); | 283 | wdt_mem = NULL; |
286 | goto out; | 284 | goto out; |
287 | } | 285 | } |
288 | 286 | ||
289 | ret = clk_enable(wdt_clk); | 287 | ret = clk_enable(wdt_clk); |
290 | if (ret) { | 288 | if (ret) { |
291 | release_resource(wdt_mem); | 289 | release_mem_region(wdt_mem->start, size); |
292 | kfree(wdt_mem); | 290 | wdt_mem = NULL; |
291 | clk_put(wdt_clk); | ||
293 | goto out; | 292 | goto out; |
294 | } | 293 | } |
295 | 294 | ||
296 | ret = misc_register(&pnx4008_wdt_miscdev); | 295 | ret = misc_register(&pnx4008_wdt_miscdev); |
297 | if (ret < 0) { | 296 | if (ret < 0) { |
298 | printk(KERN_ERR MODULE_NAME "cannot register misc device\n"); | 297 | printk(KERN_ERR MODULE_NAME "cannot register misc device\n"); |
299 | release_resource(wdt_mem); | 298 | release_mem_region(wdt_mem->start, size); |
300 | kfree(wdt_mem); | 299 | wdt_mem = NULL; |
301 | clk_disable(wdt_clk); | 300 | clk_disable(wdt_clk); |
302 | clk_put(wdt_clk); | 301 | clk_put(wdt_clk); |
303 | } else { | 302 | } else { |
@@ -320,8 +319,7 @@ static int __devexit pnx4008_wdt_remove(struct platform_device *pdev) | |||
320 | clk_put(wdt_clk); | 319 | clk_put(wdt_clk); |
321 | 320 | ||
322 | if (wdt_mem) { | 321 | if (wdt_mem) { |
323 | release_resource(wdt_mem); | 322 | release_mem_region(wdt_mem->start, resource_size(wdt_mem)); |
324 | kfree(wdt_mem); | ||
325 | wdt_mem = NULL; | 323 | wdt_mem = NULL; |
326 | } | 324 | } |
327 | return 0; | 325 | return 0; |
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 25b39bf35925..f7f5aa00df60 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c | |||
@@ -402,7 +402,6 @@ static inline void s3c2410wdt_cpufreq_deregister(void) | |||
402 | 402 | ||
403 | static int __devinit s3c2410wdt_probe(struct platform_device *pdev) | 403 | static int __devinit s3c2410wdt_probe(struct platform_device *pdev) |
404 | { | 404 | { |
405 | struct resource *res; | ||
406 | struct device *dev; | 405 | struct device *dev; |
407 | unsigned int wtcon; | 406 | unsigned int wtcon; |
408 | int started = 0; | 407 | int started = 0; |
@@ -416,20 +415,19 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) | |||
416 | 415 | ||
417 | /* get the memory region for the watchdog timer */ | 416 | /* get the memory region for the watchdog timer */ |
418 | 417 | ||
419 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 418 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
420 | if (res == NULL) { | 419 | if (wdt_mem == NULL) { |
421 | dev_err(dev, "no memory resource specified\n"); | 420 | dev_err(dev, "no memory resource specified\n"); |
422 | return -ENOENT; | 421 | return -ENOENT; |
423 | } | 422 | } |
424 | 423 | ||
425 | size = resource_size(res); | 424 | size = resource_size(wdt_mem); |
426 | wdt_mem = request_mem_region(res->start, size, pdev->name); | 425 | if (!request_mem_region(wdt_mem->start, size, pdev->name)) { |
427 | if (wdt_mem == NULL) { | ||
428 | dev_err(dev, "failed to get memory region\n"); | 426 | dev_err(dev, "failed to get memory region\n"); |
429 | return -EBUSY; | 427 | return -EBUSY; |
430 | } | 428 | } |
431 | 429 | ||
432 | wdt_base = ioremap(res->start, size); | 430 | wdt_base = ioremap(wdt_mem->start, size); |
433 | if (wdt_base == NULL) { | 431 | if (wdt_base == NULL) { |
434 | dev_err(dev, "failed to ioremap() region\n"); | 432 | dev_err(dev, "failed to ioremap() region\n"); |
435 | ret = -EINVAL; | 433 | ret = -EINVAL; |
@@ -524,8 +522,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) | |||
524 | iounmap(wdt_base); | 522 | iounmap(wdt_base); |
525 | 523 | ||
526 | err_req: | 524 | err_req: |
527 | release_resource(wdt_mem); | 525 | release_mem_region(wdt_mem->start, size); |
528 | kfree(wdt_mem); | 526 | wdt_mem = NULL; |
529 | 527 | ||
530 | return ret; | 528 | return ret; |
531 | } | 529 | } |
@@ -545,8 +543,7 @@ static int __devexit s3c2410wdt_remove(struct platform_device *dev) | |||
545 | 543 | ||
546 | iounmap(wdt_base); | 544 | iounmap(wdt_base); |
547 | 545 | ||
548 | release_resource(wdt_mem); | 546 | release_mem_region(wdt_mem->start, resource_size(wdt_mem)); |
549 | kfree(wdt_mem); | ||
550 | wdt_mem = NULL; | 547 | wdt_mem = NULL; |
551 | return 0; | 548 | return 0; |
552 | } | 549 | } |
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index 100b114e3c3c..bf16ffb4d21e 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/init.h> | 48 | #include <linux/init.h> |
49 | #include <linux/jiffies.h> | 49 | #include <linux/jiffies.h> |
50 | #include <linux/uaccess.h> | 50 | #include <linux/uaccess.h> |
51 | #include <linux/kernel.h> | ||
51 | 52 | ||
52 | #define PFX "SoftDog: " | 53 | #define PFX "SoftDog: " |
53 | 54 | ||
@@ -75,6 +76,11 @@ MODULE_PARM_DESC(soft_noboot, | |||
75 | "Softdog action, set to 1 to ignore reboots, 0 to reboot " | 76 | "Softdog action, set to 1 to ignore reboots, 0 to reboot " |
76 | "(default depends on ONLY_TESTING)"); | 77 | "(default depends on ONLY_TESTING)"); |
77 | 78 | ||
79 | static int soft_panic; | ||
80 | module_param(soft_panic, int, 0); | ||
81 | MODULE_PARM_DESC(soft_panic, | ||
82 | "Softdog action, set to 1 to panic, 0 to reboot (default=0)"); | ||
83 | |||
78 | /* | 84 | /* |
79 | * Our timer | 85 | * Our timer |
80 | */ | 86 | */ |
@@ -98,7 +104,10 @@ static void watchdog_fire(unsigned long data) | |||
98 | 104 | ||
99 | if (soft_noboot) | 105 | if (soft_noboot) |
100 | printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n"); | 106 | printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n"); |
101 | else { | 107 | else if (soft_panic) { |
108 | printk(KERN_CRIT PFX "Initiating panic.\n"); | ||
109 | panic("Software Watchdog Timer expired."); | ||
110 | } else { | ||
102 | printk(KERN_CRIT PFX "Initiating system reboot.\n"); | 111 | printk(KERN_CRIT PFX "Initiating system reboot.\n"); |
103 | emergency_restart(); | 112 | emergency_restart(); |
104 | printk(KERN_CRIT PFX "Reboot didn't ?????\n"); | 113 | printk(KERN_CRIT PFX "Reboot didn't ?????\n"); |
@@ -267,7 +276,8 @@ static struct notifier_block softdog_notifier = { | |||
267 | }; | 276 | }; |
268 | 277 | ||
269 | static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 " | 278 | static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 " |
270 | "initialized. soft_noboot=%d soft_margin=%d sec (nowayout= %d)\n"; | 279 | "initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d " |
280 | "(nowayout= %d)\n"; | ||
271 | 281 | ||
272 | static int __init watchdog_init(void) | 282 | static int __init watchdog_init(void) |
273 | { | 283 | { |
@@ -298,7 +308,7 @@ static int __init watchdog_init(void) | |||
298 | return ret; | 308 | return ret; |
299 | } | 309 | } |
300 | 310 | ||
301 | printk(banner, soft_noboot, soft_margin, nowayout); | 311 | printk(banner, soft_noboot, soft_margin, soft_panic, nowayout); |
302 | 312 | ||
303 | return 0; | 313 | return 0; |
304 | } | 314 | } |
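The softdog change adds a soft_panic module parameter: when set, an expired timer calls panic() instead of emergency_restart(), so a panic notifier or a configured crash kernel can take over. A hypothetical invocation would be "modprobe softdog soft_margin=60 soft_panic=1".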
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index 1bc493848ed4..87e0527669d8 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #define PFX TCO_MODULE_NAME ": " | 42 | #define PFX TCO_MODULE_NAME ": " |
43 | 43 | ||
44 | /* internal variables */ | 44 | /* internal variables */ |
45 | static u32 tcobase_phys; | ||
45 | static void __iomem *tcobase; | 46 | static void __iomem *tcobase; |
46 | static unsigned int pm_iobase; | 47 | static unsigned int pm_iobase; |
47 | static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ | 48 | static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ |
@@ -305,10 +306,18 @@ static unsigned char __devinit sp5100_tco_setupdevice(void) | |||
305 | /* Low three bits of BASE0 are reserved. */ | 306 | /* Low three bits of BASE0 are reserved. */ |
306 | val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8); | 307 | val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8); |
307 | 308 | ||
309 | if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE, | ||
310 | "SP5100 TCO")) { | ||
311 | printk(KERN_ERR PFX "mmio address 0x%04x already in use\n", | ||
312 | val); | ||
313 | goto unreg_region; | ||
314 | } | ||
315 | tcobase_phys = val; | ||
316 | |||
308 | tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE); | 317 | tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE); |
309 | if (tcobase == 0) { | 318 | if (tcobase == 0) { |
310 | printk(KERN_ERR PFX "failed to get tcobase address\n"); | 319 | printk(KERN_ERR PFX "failed to get tcobase address\n"); |
311 | goto unreg_region; | 320 | goto unreg_mem_region; |
312 | } | 321 | } |
313 | 322 | ||
314 | /* Enable watchdog decode bit */ | 323 | /* Enable watchdog decode bit */ |
@@ -346,7 +355,8 @@ static unsigned char __devinit sp5100_tco_setupdevice(void) | |||
346 | /* Done */ | 355 | /* Done */ |
347 | return 1; | 356 | return 1; |
348 | 357 | ||
349 | iounmap(tcobase); | 358 | unreg_mem_region: |
359 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | ||
350 | unreg_region: | 360 | unreg_region: |
351 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 361 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
352 | exit: | 362 | exit: |
@@ -401,6 +411,7 @@ static int __devinit sp5100_tco_init(struct platform_device *dev) | |||
401 | 411 | ||
402 | exit: | 412 | exit: |
403 | iounmap(tcobase); | 413 | iounmap(tcobase); |
414 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | ||
404 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 415 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
405 | return ret; | 416 | return ret; |
406 | } | 417 | } |
@@ -414,6 +425,7 @@ static void __devexit sp5100_tco_cleanup(void) | |||
414 | /* Deregister */ | 425 | /* Deregister */ |
415 | misc_deregister(&sp5100_tco_miscdev); | 426 | misc_deregister(&sp5100_tco_miscdev); |
416 | iounmap(tcobase); | 427 | iounmap(tcobase); |
428 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | ||
417 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 429 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
418 | } | 430 | } |
419 | 431 | ||
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 02b5a9c05cfa..036343ba204e 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -122,7 +122,7 @@ static struct irq_chip xen_pirq_chip; | |||
122 | /* Get info for IRQ */ | 122 | /* Get info for IRQ */ |
123 | static struct irq_info *info_for_irq(unsigned irq) | 123 | static struct irq_info *info_for_irq(unsigned irq) |
124 | { | 124 | { |
125 | return get_irq_data(irq); | 125 | return irq_get_handler_data(irq); |
126 | } | 126 | } |
127 | 127 | ||
128 | /* Constructors for packed IRQ information. */ | 128 | /* Constructors for packed IRQ information. */ |
@@ -403,7 +403,7 @@ static void xen_irq_init(unsigned irq) | |||
403 | 403 | ||
404 | info->type = IRQT_UNBOUND; | 404 | info->type = IRQT_UNBOUND; |
405 | 405 | ||
406 | set_irq_data(irq, info); | 406 | irq_set_handler_data(irq, info); |
407 | 407 | ||
408 | list_add_tail(&info->list, &xen_irq_list_head); | 408 | list_add_tail(&info->list, &xen_irq_list_head); |
409 | } | 409 | } |
@@ -458,11 +458,11 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) | |||
458 | 458 | ||
459 | static void xen_free_irq(unsigned irq) | 459 | static void xen_free_irq(unsigned irq) |
460 | { | 460 | { |
461 | struct irq_info *info = get_irq_data(irq); | 461 | struct irq_info *info = irq_get_handler_data(irq); |
462 | 462 | ||
463 | list_del(&info->list); | 463 | list_del(&info->list); |
464 | 464 | ||
465 | set_irq_data(irq, NULL); | 465 | irq_set_handler_data(irq, NULL); |
466 | 466 | ||
467 | kfree(info); | 467 | kfree(info); |
468 | 468 | ||
@@ -585,7 +585,7 @@ static void ack_pirq(struct irq_data *data) | |||
585 | { | 585 | { |
586 | int evtchn = evtchn_from_irq(data->irq); | 586 | int evtchn = evtchn_from_irq(data->irq); |
587 | 587 | ||
588 | move_native_irq(data->irq); | 588 | irq_move_irq(data); |
589 | 589 | ||
590 | if (VALID_EVTCHN(evtchn)) { | 590 | if (VALID_EVTCHN(evtchn)) { |
591 | mask_evtchn(evtchn); | 591 | mask_evtchn(evtchn); |
@@ -639,8 +639,8 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
639 | if (irq < 0) | 639 | if (irq < 0) |
640 | goto out; | 640 | goto out; |
641 | 641 | ||
642 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, | 642 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, |
643 | handle_level_irq, name); | 643 | name); |
644 | 644 | ||
645 | irq_op.irq = irq; | 645 | irq_op.irq = irq; |
646 | irq_op.vector = 0; | 646 | irq_op.vector = 0; |
@@ -690,8 +690,8 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | |||
690 | if (irq == -1) | 690 | if (irq == -1) |
691 | goto out; | 691 | goto out; |
692 | 692 | ||
693 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, | 693 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, |
694 | handle_level_irq, name); | 694 | name); |
695 | 695 | ||
696 | xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0); | 696 | xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0); |
697 | ret = irq_set_msi_desc(irq, msidesc); | 697 | ret = irq_set_msi_desc(irq, msidesc); |
@@ -772,7 +772,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
772 | if (irq == -1) | 772 | if (irq == -1) |
773 | goto out; | 773 | goto out; |
774 | 774 | ||
775 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | 775 | irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, |
776 | handle_fasteoi_irq, "event"); | 776 | handle_fasteoi_irq, "event"); |
777 | 777 | ||
778 | xen_irq_info_evtchn_init(irq, evtchn); | 778 | xen_irq_info_evtchn_init(irq, evtchn); |
@@ -799,7 +799,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
799 | if (irq < 0) | 799 | if (irq < 0) |
800 | goto out; | 800 | goto out; |
801 | 801 | ||
802 | set_irq_chip_and_handler_name(irq, &xen_percpu_chip, | 802 | irq_set_chip_and_handler_name(irq, &xen_percpu_chip, |
803 | handle_percpu_irq, "ipi"); | 803 | handle_percpu_irq, "ipi"); |
804 | 804 | ||
805 | bind_ipi.vcpu = cpu; | 805 | bind_ipi.vcpu = cpu; |
@@ -848,7 +848,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
848 | if (irq == -1) | 848 | if (irq == -1) |
849 | goto out; | 849 | goto out; |
850 | 850 | ||
851 | set_irq_chip_and_handler_name(irq, &xen_percpu_chip, | 851 | irq_set_chip_and_handler_name(irq, &xen_percpu_chip, |
852 | handle_percpu_irq, "virq"); | 852 | handle_percpu_irq, "virq"); |
853 | 853 | ||
854 | bind_virq.virq = virq; | 854 | bind_virq.virq = virq; |
@@ -1339,7 +1339,7 @@ static void ack_dynirq(struct irq_data *data) | |||
1339 | { | 1339 | { |
1340 | int evtchn = evtchn_from_irq(data->irq); | 1340 | int evtchn = evtchn_from_irq(data->irq); |
1341 | 1341 | ||
1342 | move_masked_irq(data->irq); | 1342 | irq_move_masked_irq(data); |
1343 | 1343 | ||
1344 | if (VALID_EVTCHN(evtchn)) | 1344 | if (VALID_EVTCHN(evtchn)) |
1345 | unmask_evtchn(evtchn); | 1345 | unmask_evtchn(evtchn); |
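In events.c the Xen chips move from the irq-number based move_native_irq()/move_masked_irq() to the irq_data based irq_move_irq()/irq_move_masked_irq(), and the per-IRQ info pointer now rides on the handler data. A sketch of an ack callback in the new style; my_hw_ack() stands in for the hardware-specific acknowledge:

/* Sketch of an irq_chip ack callback using the irq_data based helpers. */
#include <linux/irq.h>

static void my_hw_ack(void *info)
{
	/* hardware-specific acknowledge would go here */
}

static void my_ack_irq(struct irq_data *data)
{
	void *info = irq_get_handler_data(data->irq);	/* was get_irq_data() */

	irq_move_irq(data);		/* was move_native_irq(data->irq) */
	my_hw_ack(info);
}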
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 017ce600fbc6..b0f9e8fb0052 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -273,7 +273,7 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) | |||
273 | map->vma->vm_start + map->notify.addr; | 273 | map->vma->vm_start + map->notify.addr; |
274 | err = copy_to_user(tmp, &err, 1); | 274 | err = copy_to_user(tmp, &err, 1); |
275 | if (err) | 275 | if (err) |
276 | return err; | 276 | return -EFAULT; |
277 | map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; | 277 | map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; |
278 | } else if (pgno >= offset && pgno < offset + pages) { | 278 | } else if (pgno >= offset && pgno < offset + pages) { |
279 | uint8_t *tmp = kmap(map->pages[pgno]); | 279 | uint8_t *tmp = kmap(map->pages[pgno]); |
@@ -662,7 +662,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
662 | if (map->flags) { | 662 | if (map->flags) { |
663 | if ((vma->vm_flags & VM_WRITE) && | 663 | if ((vma->vm_flags & VM_WRITE) && |
664 | (map->flags & GNTMAP_readonly)) | 664 | (map->flags & GNTMAP_readonly)) |
665 | return -EINVAL; | 665 | goto out_unlock_put; |
666 | } else { | 666 | } else { |
667 | map->flags = GNTMAP_host_map; | 667 | map->flags = GNTMAP_host_map; |
668 | if (!(vma->vm_flags & VM_WRITE)) | 668 | if (!(vma->vm_flags & VM_WRITE)) |
@@ -700,6 +700,8 @@ unlock_out: | |||
700 | spin_unlock(&priv->lock); | 700 | spin_unlock(&priv->lock); |
701 | return err; | 701 | return err; |
702 | 702 | ||
703 | out_unlock_put: | ||
704 | spin_unlock(&priv->lock); | ||
703 | out_put_map: | 705 | out_put_map: |
704 | if (use_ptemod) | 706 | if (use_ptemod) |
705 | map->vma = NULL; | 707 | map->vma = NULL; |
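The gntdev hunks fix two error paths: copy_to_user() returns the number of bytes it failed to copy, so the caller must translate that into -EFAULT rather than return the raw count, and the mmap flag check now exits through out_unlock_put so priv->lock is released. A short sketch of the copy_to_user() convention with an illustrative helper:

/* Sketch of the copy_to_user() return convention. */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/errno.h>

static int my_copy_byte_to_user(void __user *dst, u8 val)
{
	if (copy_to_user(dst, &val, 1))
		return -EFAULT;		/* never return the raw byte count */
	return 0;
}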