Diffstat (limited to 'drivers')
361 files changed, 5241 insertions, 2839 deletions
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index bc533dde16c4..f895a244ca7e 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -121,7 +121,7 @@
 
 /* Maximum sleep allowed via Sleep() operator */
 
-#define ACPI_MAX_SLEEP 20000	/* Two seconds */
+#define ACPI_MAX_SLEEP 2000	/* Two seconds */
 
 /******************************************************************************
  *
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index c34aa51af4ee..e3f47872ec22 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -13,6 +13,7 @@ config ACPI_APEI_GHES
 	bool "APEI Generic Hardware Error Source"
 	depends on ACPI_APEI && X86
 	select ACPI_HED
+	select IRQ_WORK
 	select LLIST
 	select GENERIC_ALLOCATOR
 	help
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 8041248fce9b..61540360d5ce 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -618,7 +618,7 @@ int apei_osc_setup(void)
 	};
 
 	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-	capbuf[OSC_SUPPORT_TYPE] = 0;
+	capbuf[OSC_SUPPORT_TYPE] = 1;
 	capbuf[OSC_CONTROL_TYPE] = 0;
 
 	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index ca3e6be44a04..5987e0ba8c2d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -468,6 +468,15 @@ config PATA_ICSIDE
 	  interface card. This is not required for ICS partition support.
 	  If you are unsure, say N to this.
 
+config PATA_IMX
+	tristate "PATA support for Freescale iMX"
+	depends on ARCH_MXC
+	help
+	  This option enables support for the PATA host available on Freescale
+	  iMX SoCs.
+
+	  If unsure, say N.
+
 config PATA_IT8213
 	tristate "IT8213 PATA support (Experimental)"
 	depends on PCI && EXPERIMENTAL
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8ac64e1aa051..9550d691fd19 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_PATA_HPT37X)	+= pata_hpt37x.o
 obj-$(CONFIG_PATA_HPT3X2N)	+= pata_hpt3x2n.o
 obj-$(CONFIG_PATA_HPT3X3)	+= pata_hpt3x3.o
 obj-$(CONFIG_PATA_ICSIDE)	+= pata_icside.o
+obj-$(CONFIG_PATA_IMX)		+= pata_imx.o
 obj-$(CONFIG_PATA_IT8213)	+= pata_it8213.o
 obj-$(CONFIG_PATA_IT821X)	+= pata_it821x.o
 obj-$(CONFIG_PATA_JMICRON)	+= pata_jmicron.o
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
new file mode 100644
index 000000000000..ca9d9caedfa3
--- /dev/null
+++ b/drivers/ata/pata_imx.c
@@ -0,0 +1,253 @@
+/*
+ * Freescale iMX PATA driver
+ *
+ * Copyright (C) 2011 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * Based on pata_platform - Copyright (C) 2006 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * TODO:
+ * - dmaengine support
+ * - check if timing stuff needed
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define DRV_NAME "pata_imx"
+
+#define PATA_IMX_ATA_CONTROL		0x24
+#define PATA_IMX_ATA_CTRL_FIFO_RST_B	(1<<7)
+#define PATA_IMX_ATA_CTRL_ATA_RST_B	(1<<6)
+#define PATA_IMX_ATA_CTRL_IORDY_EN	(1<<0)
+#define PATA_IMX_ATA_INT_EN		0x2C
+#define PATA_IMX_ATA_INTR_ATA_INTRQ2	(1<<3)
+#define PATA_IMX_DRIVE_DATA		0xA0
+#define PATA_IMX_DRIVE_CONTROL		0xD8
+
+struct pata_imx_priv {
+	struct clk *clk;
+	/* timings/interrupt/control regs */
+	u8 *host_regs;
+	u32 ata_ctl;
+};
+
+static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+	struct ata_device *dev;
+	struct ata_port *ap = link->ap;
+	struct pata_imx_priv *priv = ap->host->private_data;
+	u32 val;
+
+	ata_for_each_dev(dev, link, ENABLED) {
+		dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
+		dev->xfer_shift = ATA_SHIFT_PIO;
+		dev->flags |= ATA_DFLAG_PIO;
+
+		val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+		if (ata_pio_need_iordy(dev))
+			val |= PATA_IMX_ATA_CTRL_IORDY_EN;
+		else
+			val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
+		__raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
+
+		ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+	}
+	return 0;
+}
+
+static struct scsi_host_template pata_imx_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pata_imx_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.sff_data_xfer	= ata_sff_data_xfer_noirq,
+	.cable_detect	= ata_cable_unknown,
+	.set_mode	= pata_imx_set_mode,
+};
+
+static void pata_imx_setup_port(struct ata_ioports *ioaddr)
+{
+	/* Fixup the port shift for platforms that need it */
+	ioaddr->data_addr	= ioaddr->cmd_addr + (ATA_REG_DATA    << 2);
+	ioaddr->error_addr	= ioaddr->cmd_addr + (ATA_REG_ERR     << 2);
+	ioaddr->feature_addr	= ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
+	ioaddr->nsect_addr	= ioaddr->cmd_addr + (ATA_REG_NSECT   << 2);
+	ioaddr->lbal_addr	= ioaddr->cmd_addr + (ATA_REG_LBAL    << 2);
+	ioaddr->lbam_addr	= ioaddr->cmd_addr + (ATA_REG_LBAM    << 2);
+	ioaddr->lbah_addr	= ioaddr->cmd_addr + (ATA_REG_LBAH    << 2);
+	ioaddr->device_addr	= ioaddr->cmd_addr + (ATA_REG_DEVICE  << 2);
+	ioaddr->status_addr	= ioaddr->cmd_addr + (ATA_REG_STATUS  << 2);
+	ioaddr->command_addr	= ioaddr->cmd_addr + (ATA_REG_CMD     << 2);
+}
+
+static int __devinit pata_imx_probe(struct platform_device *pdev)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct pata_imx_priv *priv;
+	int irq = 0;
+	struct resource *io_res;
+
+	io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (io_res == NULL)
+		return -EINVAL;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0)
+		return -EINVAL;
+
+	priv = devm_kzalloc(&pdev->dev,
+				sizeof(struct pata_imx_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(&pdev->dev, "Failed to get clock\n");
+		return PTR_ERR(priv->clk);
+	}
+
+	clk_enable(priv->clk);
+
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		goto free_priv;
+
+	host->private_data = priv;
+	ap = host->ports[0];
+
+	ap->ops = &pata_imx_port_ops;
+	ap->pio_mask = ATA_PIO0;
+	ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+	priv->host_regs = devm_ioremap(&pdev->dev, io_res->start,
+		resource_size(io_res));
+	if (!priv->host_regs) {
+		dev_err(&pdev->dev, "failed to map IO/CTL base\n");
+		goto free_priv;
+	}
+
+	ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA;
+	ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL;
+
+	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
+
+	pata_imx_setup_port(&ap->ioaddr);
+
+	ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
+		(unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA,
+		(unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL);
+
+	/* deassert resets */
+	__raw_writel(PATA_IMX_ATA_CTRL_FIFO_RST_B |
+			PATA_IMX_ATA_CTRL_ATA_RST_B,
+			priv->host_regs + PATA_IMX_ATA_CONTROL);
+	/* enable interrupts */
+	__raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
+			priv->host_regs + PATA_IMX_ATA_INT_EN);
+
+	/* activate */
+	return ata_host_activate(host, irq, ata_sff_interrupt, 0,
+				&pata_imx_sht);
+
+free_priv:
+	clk_disable(priv->clk);
+	clk_put(priv->clk);
+	return -ENOMEM;
+}
+
+static int __devexit pata_imx_remove(struct platform_device *pdev)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	struct pata_imx_priv *priv = host->private_data;
+
+	ata_host_detach(host);
+
+	__raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
+
+	clk_disable(priv->clk);
+	clk_put(priv->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pata_imx_suspend(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct pata_imx_priv *priv = host->private_data;
+	int ret;
+
+	ret = ata_host_suspend(host, PMSG_SUSPEND);
+	if (!ret) {
+		__raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
+		priv->ata_ctl =
+			__raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+		clk_disable(priv->clk);
+	}
+
+	return ret;
+}
+
+static int pata_imx_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct pata_imx_priv *priv = host->private_data;
+
+	clk_enable(priv->clk);
+
+	__raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
+
+	__raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
+			priv->host_regs + PATA_IMX_ATA_INT_EN);
+
+	ata_host_resume(host);
+
+	return 0;
+}
+
+static const struct dev_pm_ops pata_imx_pm_ops = {
+	.suspend	= pata_imx_suspend,
+	.resume		= pata_imx_resume,
+};
+#endif
+
+static struct platform_driver pata_imx_driver = {
+	.probe		= pata_imx_probe,
+	.remove		= __devexit_p(pata_imx_remove),
+	.driver = {
+		.name		= DRV_NAME,
+		.owner		= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm		= &pata_imx_pm_ops,
+#endif
+	},
+};
+
+static int __init pata_imx_init(void)
+{
+	return platform_driver_register(&pata_imx_driver);
+}
+
+static void __exit pata_imx_exit(void)
+{
+	platform_driver_unregister(&pata_imx_driver);
+}
+module_init(pata_imx_init);
+module_exit(pata_imx_exit);
+
+MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
+MODULE_DESCRIPTION("low-level driver for iMX PATA");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
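
For context on how this new driver gets bound: it matches a platform device named "pata_imx" and expects one memory region plus one IRQ, which pata_imx_probe() above fetches with platform_get_resource()/platform_get_irq(). The following is a minimal, hypothetical board-file sketch, not part of the patch; the register base and interrupt number are placeholders, not real i.MX values.

/* Hypothetical board support sketch -- base address and IRQ are placeholders. */
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource imx_pata_resources[] = {
	{
		.start	= 0x83fe0000,			/* placeholder register base */
		.end	= 0x83fe0000 + 0xfff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 70,				/* placeholder interrupt line */
		.end	= 70,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device imx_pata_device = {
	.name		= "pata_imx",			/* must match DRV_NAME above */
	.id		= -1,
	.resource	= imx_pata_resources,
	.num_resources	= ARRAY_SIZE(imx_pata_resources),
};

/* Board init code would then call platform_device_register(&imx_pata_device). */
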
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 65e4be6be220..8e9f5048a10a 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -124,6 +124,17 @@ static const struct via_isa_bridge {
 	{ NULL }
 };
 
+static const struct dmi_system_id no_atapi_dma_dmi_table[] = {
+	{
+		.ident = "AVERATEC 3200",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "AVERATEC"),
+			DMI_MATCH(DMI_BOARD_NAME, "3200"),
+		},
+	},
+	{ }
+};
+
 struct via_port {
 	u8 cached_device;
 };
@@ -355,6 +366,13 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
 			mask &= ~ ATA_MASK_UDMA;
 		}
 	}
+
+	if (dev->class == ATA_DEV_ATAPI &&
+	    dmi_check_system(no_atapi_dma_dmi_table)) {
+		ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
+		mask &= ATA_MASK_PIO;
+	}
+
 	return mask;
 }
 
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 0a9a774a7e1e..5c4237452f50 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1329,7 +1329,7 @@ static int sata_dwc_port_start(struct ata_port *ap)
 			dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
 				__func__);
 			err = -ENOMEM;
-			goto CLEANUP;
+			goto CLEANUP_ALLOC;
 		}
 	}
 
@@ -1349,15 +1349,13 @@ static int sata_dwc_port_start(struct ata_port *ap)
 	/* Clear any error bits before libata starts issuing commands */
 	clear_serror();
 	ap->private_data = hsdevp;
+	dev_dbg(ap->dev, "%s: done\n", __func__);
+	return 0;
 
+CLEANUP_ALLOC:
+	kfree(hsdevp);
 CLEANUP:
-	if (err) {
-		sata_dwc_port_stop(ap);
-		dev_dbg(ap->dev, "%s: fail\n", __func__);
-	} else {
-		dev_dbg(ap->dev, "%s: done\n", __func__);
-	}
-
+	dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
 	return err;
 }
 
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 98c1d780f552..9dfb40b8c2c9 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -438,7 +438,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
 	u8 status;
 
 	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
-		u32 serror;
+		u32 serror = 0xffffffff;
 
 		/* SIEN doesn't mask SATA IRQs on some 3112s.  Those
 		 * controllers continue to assert IRQ as long as
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index cf7a0c788052..65cd74832450 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -397,6 +397,7 @@ static int remove_nodes(struct device *dev,
 
 static int release_nodes(struct device *dev, struct list_head *first,
 			 struct list_head *end, unsigned long flags)
+	__releases(&dev->devres_lock)
 {
 	LIST_HEAD(todo);
 	int cnt;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 33e1bed68fdd..a4760e095ff5 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -376,7 +376,7 @@ int devtmpfs_mount(const char *mntdir)
 	return err;
 }
 
-static __initdata DECLARE_COMPLETION(setup_done);
+static DECLARE_COMPLETION(setup_done);
 
 static int handle(const char *name, mode_t mode, struct device *dev)
 {
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index bbb03e6f7255..06ed6b4e7df5 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -521,11 +521,6 @@ static int _request_firmware(const struct firmware **firmware_p,
 	if (!firmware_p)
 		return -EINVAL;
 
-	if (WARN_ON(usermodehelper_is_disabled())) {
-		dev_err(device, "firmware: %s will not be loaded\n", name);
-		return -EBUSY;
-	}
-
 	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
 	if (!firmware) {
 		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
@@ -539,6 +534,12 @@ static int _request_firmware(const struct firmware **firmware_p,
 		return 0;
 	}
 
+	if (WARN_ON(usermodehelper_is_disabled())) {
+		dev_err(device, "firmware: %s will not be loaded\n", name);
+		retval = -EBUSY;
+		goto out;
+	}
+
 	if (uevent)
 		dev_dbg(device, "firmware: requesting %s\n", name);
 
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 0cad9c7f6bb5..99a5272d7c2f 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
 
 /**
  * arch_setup_pdev_archdata - Allow manipulation of archdata before its used
- * @dev: platform device
+ * @pdev: platform device
  *
  * This is called before platform_device_add() such that any pdev_archdata may
  * be setup before the platform_notifier is called. So if a user needs to
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index a846b2f95cfb..2c18d584066d 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -19,7 +19,7 @@
 
 struct pm_clk_data {
 	struct list_head clock_list;
-	struct mutex lock;
+	spinlock_t lock;
 };
 
 enum pce_status {
@@ -73,9 +73,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
 		}
 	}
 
-	mutex_lock(&pcd->lock);
+	spin_lock_irq(&pcd->lock);
 	list_add_tail(&ce->node, &pcd->clock_list);
-	mutex_unlock(&pcd->lock);
+	spin_unlock_irq(&pcd->lock);
 	return 0;
 }
 
@@ -83,8 +83,8 @@ int pm_clk_add(struct device *dev, const char *con_id)
 * __pm_clk_remove - Destroy PM clock entry.
 * @ce: PM clock entry to destroy.
 *
- * This routine must be called under the mutex protecting the PM list of clocks
- * corresponding the the @ce's device.
+ * This routine must be called under the spinlock protecting the PM list of
+ * clocks corresponding the the @ce's device.
 */
 static void __pm_clk_remove(struct pm_clock_entry *ce)
 {
@@ -123,7 +123,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
 	if (!pcd)
 		return;
 
-	mutex_lock(&pcd->lock);
+	spin_lock_irq(&pcd->lock);
 
 	list_for_each_entry(ce, &pcd->clock_list, node) {
 		if (!con_id && !ce->con_id) {
@@ -137,7 +137,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
 		}
 	}
 
-	mutex_unlock(&pcd->lock);
+	spin_unlock_irq(&pcd->lock);
 }
 
 /**
@@ -158,7 +158,7 @@ int pm_clk_init(struct device *dev)
 	}
 
 	INIT_LIST_HEAD(&pcd->clock_list);
-	mutex_init(&pcd->lock);
+	spin_lock_init(&pcd->lock);
 	dev->power.subsys_data = pcd;
 	return 0;
 }
@@ -181,12 +181,12 @@ void pm_clk_destroy(struct device *dev)
 
 	dev->power.subsys_data = NULL;
 
-	mutex_lock(&pcd->lock);
+	spin_lock_irq(&pcd->lock);
 
 	list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
 		__pm_clk_remove(ce);
 
-	mutex_unlock(&pcd->lock);
+	spin_unlock_irq(&pcd->lock);
 
 	kfree(pcd);
 }
@@ -220,13 +220,14 @@ int pm_clk_suspend(struct device *dev)
 {
 	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce;
+	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	if (!pcd)
 		return 0;
 
-	mutex_lock(&pcd->lock);
+	spin_lock_irqsave(&pcd->lock, flags);
 
 	list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
 		if (ce->status == PCE_STATUS_NONE)
@@ -238,7 +239,7 @@ int pm_clk_suspend(struct device *dev)
 		}
 	}
 
-	mutex_unlock(&pcd->lock);
+	spin_unlock_irqrestore(&pcd->lock, flags);
 
 	return 0;
 }
@@ -251,13 +252,14 @@ int pm_clk_resume(struct device *dev)
 {
 	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce;
+	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	if (!pcd)
 		return 0;
 
-	mutex_lock(&pcd->lock);
+	spin_lock_irqsave(&pcd->lock, flags);
 
 	list_for_each_entry(ce, &pcd->clock_list, node) {
 		if (ce->status == PCE_STATUS_NONE)
@@ -269,7 +271,7 @@ int pm_clk_resume(struct device *dev)
 		}
 	}
 
-	mutex_unlock(&pcd->lock);
+	spin_unlock_irqrestore(&pcd->lock, flags);
 
 	return 0;
 }
@@ -344,6 +346,7 @@ int pm_clk_suspend(struct device *dev)
 {
 	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce;
+	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
@@ -351,12 +354,12 @@ int pm_clk_suspend(struct device *dev)
 	if (!pcd || !dev->driver)
 		return 0;
 
-	mutex_lock(&pcd->lock);
+	spin_lock_irqsave(&pcd->lock, flags);
 
 	list_for_each_entry_reverse(ce, &pcd->clock_list, node)
 		clk_disable(ce->clk);
 
-	mutex_unlock(&pcd->lock);
+	spin_unlock_irqrestore(&pcd->lock, flags);
 
 	return 0;
 }
@@ -369,6 +372,7 @@ int pm_clk_resume(struct device *dev)
 {
 	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce;
+	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
@@ -376,12 +380,12 @@ int pm_clk_resume(struct device *dev)
 	if (!pcd || !dev->driver)
 		return 0;
 
-	mutex_lock(&pcd->lock);
+	spin_lock_irqsave(&pcd->lock, flags);
 
 	list_for_each_entry(ce, &pcd->clock_list, node)
 		clk_enable(ce->clk);
 
-	mutex_unlock(&pcd->lock);
+	spin_unlock_irqrestore(&pcd->lock, flags);
 
 	return 0;
 }
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e18566a0fedd..1c374579407c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -460,6 +460,21 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	return 0;
 }
 
+/**
+ * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ */
+void pm_genpd_poweroff_unused(void)
+{
+	struct generic_pm_domain *genpd;
+
+	mutex_lock(&gpd_list_lock);
+
+	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+		genpd_queue_power_off_work(genpd);
+
+	mutex_unlock(&gpd_list_lock);
+}
+
 #else
 
 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
@@ -1255,18 +1270,3 @@
 	list_add(&genpd->gpd_list_node, &gpd_list);
 	mutex_unlock(&gpd_list_lock);
 }
-
-/**
- * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
- */
-void pm_genpd_poweroff_unused(void)
-{
-	struct generic_pm_domain *genpd;
-
-	mutex_lock(&gpd_list_lock);
-
-	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
-		genpd_queue_power_off_work(genpd);
-
-	mutex_unlock(&gpd_list_lock);
-}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index c2231ff06cbc..c4f7a45cd2c3 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -113,3 +113,4 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c,
 }
 EXPORT_SYMBOL_GPL(regmap_init_i2c);
 
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 4deba0621bc7..f8396945d6ed 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -13,6 +13,7 @@
 #include <linux/regmap.h>
 #include <linux/spi/spi.h>
 #include <linux/init.h>
+#include <linux/module.h>
 
 static int regmap_spi_write(struct device *dev, const void *data, size_t count)
 {
@@ -70,3 +71,5 @@ struct regmap *regmap_init_spi(struct spi_device *spi,
 	return regmap_init(&spi->dev, &regmap_spi, config);
 }
 EXPORT_SYMBOL_GPL(regmap_init_spi);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index cf3565cae93d..20663f8dae45 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
 	map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
 	if (map->work_buf == NULL) {
 		ret = -ENOMEM;
-		goto err_bus;
+		goto err_map;
 	}
 
 	return map;
 
-err_bus:
-	module_put(map->bus->owner);
 err_map:
 	kfree(map);
 err:
@@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
 void regmap_exit(struct regmap *map)
 {
 	kfree(map->work_buf);
-	module_put(map->bus->owner);
 	kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
@@ -317,7 +314,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	u8[0] |= map->bus->read_flag_mask;
 
 	ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes,
-			     val, map->format.val_bytes);
+			     val, val_len);
 	if (ret != 0)
 		return ret;
 
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 7072216a2a3f..8c09c3e547cd 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -15,6 +15,7 @@ MODULE_LICENSE("GPL");
 static int bcma_bus_match(struct device *dev, struct device_driver *drv);
 static int bcma_device_probe(struct device *dev);
 static int bcma_device_remove(struct device *dev);
+static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
 
 static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -49,6 +50,7 @@ static struct bus_type bcma_bus_type = {
 	.match		= bcma_bus_match,
 	.probe		= bcma_device_probe,
 	.remove		= bcma_device_remove,
+	.uevent		= bcma_device_uevent,
 	.dev_attrs	= bcma_device_attrs,
 };
 
@@ -295,6 +297,16 @@ static int bcma_device_remove(struct device *dev)
 	return 0;
 }
 
+static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+
+	return add_uevent_var(env,
+			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
+			      core->id.manuf, core->id.id,
+			      core->id.rev, core->id.class);
+}
+
 static int __init bcma_modinit(void)
 {
 	int err;
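
The uevent hook added above emits a MODALIAS string of the form bcma:mXXXXidXXXXrevXXclXX, which udev/modprobe can match against the module alias table that a bcma client driver exports. A minimal, hypothetical sketch of that driver-side table follows; it is not part of this patch, and the core ID value is a placeholder rather than a real device binding.

/* Hypothetical bcma client ID table -- the 0x812 core ID is a placeholder. */
#include <linux/module.h>
#include <linux/mod_devicetable.h>

static const struct bcma_device_id example_bcma_tbl[] = {
	/* manuf,          id,    rev,          class */
	{ BCMA_MANUF_BCM, 0x812, BCMA_ANY_REV, BCMA_ANY_CLASS },
	{ },
};
MODULE_DEVICE_TABLE(bcma, example_bcma_tbl);
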
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 717d6e4e18d3..6f07ec1c2f58 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -256,6 +256,21 @@ config BLK_DEV_LOOP
 
 	  Most users will answer N here.
 
+config BLK_DEV_LOOP_MIN_COUNT
+	int "Number of loop devices to pre-create at init time"
+	depends on BLK_DEV_LOOP
+	default 8
+	help
+	  Static number of loop devices to be unconditionally pre-created
+	  at init time.
+
+	  This default value can be overwritten on the kernel command
+	  line or with module-parameter loop.max_loop.
+
+	  The historic default is 8. If a late 2011 version of losetup(8)
+	  is used, it can be set to 0, since needed loop devices can be
+	  dynamically allocated with the /dev/loop-control interface.
+
 config BLK_DEV_CRYPTOLOOP
 	tristate "Cryptoloop Support"
 	select CRYPTO
@@ -471,7 +486,7 @@ config XEN_BLKDEV_FRONTEND
 	  in another domain which drives the actual block device.
 
 config XEN_BLKDEV_BACKEND
-	tristate "Block-device backend driver"
+	tristate "Xen block-device backend driver"
 	depends on XEN_BACKEND
 	help
 	  The block-device backend driver allows the kernel to export its
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 515bcd948a43..0feab261e295 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1829,10 +1829,10 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
 
 	/* silently ignore cpu mask on UP kernel */
 	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
-		err = __bitmap_parse(sc.cpu_mask, 32, 0,
+		err = bitmap_parse(sc.cpu_mask, 32,
 				cpumask_bits(new_cpu_mask), nr_cpu_ids);
 		if (err) {
-			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
+			dev_warn(DEV, "bitmap_parse() failed with %d\n", err);
 			retcode = ERR_CPU_MASK_PARSE;
 			goto fail;
 		}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 98de8f418676..9955a53733b2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4250,7 +4250,7 @@ static int __init floppy_init(void)
 	use_virtual_dma = can_use_virtual_dma & 1;
 	fdc_state[0].address = FDC1;
 	if (fdc_state[0].address == -1) {
-		del_timer(&fd_timeout);
+		del_timer_sync(&fd_timeout);
 		err = -ENODEV;
 		goto out_unreg_region;
 	}
@@ -4261,7 +4261,7 @@ static int __init floppy_init(void)
 	fdc = 0;		/* reset fdc in case of unexpected interrupt */
 	err = floppy_grab_irq_and_dma();
 	if (err) {
-		del_timer(&fd_timeout);
+		del_timer_sync(&fd_timeout);
 		err = -EBUSY;
 		goto out_unreg_region;
 	}
@@ -4318,7 +4318,7 @@ static int __init floppy_init(void)
 		user_reset_fdc(-1, FD_RESET_ALWAYS, false);
 	}
 	fdc = 0;
-	del_timer(&fd_timeout);
+	del_timer_sync(&fd_timeout);
 	current_drive = 0;
 	initialized = true;
 	if (have_no_fdc) {
@@ -4368,7 +4368,7 @@ out_unreg_blkdev:
 	unregister_blkdev(FLOPPY_MAJOR, "fd");
 out_put_disk:
 	while (dr--) {
-		del_timer(&motor_off_timer[dr]);
+		del_timer_sync(&motor_off_timer[dr]);
 		if (disks[dr]->queue)
 			blk_cleanup_queue(disks[dr]->queue);
 		put_disk(disks[dr]);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 76c8da78212b..4720c7ade0ae 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -75,11 +75,11 @@ | |||
75 | #include <linux/kthread.h> | 75 | #include <linux/kthread.h> |
76 | #include <linux/splice.h> | 76 | #include <linux/splice.h> |
77 | #include <linux/sysfs.h> | 77 | #include <linux/sysfs.h> |
78 | 78 | #include <linux/miscdevice.h> | |
79 | #include <asm/uaccess.h> | 79 | #include <asm/uaccess.h> |
80 | 80 | ||
81 | static LIST_HEAD(loop_devices); | 81 | static DEFINE_IDR(loop_index_idr); |
82 | static DEFINE_MUTEX(loop_devices_mutex); | 82 | static DEFINE_MUTEX(loop_index_mutex); |
83 | 83 | ||
84 | static int max_part; | 84 | static int max_part; |
85 | static int part_shift; | 85 | static int part_shift; |
@@ -722,17 +722,10 @@ static inline int is_loop_device(struct file *file) | |||
722 | static ssize_t loop_attr_show(struct device *dev, char *page, | 722 | static ssize_t loop_attr_show(struct device *dev, char *page, |
723 | ssize_t (*callback)(struct loop_device *, char *)) | 723 | ssize_t (*callback)(struct loop_device *, char *)) |
724 | { | 724 | { |
725 | struct loop_device *l, *lo = NULL; | 725 | struct gendisk *disk = dev_to_disk(dev); |
726 | 726 | struct loop_device *lo = disk->private_data; | |
727 | mutex_lock(&loop_devices_mutex); | ||
728 | list_for_each_entry(l, &loop_devices, lo_list) | ||
729 | if (disk_to_dev(l->lo_disk) == dev) { | ||
730 | lo = l; | ||
731 | break; | ||
732 | } | ||
733 | mutex_unlock(&loop_devices_mutex); | ||
734 | 727 | ||
735 | return lo ? callback(lo, page) : -EIO; | 728 | return callback(lo, page); |
736 | } | 729 | } |
737 | 730 | ||
738 | #define LOOP_ATTR_RO(_name) \ | 731 | #define LOOP_ATTR_RO(_name) \ |
@@ -750,10 +743,10 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) | |||
750 | ssize_t ret; | 743 | ssize_t ret; |
751 | char *p = NULL; | 744 | char *p = NULL; |
752 | 745 | ||
753 | mutex_lock(&lo->lo_ctl_mutex); | 746 | spin_lock_irq(&lo->lo_lock); |
754 | if (lo->lo_backing_file) | 747 | if (lo->lo_backing_file) |
755 | p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1); | 748 | p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1); |
756 | mutex_unlock(&lo->lo_ctl_mutex); | 749 | spin_unlock_irq(&lo->lo_lock); |
757 | 750 | ||
758 | if (IS_ERR_OR_NULL(p)) | 751 | if (IS_ERR_OR_NULL(p)) |
759 | ret = PTR_ERR(p); | 752 | ret = PTR_ERR(p); |
@@ -1007,7 +1000,9 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) | |||
1007 | 1000 | ||
1008 | kthread_stop(lo->lo_thread); | 1001 | kthread_stop(lo->lo_thread); |
1009 | 1002 | ||
1003 | spin_lock_irq(&lo->lo_lock); | ||
1010 | lo->lo_backing_file = NULL; | 1004 | lo->lo_backing_file = NULL; |
1005 | spin_unlock_irq(&lo->lo_lock); | ||
1011 | 1006 | ||
1012 | loop_release_xfer(lo); | 1007 | loop_release_xfer(lo); |
1013 | lo->transfer = NULL; | 1008 | lo->transfer = NULL; |
@@ -1485,13 +1480,22 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, | |||
1485 | 1480 | ||
1486 | static int lo_open(struct block_device *bdev, fmode_t mode) | 1481 | static int lo_open(struct block_device *bdev, fmode_t mode) |
1487 | { | 1482 | { |
1488 | struct loop_device *lo = bdev->bd_disk->private_data; | 1483 | struct loop_device *lo; |
1484 | int err = 0; | ||
1485 | |||
1486 | mutex_lock(&loop_index_mutex); | ||
1487 | lo = bdev->bd_disk->private_data; | ||
1488 | if (!lo) { | ||
1489 | err = -ENXIO; | ||
1490 | goto out; | ||
1491 | } | ||
1489 | 1492 | ||
1490 | mutex_lock(&lo->lo_ctl_mutex); | 1493 | mutex_lock(&lo->lo_ctl_mutex); |
1491 | lo->lo_refcnt++; | 1494 | lo->lo_refcnt++; |
1492 | mutex_unlock(&lo->lo_ctl_mutex); | 1495 | mutex_unlock(&lo->lo_ctl_mutex); |
1493 | 1496 | out: | |
1494 | return 0; | 1497 | mutex_unlock(&loop_index_mutex); |
1498 | return err; | ||
1495 | } | 1499 | } |
1496 | 1500 | ||
1497 | static int lo_release(struct gendisk *disk, fmode_t mode) | 1501 | static int lo_release(struct gendisk *disk, fmode_t mode) |
@@ -1557,40 +1561,71 @@ int loop_register_transfer(struct loop_func_table *funcs) | |||
1557 | return 0; | 1561 | return 0; |
1558 | } | 1562 | } |
1559 | 1563 | ||
1564 | static int unregister_transfer_cb(int id, void *ptr, void *data) | ||
1565 | { | ||
1566 | struct loop_device *lo = ptr; | ||
1567 | struct loop_func_table *xfer = data; | ||
1568 | |||
1569 | mutex_lock(&lo->lo_ctl_mutex); | ||
1570 | if (lo->lo_encryption == xfer) | ||
1571 | loop_release_xfer(lo); | ||
1572 | mutex_unlock(&lo->lo_ctl_mutex); | ||
1573 | return 0; | ||
1574 | } | ||
1575 | |||
1560 | int loop_unregister_transfer(int number) | 1576 | int loop_unregister_transfer(int number) |
1561 | { | 1577 | { |
1562 | unsigned int n = number; | 1578 | unsigned int n = number; |
1563 | struct loop_device *lo; | ||
1564 | struct loop_func_table *xfer; | 1579 | struct loop_func_table *xfer; |
1565 | 1580 | ||
1566 | if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) | 1581 | if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) |
1567 | return -EINVAL; | 1582 | return -EINVAL; |
1568 | 1583 | ||
1569 | xfer_funcs[n] = NULL; | 1584 | xfer_funcs[n] = NULL; |
1570 | 1585 | idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer); | |
1571 | list_for_each_entry(lo, &loop_devices, lo_list) { | ||
1572 | mutex_lock(&lo->lo_ctl_mutex); | ||
1573 | |||
1574 | if (lo->lo_encryption == xfer) | ||
1575 | loop_release_xfer(lo); | ||
1576 | |||
1577 | mutex_unlock(&lo->lo_ctl_mutex); | ||
1578 | } | ||
1579 | |||
1580 | return 0; | 1586 | return 0; |
1581 | } | 1587 | } |
1582 | 1588 | ||
1583 | EXPORT_SYMBOL(loop_register_transfer); | 1589 | EXPORT_SYMBOL(loop_register_transfer); |
1584 | EXPORT_SYMBOL(loop_unregister_transfer); | 1590 | EXPORT_SYMBOL(loop_unregister_transfer); |
1585 | 1591 | ||
1586 | static struct loop_device *loop_alloc(int i) | 1592 | static int loop_add(struct loop_device **l, int i) |
1587 | { | 1593 | { |
1588 | struct loop_device *lo; | 1594 | struct loop_device *lo; |
1589 | struct gendisk *disk; | 1595 | struct gendisk *disk; |
1596 | int err; | ||
1590 | 1597 | ||
1591 | lo = kzalloc(sizeof(*lo), GFP_KERNEL); | 1598 | lo = kzalloc(sizeof(*lo), GFP_KERNEL); |
1592 | if (!lo) | 1599 | if (!lo) { |
1600 | err = -ENOMEM; | ||
1593 | goto out; | 1601 | goto out; |
1602 | } | ||
1603 | |||
1604 | err = idr_pre_get(&loop_index_idr, GFP_KERNEL); | ||
1605 | if (err < 0) | ||
1606 | goto out_free_dev; | ||
1607 | |||
1608 | if (i >= 0) { | ||
1609 | int m; | ||
1610 | |||
1611 | /* create specific i in the index */ | ||
1612 | err = idr_get_new_above(&loop_index_idr, lo, i, &m); | ||
1613 | if (err >= 0 && i != m) { | ||
1614 | idr_remove(&loop_index_idr, m); | ||
1615 | err = -EEXIST; | ||
1616 | } | ||
1617 | } else if (i == -1) { | ||
1618 | int m; | ||
1619 | |||
1620 | /* get next free nr */ | ||
1621 | err = idr_get_new(&loop_index_idr, lo, &m); | ||
1622 | if (err >= 0) | ||
1623 | i = m; | ||
1624 | } else { | ||
1625 | err = -EINVAL; | ||
1626 | } | ||
1627 | if (err < 0) | ||
1628 | goto out_free_dev; | ||
1594 | 1629 | ||
1595 | lo->lo_queue = blk_alloc_queue(GFP_KERNEL); | 1630 | lo->lo_queue = blk_alloc_queue(GFP_KERNEL); |
1596 | if (!lo->lo_queue) | 1631 | if (!lo->lo_queue) |
@@ -1611,81 +1646,158 @@ static struct loop_device *loop_alloc(int i) | |||
1611 | disk->private_data = lo; | 1646 | disk->private_data = lo; |
1612 | disk->queue = lo->lo_queue; | 1647 | disk->queue = lo->lo_queue; |
1613 | sprintf(disk->disk_name, "loop%d", i); | 1648 | sprintf(disk->disk_name, "loop%d", i); |
1614 | return lo; | 1649 | add_disk(disk); |
1650 | *l = lo; | ||
1651 | return lo->lo_number; | ||
1615 | 1652 | ||
1616 | out_free_queue: | 1653 | out_free_queue: |
1617 | blk_cleanup_queue(lo->lo_queue); | 1654 | blk_cleanup_queue(lo->lo_queue); |
1618 | out_free_dev: | 1655 | out_free_dev: |
1619 | kfree(lo); | 1656 | kfree(lo); |
1620 | out: | 1657 | out: |
1621 | return NULL; | 1658 | return err; |
1622 | } | 1659 | } |
1623 | 1660 | ||
1624 | static void loop_free(struct loop_device *lo) | 1661 | static void loop_remove(struct loop_device *lo) |
1625 | { | 1662 | { |
1663 | del_gendisk(lo->lo_disk); | ||
1626 | blk_cleanup_queue(lo->lo_queue); | 1664 | blk_cleanup_queue(lo->lo_queue); |
1627 | put_disk(lo->lo_disk); | 1665 | put_disk(lo->lo_disk); |
1628 | list_del(&lo->lo_list); | ||
1629 | kfree(lo); | 1666 | kfree(lo); |
1630 | } | 1667 | } |
1631 | 1668 | ||
1632 | static struct loop_device *loop_init_one(int i) | 1669 | static int find_free_cb(int id, void *ptr, void *data) |
1670 | { | ||
1671 | struct loop_device *lo = ptr; | ||
1672 | struct loop_device **l = data; | ||
1673 | |||
1674 | if (lo->lo_state == Lo_unbound) { | ||
1675 | *l = lo; | ||
1676 | return 1; | ||
1677 | } | ||
1678 | return 0; | ||
1679 | } | ||
1680 | |||
1681 | static int loop_lookup(struct loop_device **l, int i) | ||
1633 | { | 1682 | { |
1634 | struct loop_device *lo; | 1683 | struct loop_device *lo; |
1684 | int ret = -ENODEV; | ||
1635 | 1685 | ||
1636 | list_for_each_entry(lo, &loop_devices, lo_list) { | 1686 | if (i < 0) { |
1637 | if (lo->lo_number == i) | 1687 | int err; |
1638 | return lo; | 1688 | |
1689 | err = idr_for_each(&loop_index_idr, &find_free_cb, &lo); | ||
1690 | if (err == 1) { | ||
1691 | *l = lo; | ||
1692 | ret = lo->lo_number; | ||
1693 | } | ||
1694 | goto out; | ||
1639 | } | 1695 | } |
1640 | 1696 | ||
1641 | lo = loop_alloc(i); | 1697 | /* lookup and return a specific i */ |
1698 | lo = idr_find(&loop_index_idr, i); | ||
1642 | if (lo) { | 1699 | if (lo) { |
1643 | add_disk(lo->lo_disk); | 1700 | *l = lo; |
1644 | list_add_tail(&lo->lo_list, &loop_devices); | 1701 | ret = lo->lo_number; |
1645 | } | 1702 | } |
1646 | return lo; | 1703 | out: |
1647 | } | 1704 | return ret; |
1648 | |||
1649 | static void loop_del_one(struct loop_device *lo) | ||
1650 | { | ||
1651 | del_gendisk(lo->lo_disk); | ||
1652 | loop_free(lo); | ||
1653 | } | 1705 | } |
1654 | 1706 | ||
1655 | static struct kobject *loop_probe(dev_t dev, int *part, void *data) | 1707 | static struct kobject *loop_probe(dev_t dev, int *part, void *data) |
1656 | { | 1708 | { |
1657 | struct loop_device *lo; | 1709 | struct loop_device *lo; |
1658 | struct kobject *kobj; | 1710 | struct kobject *kobj; |
1711 | int err; | ||
1659 | 1712 | ||
1660 | mutex_lock(&loop_devices_mutex); | 1713 | mutex_lock(&loop_index_mutex); |
1661 | lo = loop_init_one(MINOR(dev) >> part_shift); | 1714 | err = loop_lookup(&lo, MINOR(dev) >> part_shift); |
1662 | kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM); | 1715 | if (err < 0) |
1663 | mutex_unlock(&loop_devices_mutex); | 1716 | err = loop_add(&lo, MINOR(dev) >> part_shift); |
1717 | if (err < 0) | ||
1718 | kobj = ERR_PTR(err); | ||
1719 | else | ||
1720 | kobj = get_disk(lo->lo_disk); | ||
1721 | mutex_unlock(&loop_index_mutex); | ||
1664 | 1722 | ||
1665 | *part = 0; | 1723 | *part = 0; |
1666 | return kobj; | 1724 | return kobj; |
1667 | } | 1725 | } |
1668 | 1726 | ||
1727 | static long loop_control_ioctl(struct file *file, unsigned int cmd, | ||
1728 | unsigned long parm) | ||
1729 | { | ||
1730 | struct loop_device *lo; | ||
1731 | int ret = -ENOSYS; | ||
1732 | |||
1733 | mutex_lock(&loop_index_mutex); | ||
1734 | switch (cmd) { | ||
1735 | case LOOP_CTL_ADD: | ||
1736 | ret = loop_lookup(&lo, parm); | ||
1737 | if (ret >= 0) { | ||
1738 | ret = -EEXIST; | ||
1739 | break; | ||
1740 | } | ||
1741 | ret = loop_add(&lo, parm); | ||
1742 | break; | ||
1743 | case LOOP_CTL_REMOVE: | ||
1744 | ret = loop_lookup(&lo, parm); | ||
1745 | if (ret < 0) | ||
1746 | break; | ||
1747 | mutex_lock(&lo->lo_ctl_mutex); | ||
1748 | if (lo->lo_state != Lo_unbound) { | ||
1749 | ret = -EBUSY; | ||
1750 | mutex_unlock(&lo->lo_ctl_mutex); | ||
1751 | break; | ||
1752 | } | ||
1753 | if (lo->lo_refcnt > 0) { | ||
1754 | ret = -EBUSY; | ||
1755 | mutex_unlock(&lo->lo_ctl_mutex); | ||
1756 | break; | ||
1757 | } | ||
1758 | lo->lo_disk->private_data = NULL; | ||
1759 | mutex_unlock(&lo->lo_ctl_mutex); | ||
1760 | idr_remove(&loop_index_idr, lo->lo_number); | ||
1761 | loop_remove(lo); | ||
1762 | break; | ||
1763 | case LOOP_CTL_GET_FREE: | ||
1764 | ret = loop_lookup(&lo, -1); | ||
1765 | if (ret >= 0) | ||
1766 | break; | ||
1767 | ret = loop_add(&lo, -1); | ||
1768 | } | ||
1769 | mutex_unlock(&loop_index_mutex); | ||
1770 | |||
1771 | return ret; | ||
1772 | } | ||
1773 | |||
1774 | static const struct file_operations loop_ctl_fops = { | ||
1775 | .open = nonseekable_open, | ||
1776 | .unlocked_ioctl = loop_control_ioctl, | ||
1777 | .compat_ioctl = loop_control_ioctl, | ||
1778 | .owner = THIS_MODULE, | ||
1779 | .llseek = noop_llseek, | ||
1780 | }; | ||
1781 | |||
1782 | static struct miscdevice loop_misc = { | ||
1783 | .minor = LOOP_CTRL_MINOR, | ||
1784 | .name = "loop-control", | ||
1785 | .fops = &loop_ctl_fops, | ||
1786 | }; | ||
1787 | |||
1788 | MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR); | ||
1789 | MODULE_ALIAS("devname:loop-control"); | ||
1790 | |||
1669 | static int __init loop_init(void) | 1791 | static int __init loop_init(void) |
1670 | { | 1792 | { |
1671 | int i, nr; | 1793 | int i, nr; |
1672 | unsigned long range; | 1794 | unsigned long range; |
1673 | struct loop_device *lo, *next; | 1795 | struct loop_device *lo; |
1796 | int err; | ||
1674 | 1797 | ||
1675 | /* | 1798 | err = misc_register(&loop_misc); |
1676 | * loop module now has a feature to instantiate underlying device | 1799 | if (err < 0) |
1677 | * structure on-demand, provided that there is an access dev node. | 1800 | return err; |
1678 | * However, this will not work well with user space tool that doesn't | ||
1679 | * know about such "feature". In order to not break any existing | ||
1680 | * tool, we do the following: | ||
1681 | * | ||
1682 | * (1) if max_loop is specified, create that many upfront, and this | ||
1683 | * also becomes a hard limit. | ||
1684 | * (2) if max_loop is not specified, create 8 loop device on module | ||
1685 | * load, user can further extend loop device by create dev node | ||
1686 | * themselves and have kernel automatically instantiate actual | ||
1687 | * device on-demand. | ||
1688 | */ | ||
1689 | 1801 | ||
1690 | part_shift = 0; | 1802 | part_shift = 0; |
1691 | if (max_part > 0) { | 1803 | if (max_part > 0) { |
@@ -1708,57 +1820,60 @@ static int __init loop_init(void) | |||
1708 | if (max_loop > 1UL << (MINORBITS - part_shift)) | 1820 | if (max_loop > 1UL << (MINORBITS - part_shift)) |
1709 | return -EINVAL; | 1821 | return -EINVAL; |
1710 | 1822 | ||
1823 | /* | ||
1824 | * If max_loop is specified, create that many devices upfront. | ||
1825 | * This also becomes a hard limit. If max_loop is not specified, | ||
1826 | * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module | ||
1827 | * init time. Loop devices can be requested on-demand with the | ||
1828 | * /dev/loop-control interface, or be instantiated by accessing | ||
1829 | * a 'dead' device node. | ||
1830 | */ | ||
1711 | if (max_loop) { | 1831 | if (max_loop) { |
1712 | nr = max_loop; | 1832 | nr = max_loop; |
1713 | range = max_loop << part_shift; | 1833 | range = max_loop << part_shift; |
1714 | } else { | 1834 | } else { |
1715 | nr = 8; | 1835 | nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT; |
1716 | range = 1UL << MINORBITS; | 1836 | range = 1UL << MINORBITS; |
1717 | } | 1837 | } |
1718 | 1838 | ||
1719 | if (register_blkdev(LOOP_MAJOR, "loop")) | 1839 | if (register_blkdev(LOOP_MAJOR, "loop")) |
1720 | return -EIO; | 1840 | return -EIO; |
1721 | 1841 | ||
1722 | for (i = 0; i < nr; i++) { | ||
1723 | lo = loop_alloc(i); | ||
1724 | if (!lo) | ||
1725 | goto Enomem; | ||
1726 | list_add_tail(&lo->lo_list, &loop_devices); | ||
1727 | } | ||
1728 | |||
1729 | /* point of no return */ | ||
1730 | |||
1731 | list_for_each_entry(lo, &loop_devices, lo_list) | ||
1732 | add_disk(lo->lo_disk); | ||
1733 | |||
1734 | blk_register_region(MKDEV(LOOP_MAJOR, 0), range, | 1842 | blk_register_region(MKDEV(LOOP_MAJOR, 0), range, |
1735 | THIS_MODULE, loop_probe, NULL, NULL); | 1843 | THIS_MODULE, loop_probe, NULL, NULL); |
1736 | 1844 | ||
1845 | /* pre-create number of devices given by config or max_loop */ | ||
1846 | mutex_lock(&loop_index_mutex); | ||
1847 | for (i = 0; i < nr; i++) | ||
1848 | loop_add(&lo, i); | ||
1849 | mutex_unlock(&loop_index_mutex); | ||
1850 | |||
1737 | printk(KERN_INFO "loop: module loaded\n"); | 1851 | printk(KERN_INFO "loop: module loaded\n"); |
1738 | return 0; | 1852 | return 0; |
1853 | } | ||
1739 | 1854 | ||
1740 | Enomem: | 1855 | static int loop_exit_cb(int id, void *ptr, void *data) |
1741 | printk(KERN_INFO "loop: out of memory\n"); | 1856 | { |
1742 | 1857 | struct loop_device *lo = ptr; | |
1743 | list_for_each_entry_safe(lo, next, &loop_devices, lo_list) | ||
1744 | loop_free(lo); | ||
1745 | 1858 | ||
1746 | unregister_blkdev(LOOP_MAJOR, "loop"); | 1859 | loop_remove(lo); |
1747 | return -ENOMEM; | 1860 | return 0; |
1748 | } | 1861 | } |
1749 | 1862 | ||
1750 | static void __exit loop_exit(void) | 1863 | static void __exit loop_exit(void) |
1751 | { | 1864 | { |
1752 | unsigned long range; | 1865 | unsigned long range; |
1753 | struct loop_device *lo, *next; | ||
1754 | 1866 | ||
1755 | range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; | 1867 | range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; |
1756 | 1868 | ||
1757 | list_for_each_entry_safe(lo, next, &loop_devices, lo_list) | 1869 | idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); |
1758 | loop_del_one(lo); | 1870 | idr_remove_all(&loop_index_idr); |
1871 | idr_destroy(&loop_index_idr); | ||
1759 | 1872 | ||
1760 | blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); | 1873 | blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); |
1761 | unregister_blkdev(LOOP_MAJOR, "loop"); | 1874 | unregister_blkdev(LOOP_MAJOR, "loop"); |
1875 | |||
1876 | misc_deregister(&loop_misc); | ||
1762 | } | 1877 | } |
1763 | 1878 | ||
1764 | module_init(loop_init); | 1879 | module_init(loop_init); |
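
The loop_init() hunk above ties the pre-created device count and the reserved minor range to max_loop and part_shift: with max_loop set the count is a hard limit and only max_loop << part_shift minors are claimed, otherwise the driver reserves the whole minor space and creates just a configured minimum, leaving the rest to /dev/loop-control or probe-on-access. A minimal userspace sketch of that sizing arithmetic, assuming the kernel's MINORBITS of 20 and using 8 as a stand-in for the Kconfig minimum:

#include <stdio.h>

#define MINORBITS 20                             /* kernel block-layer constant */

/* Mirror of the nr/range selection shown in loop_init() above. */
static void loop_sizing(unsigned long max_loop, int part_shift,
                        unsigned long *nr, unsigned long *range)
{
        if (max_loop) {
                *nr = max_loop;                  /* hard limit */
                *range = max_loop << part_shift; /* minors actually claimed */
        } else {
                *nr = 8;                         /* stand-in for the Kconfig minimum */
                *range = 1UL << MINORBITS;       /* whole minor space, grow on demand */
        }
}

int main(void)
{
        unsigned long nr, range;

        loop_sizing(64, 4, &nr, &range);         /* e.g. max_loop=64, part_shift=4 */
        printf("pre-create %lu devices, reserve %lu minors\n", nr, range);
        return 0;
}
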
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 773bfa792777..ae3e167e17ad 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c | |||
@@ -1184,6 +1184,7 @@ static struct of_device_id swim3_match[] = | |||
1184 | { | 1184 | { |
1185 | .compatible = "swim3" | 1185 | .compatible = "swim3" |
1186 | }, | 1186 | }, |
1187 | { /* end of list */ } | ||
1187 | }; | 1188 | }; |
1188 | 1189 | ||
1189 | static struct macio_driver swim3_driver = | 1190 | static struct macio_driver swim3_driver = |
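
The swim3 fix adds the empty terminator that of_device_id tables require, because the match code walks the table until it reaches an all-zero sentinel rather than tracking a length. A standalone C analogue of sentinel-terminated table walking (the struct and entries here are invented for illustration):

#include <stdio.h>
#include <string.h>

struct match_entry {
        const char *compatible;                  /* NULL in the sentinel entry */
};

static const struct match_entry match_table[] = {
        { .compatible = "swim3" },
        { 0 }                                    /* end of list: stops the walk */
};

static int table_matches(const struct match_entry *tbl, const char *compat)
{
        for (; tbl->compatible; tbl++)           /* stop at the sentinel, not past it */
                if (!strcmp(tbl->compatible, compat))
                        return 1;
        return 0;
}

int main(void)
{
        printf("match: %d\n", table_matches(match_table, "swim3"));
        return 0;
}
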
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 9e40b283a468..00c57c90e2d6 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h | |||
@@ -46,7 +46,7 @@ | |||
46 | 46 | ||
47 | #define DRV_PFX "xen-blkback:" | 47 | #define DRV_PFX "xen-blkback:" |
48 | #define DPRINTK(fmt, args...) \ | 48 | #define DPRINTK(fmt, args...) \ |
49 | pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \ | 49 | pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \ |
50 | __func__, __LINE__, ##args) | 50 | __func__, __LINE__, ##args) |
51 | 51 | ||
52 | 52 | ||
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 3f129b45451a..5fd2010f7d2b 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c | |||
@@ -590,7 +590,7 @@ static void frontend_changed(struct xenbus_device *dev, | |||
590 | 590 | ||
591 | /* | 591 | /* |
592 | * Enforce precondition before potential leak point. | 592 | * Enforce precondition before potential leak point. |
593 | * blkif_disconnect() is idempotent. | 593 | * xen_blkif_disconnect() is idempotent. |
594 | */ | 594 | */ |
595 | xen_blkif_disconnect(be->blkif); | 595 | xen_blkif_disconnect(be->blkif); |
596 | 596 | ||
@@ -601,17 +601,17 @@ static void frontend_changed(struct xenbus_device *dev, | |||
601 | break; | 601 | break; |
602 | 602 | ||
603 | case XenbusStateClosing: | 603 | case XenbusStateClosing: |
604 | xen_blkif_disconnect(be->blkif); | ||
605 | xenbus_switch_state(dev, XenbusStateClosing); | 604 | xenbus_switch_state(dev, XenbusStateClosing); |
606 | break; | 605 | break; |
607 | 606 | ||
608 | case XenbusStateClosed: | 607 | case XenbusStateClosed: |
608 | xen_blkif_disconnect(be->blkif); | ||
609 | xenbus_switch_state(dev, XenbusStateClosed); | 609 | xenbus_switch_state(dev, XenbusStateClosed); |
610 | if (xenbus_dev_is_online(dev)) | 610 | if (xenbus_dev_is_online(dev)) |
611 | break; | 611 | break; |
612 | /* fall through if not online */ | 612 | /* fall through if not online */ |
613 | case XenbusStateUnknown: | 613 | case XenbusStateUnknown: |
614 | /* implies blkif_disconnect() via blkback_remove() */ | 614 | /* implies xen_blkif_disconnect() via xen_blkbk_remove() */ |
615 | device_unregister(&dev->dev); | 615 | device_unregister(&dev->dev); |
616 | break; | 616 | break; |
617 | 617 | ||
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index b536a9cef917..9ea8c2576c70 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -123,8 +123,8 @@ static DEFINE_SPINLOCK(minor_lock); | |||
123 | #define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED)) | 123 | #define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED)) |
124 | #define EMULATED_HD_DISK_MINOR_OFFSET (0) | 124 | #define EMULATED_HD_DISK_MINOR_OFFSET (0) |
125 | #define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256) | 125 | #define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256) |
126 | #define EMULATED_SD_DISK_MINOR_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16)) | 126 | #define EMULATED_SD_DISK_MINOR_OFFSET (0) |
127 | #define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_HD_DISK_NAME_OFFSET + 4) | 127 | #define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256) |
128 | 128 | ||
129 | #define DEV_NAME "xvd" /* name in /dev */ | 129 | #define DEV_NAME "xvd" /* name in /dev */ |
130 | 130 | ||
@@ -529,7 +529,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, | |||
529 | minor = BLKIF_MINOR_EXT(info->vdevice); | 529 | minor = BLKIF_MINOR_EXT(info->vdevice); |
530 | nr_parts = PARTS_PER_EXT_DISK; | 530 | nr_parts = PARTS_PER_EXT_DISK; |
531 | offset = minor / nr_parts; | 531 | offset = minor / nr_parts; |
532 | if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4) | 532 | if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4) |
533 | printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with " | 533 | printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with " |
534 | "emulated IDE disks,\n\t choose an xvd device name" | 534 | "emulated IDE disks,\n\t choose an xvd device name" |
535 | "from xvde on\n", info->vdevice); | 535 | "from xvde on\n", info->vdevice); |
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a5854735bb2e..db7cb8111fbe 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
@@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = { | |||
63 | /* Atheros AR3011 with sflash firmware*/ | 63 | /* Atheros AR3011 with sflash firmware*/ |
64 | { USB_DEVICE(0x0CF3, 0x3002) }, | 64 | { USB_DEVICE(0x0CF3, 0x3002) }, |
65 | { USB_DEVICE(0x13d3, 0x3304) }, | 65 | { USB_DEVICE(0x13d3, 0x3304) }, |
66 | { USB_DEVICE(0x0930, 0x0215) }, | ||
66 | 67 | ||
67 | /* Atheros AR9285 Malbec with sflash firmware */ | 68 | /* Atheros AR9285 Malbec with sflash firmware */ |
68 | { USB_DEVICE(0x03F0, 0x311D) }, | 69 | { USB_DEVICE(0x03F0, 0x311D) }, |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 91d13a9e8c65..9cbac6b445e1 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -72,9 +72,15 @@ static struct usb_device_id btusb_table[] = { | |||
72 | /* Apple MacBookAir3,1, MacBookAir3,2 */ | 72 | /* Apple MacBookAir3,1, MacBookAir3,2 */ |
73 | { USB_DEVICE(0x05ac, 0x821b) }, | 73 | { USB_DEVICE(0x05ac, 0x821b) }, |
74 | 74 | ||
75 | /* Apple MacBookAir4,1 */ | ||
76 | { USB_DEVICE(0x05ac, 0x821f) }, | ||
77 | |||
75 | /* Apple MacBookPro8,2 */ | 78 | /* Apple MacBookPro8,2 */ |
76 | { USB_DEVICE(0x05ac, 0x821a) }, | 79 | { USB_DEVICE(0x05ac, 0x821a) }, |
77 | 80 | ||
81 | /* Apple MacMini5,1 */ | ||
82 | { USB_DEVICE(0x05ac, 0x8281) }, | ||
83 | |||
78 | /* AVM BlueFRITZ! USB v2.0 */ | 84 | /* AVM BlueFRITZ! USB v2.0 */ |
79 | { USB_DEVICE(0x057c, 0x3800) }, | 85 | { USB_DEVICE(0x057c, 0x3800) }, |
80 | 86 | ||
@@ -106,6 +112,7 @@ static struct usb_device_id blacklist_table[] = { | |||
106 | /* Atheros 3011 with sflash firmware */ | 112 | /* Atheros 3011 with sflash firmware */ |
107 | { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, | 113 | { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, |
108 | { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, | 114 | { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, |
115 | { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, | ||
109 | 116 | ||
110 | /* Atheros AR9285 Malbec with sflash firmware */ | 117 | /* Atheros AR9285 Malbec with sflash firmware */ |
111 | { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, | 118 | { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, |
@@ -256,7 +263,9 @@ static void btusb_intr_complete(struct urb *urb) | |||
256 | 263 | ||
257 | err = usb_submit_urb(urb, GFP_ATOMIC); | 264 | err = usb_submit_urb(urb, GFP_ATOMIC); |
258 | if (err < 0) { | 265 | if (err < 0) { |
259 | if (err != -EPERM) | 266 | /* -EPERM: urb is being killed; |
267 | * -ENODEV: device got disconnected */ | ||
268 | if (err != -EPERM && err != -ENODEV) | ||
260 | BT_ERR("%s urb %p failed to resubmit (%d)", | 269 | BT_ERR("%s urb %p failed to resubmit (%d)", |
261 | hdev->name, urb, -err); | 270 | hdev->name, urb, -err); |
262 | usb_unanchor_urb(urb); | 271 | usb_unanchor_urb(urb); |
@@ -341,7 +350,9 @@ static void btusb_bulk_complete(struct urb *urb) | |||
341 | 350 | ||
342 | err = usb_submit_urb(urb, GFP_ATOMIC); | 351 | err = usb_submit_urb(urb, GFP_ATOMIC); |
343 | if (err < 0) { | 352 | if (err < 0) { |
344 | if (err != -EPERM) | 353 | /* -EPERM: urb is being killed; |
354 | * -ENODEV: device got disconnected */ | ||
355 | if (err != -EPERM && err != -ENODEV) | ||
345 | BT_ERR("%s urb %p failed to resubmit (%d)", | 356 | BT_ERR("%s urb %p failed to resubmit (%d)", |
346 | hdev->name, urb, -err); | 357 | hdev->name, urb, -err); |
347 | usb_unanchor_urb(urb); | 358 | usb_unanchor_urb(urb); |
@@ -431,7 +442,9 @@ static void btusb_isoc_complete(struct urb *urb) | |||
431 | 442 | ||
432 | err = usb_submit_urb(urb, GFP_ATOMIC); | 443 | err = usb_submit_urb(urb, GFP_ATOMIC); |
433 | if (err < 0) { | 444 | if (err < 0) { |
434 | if (err != -EPERM) | 445 | /* -EPERM: urb is being killed; |
446 | * -ENODEV: device got disconnected */ | ||
447 | if (err != -EPERM && err != -ENODEV) | ||
435 | BT_ERR("%s urb %p failed to resubmit (%d)", | 448 | BT_ERR("%s urb %p failed to resubmit (%d)", |
436 | hdev->name, urb, -err); | 449 | hdev->name, urb, -err); |
437 | usb_unanchor_urb(urb); | 450 | usb_unanchor_urb(urb); |
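
The btusb changes stop logging the two errors that are expected while the device is going away: -EPERM when the URB is being killed and -ENODEV when the device has been unplugged. A userspace sketch of the same "report only unexpected errors" pattern; the resubmit function here is a stand-in, not the USB API:

#include <errno.h>
#include <stdio.h>

/* Stand-in for usb_submit_urb(); returns 0 or a negative errno. */
static int resubmit(int fake_status)
{
        return fake_status;
}

static void resubmit_and_report(const char *name, int fake_status)
{
        int err = resubmit(fake_status);

        if (err < 0) {
                /* -EPERM: transfer is being killed; -ENODEV: device unplugged.
                 * Both are routine during shutdown, so stay quiet about them. */
                if (err != -EPERM && err != -ENODEV)
                        fprintf(stderr, "%s: failed to resubmit (%d)\n",
                                name, -err);
        }
}

int main(void)
{
        resubmit_and_report("hci0", -ENODEV);    /* silent: expected during unplug */
        resubmit_and_report("hci0", -ENOMEM);    /* reported: genuinely unexpected */
        return 0;
}
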
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index 65d27aff553a..04d353f58d71 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c | |||
@@ -125,6 +125,13 @@ static long st_receive(void *priv_data, struct sk_buff *skb) | |||
125 | /* protocol structure registered with shared transport */ | 125 | /* protocol structure registered with shared transport */ |
126 | static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = { | 126 | static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = { |
127 | { | 127 | { |
128 | .chnl_id = HCI_EVENT_PKT, /* HCI Events */ | ||
129 | .hdr_len = sizeof(struct hci_event_hdr), | ||
130 | .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen), | ||
131 | .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */ | ||
132 | .reserve = 8, | ||
133 | }, | ||
134 | { | ||
128 | .chnl_id = HCI_ACLDATA_PKT, /* ACL */ | 135 | .chnl_id = HCI_ACLDATA_PKT, /* ACL */ |
129 | .hdr_len = sizeof(struct hci_acl_hdr), | 136 | .hdr_len = sizeof(struct hci_acl_hdr), |
130 | .offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen), | 137 | .offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen), |
@@ -138,13 +145,6 @@ static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = { | |||
138 | .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */ | 145 | .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */ |
139 | .reserve = 8, | 146 | .reserve = 8, |
140 | }, | 147 | }, |
141 | { | ||
142 | .chnl_id = HCI_EVENT_PKT, /* HCI Events */ | ||
143 | .hdr_len = sizeof(struct hci_event_hdr), | ||
144 | .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen), | ||
145 | .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */ | ||
146 | .reserve = 8, | ||
147 | }, | ||
148 | }; | 148 | }; |
149 | 149 | ||
150 | /* Called from HCI core to initialize the device */ | 150 | /* Called from HCI core to initialize the device */ |
@@ -240,7 +240,7 @@ static int ti_st_close(struct hci_dev *hdev) | |||
240 | if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) | 240 | if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) |
241 | return 0; | 241 | return 0; |
242 | 242 | ||
243 | for (i = 0; i < MAX_BT_CHNL_IDS; i++) { | 243 | for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) { |
244 | err = st_unregister(&ti_st_proto[i]); | 244 | err = st_unregister(&ti_st_proto[i]); |
245 | if (err) | 245 | if (err) |
246 | BT_ERR("st_unregister(%d) failed with error %d", | 246 | BT_ERR("st_unregister(%d) failed with error %d", |
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 75fb965b8f72..f997c27d79e2 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -1929,11 +1929,17 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s, | |||
1929 | goto out; | 1929 | goto out; |
1930 | 1930 | ||
1931 | s->manufact.len = buf[0] << 8 | buf[1]; | 1931 | s->manufact.len = buf[0] << 8 | buf[1]; |
1932 | if (s->manufact.len < 0 || s->manufact.len > 2048) { | 1932 | if (s->manufact.len < 0) { |
1933 | cdinfo(CD_WARNING, "Received invalid manufacture info length" | 1933 | cdinfo(CD_WARNING, "Received invalid manufacture info length" |
1934 | " (%d)\n", s->manufact.len); | 1934 | " (%d)\n", s->manufact.len); |
1935 | ret = -EIO; | 1935 | ret = -EIO; |
1936 | } else { | 1936 | } else { |
1937 | if (s->manufact.len > 2048) { | ||
1938 | cdinfo(CD_WARNING, "Received invalid manufacture info " | ||
1939 | "length (%d): truncating to 2048\n", | ||
1940 | s->manufact.len); | ||
1941 | s->manufact.len = 2048; | ||
1942 | } | ||
1937 | memcpy(s->manufact.value, &buf[4], s->manufact.len); | 1943 | memcpy(s->manufact.value, &buf[4], s->manufact.len); |
1938 | } | 1944 | } |
1939 | 1945 | ||
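
Rather than rejecting oversized manufacturer data outright, the cdrom hunk clamps the reported length to the 2048-byte destination before the memcpy() and keeps the hard error only for a negative length. A userspace sketch of the same clamp-before-copy pattern, using the buffer size shown above:

#include <stdio.h>
#include <string.h>

#define MANUFACT_MAX 2048

/* Copy at most MANUFACT_MAX bytes, warning (not failing) on oversized input. */
static int copy_manufact(char *dst, const char *src, int len)
{
        if (len < 0)
                return -1;                       /* corrupt length: hard error */

        if (len > MANUFACT_MAX) {
                fprintf(stderr, "invalid length %d, truncating to %d\n",
                        len, MANUFACT_MAX);
                len = MANUFACT_MAX;
        }
        memcpy(dst, src, len);
        return len;
}

int main(void)
{
        static char src[4096], dst[MANUFACT_MAX];

        printf("copied %d bytes\n", copy_manufact(dst, src, 3000));
        return 0;
}
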
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c index b6f8a65c9960..8eca55deb3a3 100644 --- a/drivers/char/msm_smd_pkt.c +++ b/drivers/char/msm_smd_pkt.c | |||
@@ -379,9 +379,8 @@ static int __init smd_pkt_init(void) | |||
379 | for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { | 379 | for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { |
380 | smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev), | 380 | smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev), |
381 | GFP_KERNEL); | 381 | GFP_KERNEL); |
382 | if (IS_ERR(smd_pkt_devp[i])) { | 382 | if (!smd_pkt_devp[i]) { |
383 | r = PTR_ERR(smd_pkt_devp[i]); | 383 | pr_err("kmalloc() failed\n"); |
384 | pr_err("kmalloc() failed %d\n", r); | ||
385 | goto clean_cdevs; | 384 | goto clean_cdevs; |
386 | } | 385 | } |
387 | 386 | ||
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index dc7c033ef587..32a77becc098 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
27 | #include <linux/irq.h> | 27 | #include <linux/irq.h> |
28 | #include <linux/err.h> | 28 | #include <linux/err.h> |
29 | #include <linux/delay.h> | ||
29 | #include <linux/clocksource.h> | 30 | #include <linux/clocksource.h> |
30 | #include <linux/clockchips.h> | 31 | #include <linux/clockchips.h> |
31 | #include <linux/sh_timer.h> | 32 | #include <linux/sh_timer.h> |
@@ -150,13 +151,13 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) | |||
150 | 151 | ||
151 | static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) | 152 | static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) |
152 | { | 153 | { |
153 | int ret; | 154 | int k, ret; |
154 | 155 | ||
155 | /* enable clock */ | 156 | /* enable clock */ |
156 | ret = clk_enable(p->clk); | 157 | ret = clk_enable(p->clk); |
157 | if (ret) { | 158 | if (ret) { |
158 | dev_err(&p->pdev->dev, "cannot enable clock\n"); | 159 | dev_err(&p->pdev->dev, "cannot enable clock\n"); |
159 | return ret; | 160 | goto err0; |
160 | } | 161 | } |
161 | 162 | ||
162 | /* make sure channel is disabled */ | 163 | /* make sure channel is disabled */ |
@@ -174,9 +175,38 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) | |||
174 | sh_cmt_write(p, CMCOR, 0xffffffff); | 175 | sh_cmt_write(p, CMCOR, 0xffffffff); |
175 | sh_cmt_write(p, CMCNT, 0); | 176 | sh_cmt_write(p, CMCNT, 0); |
176 | 177 | ||
178 | /* | ||
179 | * According to the sh73a0 user's manual, as CMCNT can be operated | ||
180 | * only by the RCLK (Pseudo 32 KHz), there's one restriction on | ||
181 | * modifying CMCNT register; two RCLK cycles are necessary before | ||
182 | * this register is either read or any modification of the value | ||
183 | * it holds is reflected in the LSI's actual operation. | ||
184 | * | ||
185 | * While at it, we're supposed to clear out the CMCNT as of this | ||
186 | * moment, so make sure it's processed properly here. This will | ||
187 | * take RCLKx2 at maximum. | ||
188 | */ | ||
189 | for (k = 0; k < 100; k++) { | ||
190 | if (!sh_cmt_read(p, CMCNT)) | ||
191 | break; | ||
192 | udelay(1); | ||
193 | } | ||
194 | |||
195 | if (sh_cmt_read(p, CMCNT)) { | ||
196 | dev_err(&p->pdev->dev, "cannot clear CMCNT\n"); | ||
197 | ret = -ETIMEDOUT; | ||
198 | goto err1; | ||
199 | } | ||
200 | |||
177 | /* enable channel */ | 201 | /* enable channel */ |
178 | sh_cmt_start_stop_ch(p, 1); | 202 | sh_cmt_start_stop_ch(p, 1); |
179 | return 0; | 203 | return 0; |
204 | err1: | ||
205 | /* stop clock */ | ||
206 | clk_disable(p->clk); | ||
207 | |||
208 | err0: | ||
209 | return ret; | ||
180 | } | 210 | } |
181 | 211 | ||
182 | static void sh_cmt_disable(struct sh_cmt_priv *p) | 212 | static void sh_cmt_disable(struct sh_cmt_priv *p) |
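
Because CMCNT is clocked by the slow RCLK, the new sh_cmt code polls until the counter reads back as zero and gives up after a bounded number of short delays instead of spinning forever. A userspace sketch of that bounded-poll idiom; the fake register and usleep() stand in for the MMIO read and udelay():

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static unsigned int fake_cmcnt = 3;              /* pretend hardware counter */

static unsigned int read_cmcnt(void)
{
        if (fake_cmcnt)                          /* "hardware" drains one step per read */
                fake_cmcnt--;
        return fake_cmcnt;
}

/* Poll until the counter clears, at most 100 x 1us, mirroring the hunk above. */
static int wait_cmcnt_clear(void)
{
        int k;

        for (k = 0; k < 100; k++) {
                if (!read_cmcnt())
                        return 0;
                usleep(1);
        }
        return -ETIMEDOUT;
}

int main(void)
{
        printf("wait_cmcnt_clear() = %d\n", wait_cmcnt_clear());
        return 0;
}
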
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 7b0603eb0129..cdc02ac8f41a 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c | |||
@@ -261,6 +261,9 @@ static int pcc_get_offset(int cpu) | |||
261 | pr = per_cpu(processors, cpu); | 261 | pr = per_cpu(processors, cpu); |
262 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); | 262 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); |
263 | 263 | ||
264 | if (!pr) | ||
265 | return -ENODEV; | ||
266 | |||
264 | status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer); | 267 | status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer); |
265 | if (ACPI_FAILURE(status)) | 268 | if (ACPI_FAILURE(status)) |
266 | return -ENODEV; | 269 | return -ENODEV; |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index cd3a7c726bf8..467e4dcb20a0 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -174,8 +174,10 @@ struct d40_base; | |||
174 | * @tasklet: Tasklet that gets scheduled from interrupt context to complete a | 174 | * @tasklet: Tasklet that gets scheduled from interrupt context to complete a |
175 | * transfer and call client callback. | 175 | * transfer and call client callback. |
176 | * @client: Client owned descriptor list. | 176 | * @client: Client owned descriptor list. |
177 | * @pending_queue: Submitted jobs, to be issued by issue_pending() | ||
177 | * @active: Active descriptor. | 178 | * @active: Active descriptor. |
178 | * @queue: Queued jobs. | 179 | * @queue: Queued jobs. |
180 | * @prepare_queue: Prepared jobs. | ||
179 | * @dma_cfg: The client configuration of this dma channel. | 181 | * @dma_cfg: The client configuration of this dma channel. |
180 | * @configured: whether the dma_cfg configuration is valid | 182 | * @configured: whether the dma_cfg configuration is valid |
181 | * @base: Pointer to the device instance struct. | 183 | * @base: Pointer to the device instance struct. |
@@ -203,6 +205,7 @@ struct d40_chan { | |||
203 | struct list_head pending_queue; | 205 | struct list_head pending_queue; |
204 | struct list_head active; | 206 | struct list_head active; |
205 | struct list_head queue; | 207 | struct list_head queue; |
208 | struct list_head prepare_queue; | ||
206 | struct stedma40_chan_cfg dma_cfg; | 209 | struct stedma40_chan_cfg dma_cfg; |
207 | bool configured; | 210 | bool configured; |
208 | struct d40_base *base; | 211 | struct d40_base *base; |
@@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | |||
477 | 480 | ||
478 | list_for_each_entry_safe(d, _d, &d40c->client, node) | 481 | list_for_each_entry_safe(d, _d, &d40c->client, node) |
479 | if (async_tx_test_ack(&d->txd)) { | 482 | if (async_tx_test_ack(&d->txd)) { |
480 | d40_pool_lli_free(d40c, d); | ||
481 | d40_desc_remove(d); | 483 | d40_desc_remove(d); |
482 | desc = d; | 484 | desc = d; |
483 | memset(desc, 0, sizeof(*desc)); | 485 | memset(desc, 0, sizeof(*desc)); |
@@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) | |||
644 | return d; | 646 | return d; |
645 | } | 647 | } |
646 | 648 | ||
649 | /* remove desc from current queue and add it to the pending_queue */ | ||
647 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) | 650 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) |
648 | { | 651 | { |
652 | d40_desc_remove(desc); | ||
653 | desc->is_in_client_list = false; | ||
649 | list_add_tail(&desc->node, &d40c->pending_queue); | 654 | list_add_tail(&desc->node, &d40c->pending_queue); |
650 | } | 655 | } |
651 | 656 | ||
@@ -803,6 +808,7 @@ done: | |||
803 | static void d40_term_all(struct d40_chan *d40c) | 808 | static void d40_term_all(struct d40_chan *d40c) |
804 | { | 809 | { |
805 | struct d40_desc *d40d; | 810 | struct d40_desc *d40d; |
811 | struct d40_desc *_d; | ||
806 | 812 | ||
807 | /* Release active descriptors */ | 813 | /* Release active descriptors */ |
808 | while ((d40d = d40_first_active_get(d40c))) { | 814 | while ((d40d = d40_first_active_get(d40c))) { |
@@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c) | |||
822 | d40_desc_free(d40c, d40d); | 828 | d40_desc_free(d40c, d40d); |
823 | } | 829 | } |
824 | 830 | ||
831 | /* Release client owned descriptors */ | ||
832 | if (!list_empty(&d40c->client)) | ||
833 | list_for_each_entry_safe(d40d, _d, &d40c->client, node) { | ||
834 | d40_desc_remove(d40d); | ||
835 | d40_desc_free(d40c, d40d); | ||
836 | } | ||
837 | |||
838 | /* Release descriptors in prepare queue */ | ||
839 | if (!list_empty(&d40c->prepare_queue)) | ||
840 | list_for_each_entry_safe(d40d, _d, | ||
841 | &d40c->prepare_queue, node) { | ||
842 | d40_desc_remove(d40d); | ||
843 | d40_desc_free(d40c, d40d); | ||
844 | } | ||
845 | |||
825 | d40c->pending_tx = 0; | 846 | d40c->pending_tx = 0; |
826 | d40c->busy = false; | 847 | d40c->busy = false; |
827 | } | 848 | } |
@@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data) | |||
1208 | 1229 | ||
1209 | if (!d40d->cyclic) { | 1230 | if (!d40d->cyclic) { |
1210 | if (async_tx_test_ack(&d40d->txd)) { | 1231 | if (async_tx_test_ack(&d40d->txd)) { |
1211 | d40_pool_lli_free(d40c, d40d); | ||
1212 | d40_desc_remove(d40d); | 1232 | d40_desc_remove(d40d); |
1213 | d40_desc_free(d40c, d40d); | 1233 | d40_desc_free(d40c, d40d); |
1214 | } else { | 1234 | } else { |
@@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1595 | u32 event; | 1615 | u32 event; |
1596 | struct d40_phy_res *phy = d40c->phy_chan; | 1616 | struct d40_phy_res *phy = d40c->phy_chan; |
1597 | bool is_src; | 1617 | bool is_src; |
1598 | struct d40_desc *d; | ||
1599 | struct d40_desc *_d; | ||
1600 | |||
1601 | 1618 | ||
1602 | /* Terminate all queued and active transfers */ | 1619 | /* Terminate all queued and active transfers */ |
1603 | d40_term_all(d40c); | 1620 | d40_term_all(d40c); |
1604 | 1621 | ||
1605 | /* Release client owned descriptors */ | ||
1606 | if (!list_empty(&d40c->client)) | ||
1607 | list_for_each_entry_safe(d, _d, &d40c->client, node) { | ||
1608 | d40_pool_lli_free(d40c, d); | ||
1609 | d40_desc_remove(d); | ||
1610 | d40_desc_free(d40c, d); | ||
1611 | } | ||
1612 | |||
1613 | if (phy == NULL) { | 1622 | if (phy == NULL) { |
1614 | chan_err(d40c, "phy == null\n"); | 1623 | chan_err(d40c, "phy == null\n"); |
1615 | return -EINVAL; | 1624 | return -EINVAL; |
@@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | |||
1911 | goto err; | 1920 | goto err; |
1912 | } | 1921 | } |
1913 | 1922 | ||
1923 | /* | ||
1924 | * add descriptor to the prepare queue in order to be able | ||
1925 | * to free them later in terminate_all | ||
1926 | */ | ||
1927 | list_add_tail(&desc->node, &chan->prepare_queue); | ||
1928 | |||
1914 | spin_unlock_irqrestore(&chan->lock, flags); | 1929 | spin_unlock_irqrestore(&chan->lock, flags); |
1915 | 1930 | ||
1916 | return &desc->txd; | 1931 | return &desc->txd; |
@@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |||
2400 | INIT_LIST_HEAD(&d40c->queue); | 2415 | INIT_LIST_HEAD(&d40c->queue); |
2401 | INIT_LIST_HEAD(&d40c->pending_queue); | 2416 | INIT_LIST_HEAD(&d40c->pending_queue); |
2402 | INIT_LIST_HEAD(&d40c->client); | 2417 | INIT_LIST_HEAD(&d40c->client); |
2418 | INIT_LIST_HEAD(&d40c->prepare_queue); | ||
2403 | 2419 | ||
2404 | tasklet_init(&d40c->tasklet, dma_tasklet, | 2420 | tasklet_init(&d40c->tasklet, dma_tasklet, |
2405 | (unsigned long) d40c); | 2421 | (unsigned long) d40c); |
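
The ste_dma40 changes keep every prepared descriptor on a prepare_queue so terminate_all can free it even if the client never submitted it, and the teardown loops use the _safe list iterators because entries are freed while walking. A minimal userspace analogue of freeing while traversing a list (the descriptor struct is invented):

#include <stdio.h>
#include <stdlib.h>

struct desc {
        int id;
        struct desc *next;
};

/* Free every node; save 'next' before freeing, for the same reason the
 * kernel code above uses list_for_each_entry_safe() in d40_term_all(). */
static void free_all(struct desc **head)
{
        struct desc *d = *head, *next;

        while (d) {
                next = d->next;
                printf("freeing desc %d\n", d->id);
                free(d);
                d = next;
        }
        *head = NULL;
}

int main(void)
{
        struct desc *head = NULL;

        for (int i = 0; i < 3; i++) {            /* "prepare" three descriptors */
                struct desc *d = malloc(sizeof(*d));
                d->id = i;
                d->next = head;
                head = d;
        }
        free_all(&head);                         /* terminate_all analogue */
        return 0;
}
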
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 04f1e7ce02b1..f6cf448d69b4 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c | |||
@@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, | |||
1670 | char *type, *optype, *err, *msg; | 1670 | char *type, *optype, *err, *msg; |
1671 | unsigned long error = m->status & 0x1ff0000l; | 1671 | unsigned long error = m->status & 0x1ff0000l; |
1672 | u32 optypenum = (m->status >> 4) & 0x07; | 1672 | u32 optypenum = (m->status >> 4) & 0x07; |
1673 | u32 core_err_cnt = (m->status >> 38) && 0x7fff; | 1673 | u32 core_err_cnt = (m->status >> 38) & 0x7fff; |
1674 | u32 dimm = (m->misc >> 16) & 0x3; | 1674 | u32 dimm = (m->misc >> 16) & 0x3; |
1675 | u32 channel = (m->misc >> 18) & 0x3; | 1675 | u32 channel = (m->misc >> 18) & 0x3; |
1676 | u32 syndrome = m->misc >> 32; | 1676 | u32 syndrome = m->misc >> 32; |
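
The MCE decoding fix replaces a logical && with the bitwise & needed to extract the 15-bit error counter; the logical operator collapses any non-zero field to 1. A tiny standalone demonstration of the difference (the status value is arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t status = 0x123456789abcdef0ULL;

        /* Logical AND collapses the field to 0 or 1 ... */
        uint32_t wrong = (status >> 38) && 0x7fff;
        /* ... while bitwise AND actually extracts the 15-bit counter. */
        uint32_t right = (status >> 38) & 0x7fff;

        printf("&& gives %u, & gives 0x%x\n", (unsigned)wrong, (unsigned)right);
        return 0;
}
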
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index e6ad3bb6c1a6..4799393247c8 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c | |||
@@ -216,15 +216,33 @@ struct inbound_phy_packet_event { | |||
216 | struct fw_cdev_event_phy_packet phy_packet; | 216 | struct fw_cdev_event_phy_packet phy_packet; |
217 | }; | 217 | }; |
218 | 218 | ||
219 | static inline void __user *u64_to_uptr(__u64 value) | 219 | #ifdef CONFIG_COMPAT |
220 | static void __user *u64_to_uptr(u64 value) | ||
221 | { | ||
222 | if (is_compat_task()) | ||
223 | return compat_ptr(value); | ||
224 | else | ||
225 | return (void __user *)(unsigned long)value; | ||
226 | } | ||
227 | |||
228 | static u64 uptr_to_u64(void __user *ptr) | ||
229 | { | ||
230 | if (is_compat_task()) | ||
231 | return ptr_to_compat(ptr); | ||
232 | else | ||
233 | return (u64)(unsigned long)ptr; | ||
234 | } | ||
235 | #else | ||
236 | static inline void __user *u64_to_uptr(u64 value) | ||
220 | { | 237 | { |
221 | return (void __user *)(unsigned long)value; | 238 | return (void __user *)(unsigned long)value; |
222 | } | 239 | } |
223 | 240 | ||
224 | static inline __u64 uptr_to_u64(void __user *ptr) | 241 | static inline u64 uptr_to_u64(void __user *ptr) |
225 | { | 242 | { |
226 | return (__u64)(unsigned long)ptr; | 243 | return (u64)(unsigned long)ptr; |
227 | } | 244 | } |
245 | #endif /* CONFIG_COMPAT */ | ||
228 | 246 | ||
229 | static int fw_device_op_open(struct inode *inode, struct file *file) | 247 | static int fw_device_op_open(struct inode *inode, struct file *file) |
230 | { | 248 | { |
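
The firewire ioctl structures carry user pointers in u64 fields; on a 64-bit kernel a 32-bit caller may leave junk or sign extension in the upper half, so a compat task's value has to be narrowed (compat_ptr()) while native tasks keep the full width. A hedged userspace illustration of round-tripping pointers through a fixed-width handle; the is_compat flag here merely simulates the kernel's is_compat_task():

#include <stdint.h>
#include <stdio.h>

/* Turn a 64-bit handle back into a pointer. For a 32-bit ("compat") caller,
 * only the low 32 bits are meaningful, so mask before converting. */
static void *handle_to_ptr(uint64_t value, int is_compat)
{
        if (is_compat)
                return (void *)(uintptr_t)(uint32_t)value;
        return (void *)(uintptr_t)value;
}

static uint64_t ptr_to_handle(void *ptr, int is_compat)
{
        if (is_compat)
                return (uint32_t)(uintptr_t)ptr;
        return (uint64_t)(uintptr_t)ptr;
}

int main(void)
{
        int x = 42;
        uint64_t h = ptr_to_handle(&x, 0);

        printf("round-trip ok: %d\n", handle_to_ptr(h, 0) == (void *)&x);
        return 0;
}
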
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 8ba7f7928f1f..f3b890da1e87 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c | |||
@@ -455,15 +455,20 @@ static struct device_attribute fw_device_attributes[] = { | |||
455 | static int read_rom(struct fw_device *device, | 455 | static int read_rom(struct fw_device *device, |
456 | int generation, int index, u32 *data) | 456 | int generation, int index, u32 *data) |
457 | { | 457 | { |
458 | int rcode; | 458 | u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4; |
459 | int i, rcode; | ||
459 | 460 | ||
460 | /* device->node_id, accessed below, must not be older than generation */ | 461 | /* device->node_id, accessed below, must not be older than generation */ |
461 | smp_rmb(); | 462 | smp_rmb(); |
462 | 463 | ||
463 | rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST, | 464 | for (i = 10; i < 100; i += 10) { |
464 | device->node_id, generation, device->max_speed, | 465 | rcode = fw_run_transaction(device->card, |
465 | (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4, | 466 | TCODE_READ_QUADLET_REQUEST, device->node_id, |
466 | data, 4); | 467 | generation, device->max_speed, offset, data, 4); |
468 | if (rcode != RCODE_BUSY) | ||
469 | break; | ||
470 | msleep(i); | ||
471 | } | ||
467 | be32_to_cpus(data); | 472 | be32_to_cpus(data); |
468 | 473 | ||
469 | return rcode; | 474 | return rcode; |
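
Config ROM reads can hit a node that is still busy right after a bus reset, so the new read_rom() retries the quadlet read with a growing delay (10, 20, ... 90 ms) and stops as soon as the response is anything other than busy. A userspace sketch of that linear-backoff retry loop; the transaction function and rcode values are stand-ins:

#include <stdio.h>
#include <unistd.h>

enum { RCODE_COMPLETE = 0, RCODE_BUSY = 1 };

static int attempts_left = 3;                    /* pretend the node is busy twice */

static int run_transaction(void)
{
        return --attempts_left > 0 ? RCODE_BUSY : RCODE_COMPLETE;
}

/* Retry with a linearly growing delay, as in read_rom() above. */
static int read_quadlet(void)
{
        int i, rcode;

        for (i = 10; i < 100; i += 10) {
                rcode = run_transaction();
                if (rcode != RCODE_BUSY)
                        break;
                usleep(i * 1000);                /* msleep(i) analogue */
        }
        return rcode;
}

int main(void)
{
        printf("read_quadlet() = %d\n", read_quadlet());
        return 0;
}
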
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index bcf792fac442..fd7170a9ad2c 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -290,6 +290,9 @@ static const struct { | |||
290 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, | 290 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, |
291 | QUIRK_CYCLE_TIMER}, | 291 | QUIRK_CYCLE_TIMER}, |
292 | 292 | ||
293 | {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID, | ||
294 | QUIRK_NO_MSI}, | ||
295 | |||
293 | {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, | 296 | {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, |
294 | QUIRK_CYCLE_TIMER}, | 297 | QUIRK_CYCLE_TIMER}, |
295 | 298 | ||
@@ -2179,8 +2182,13 @@ static int ohci_enable(struct fw_card *card, | |||
2179 | ohci_driver_name, ohci)) { | 2182 | ohci_driver_name, ohci)) { |
2180 | fw_error("Failed to allocate interrupt %d.\n", dev->irq); | 2183 | fw_error("Failed to allocate interrupt %d.\n", dev->irq); |
2181 | pci_disable_msi(dev); | 2184 | pci_disable_msi(dev); |
2182 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 2185 | |
2183 | ohci->config_rom, ohci->config_rom_bus); | 2186 | if (config_rom) { |
2187 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | ||
2188 | ohci->next_config_rom, | ||
2189 | ohci->next_config_rom_bus); | ||
2190 | ohci->next_config_rom = NULL; | ||
2191 | } | ||
2184 | return -EIO; | 2192 | return -EIO; |
2185 | } | 2193 | } |
2186 | 2194 | ||
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 41841a3e3f99..17cef864506a 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
@@ -1198,6 +1198,10 @@ static int sbp2_remove(struct device *dev) | |||
1198 | { | 1198 | { |
1199 | struct fw_unit *unit = fw_unit(dev); | 1199 | struct fw_unit *unit = fw_unit(dev); |
1200 | struct sbp2_target *tgt = dev_get_drvdata(&unit->device); | 1200 | struct sbp2_target *tgt = dev_get_drvdata(&unit->device); |
1201 | struct sbp2_logical_unit *lu; | ||
1202 | |||
1203 | list_for_each_entry(lu, &tgt->lu_list, link) | ||
1204 | cancel_delayed_work_sync(&lu->work); | ||
1201 | 1205 | ||
1202 | sbp2_target_put(tgt); | 1206 | sbp2_target_put(tgt); |
1203 | return 0; | 1207 | return 0; |
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index 68810fd1a59d..aa83de9db1b9 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c | |||
@@ -420,7 +420,7 @@ static efi_status_t gsmi_get_next_variable(unsigned long *name_size, | |||
420 | 420 | ||
421 | static efi_status_t gsmi_set_variable(efi_char16_t *name, | 421 | static efi_status_t gsmi_set_variable(efi_char16_t *name, |
422 | efi_guid_t *vendor, | 422 | efi_guid_t *vendor, |
423 | unsigned long attr, | 423 | u32 attr, |
424 | unsigned long data_size, | 424 | unsigned long data_size, |
425 | void *data) | 425 | void *data) |
426 | { | 426 | { |
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c index 231714def4d2..4e24436b0f82 100644 --- a/drivers/gpio/gpio-generic.c +++ b/drivers/gpio/gpio-generic.c | |||
@@ -351,7 +351,7 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc, | |||
351 | return 0; | 351 | return 0; |
352 | } | 352 | } |
353 | 353 | ||
354 | int __devexit bgpio_remove(struct bgpio_chip *bgc) | 354 | int bgpio_remove(struct bgpio_chip *bgc) |
355 | { | 355 | { |
356 | int err = gpiochip_remove(&bgc->gc); | 356 | int err = gpiochip_remove(&bgc->gc); |
357 | 357 | ||
@@ -361,15 +361,10 @@ int __devexit bgpio_remove(struct bgpio_chip *bgc) | |||
361 | } | 361 | } |
362 | EXPORT_SYMBOL_GPL(bgpio_remove); | 362 | EXPORT_SYMBOL_GPL(bgpio_remove); |
363 | 363 | ||
364 | int __devinit bgpio_init(struct bgpio_chip *bgc, | 364 | int bgpio_init(struct bgpio_chip *bgc, struct device *dev, |
365 | struct device *dev, | 365 | unsigned long sz, void __iomem *dat, void __iomem *set, |
366 | unsigned long sz, | 366 | void __iomem *clr, void __iomem *dirout, void __iomem *dirin, |
367 | void __iomem *dat, | 367 | bool big_endian) |
368 | void __iomem *set, | ||
369 | void __iomem *clr, | ||
370 | void __iomem *dirout, | ||
371 | void __iomem *dirin, | ||
372 | bool big_endian) | ||
373 | { | 368 | { |
374 | int ret; | 369 | int ret; |
375 | 370 | ||
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 82db18506662..fe738f05309b 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -499,6 +499,7 @@ void drm_connector_cleanup(struct drm_connector *connector) | |||
499 | mutex_lock(&dev->mode_config.mutex); | 499 | mutex_lock(&dev->mode_config.mutex); |
500 | drm_mode_object_put(dev, &connector->base); | 500 | drm_mode_object_put(dev, &connector->base); |
501 | list_del(&connector->head); | 501 | list_del(&connector->head); |
502 | dev->mode_config.num_connector--; | ||
502 | mutex_unlock(&dev->mode_config.mutex); | 503 | mutex_unlock(&dev->mode_config.mutex); |
503 | } | 504 | } |
504 | EXPORT_SYMBOL(drm_connector_cleanup); | 505 | EXPORT_SYMBOL(drm_connector_cleanup); |
@@ -529,6 +530,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder) | |||
529 | mutex_lock(&dev->mode_config.mutex); | 530 | mutex_lock(&dev->mode_config.mutex); |
530 | drm_mode_object_put(dev, &encoder->base); | 531 | drm_mode_object_put(dev, &encoder->base); |
531 | list_del(&encoder->head); | 532 | list_del(&encoder->head); |
533 | dev->mode_config.num_encoder--; | ||
532 | mutex_unlock(&dev->mode_config.mutex); | 534 | mutex_unlock(&dev->mode_config.mutex); |
533 | } | 535 | } |
534 | EXPORT_SYMBOL(drm_encoder_cleanup); | 536 | EXPORT_SYMBOL(drm_encoder_cleanup); |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 802b61ac3139..f7c6854eb4dd 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed, | |||
256 | { | 256 | { |
257 | printk(KERN_ERR "panic occurred, switching back to text console\n"); | 257 | printk(KERN_ERR "panic occurred, switching back to text console\n"); |
258 | return drm_fb_helper_force_kernel_mode(); | 258 | return drm_fb_helper_force_kernel_mode(); |
259 | return 0; | ||
260 | } | 259 | } |
261 | EXPORT_SYMBOL(drm_fb_helper_panic); | 260 | EXPORT_SYMBOL(drm_fb_helper_panic); |
262 | 261 | ||
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a8ab6263e0d7..3c395a59da35 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) | |||
499 | seq_printf(m, "Interrupts received: %d\n", | 499 | seq_printf(m, "Interrupts received: %d\n", |
500 | atomic_read(&dev_priv->irq_received)); | 500 | atomic_read(&dev_priv->irq_received)); |
501 | for (i = 0; i < I915_NUM_RINGS; i++) { | 501 | for (i = 0; i < I915_NUM_RINGS; i++) { |
502 | if (IS_GEN6(dev)) { | 502 | if (IS_GEN6(dev) || IS_GEN7(dev)) { |
503 | seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", | 503 | seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", |
504 | dev_priv->ring[i].name, | 504 | dev_priv->ring[i].name, |
505 | I915_READ_IMR(&dev_priv->ring[i])); | 505 | I915_READ_IMR(&dev_priv->ring[i])); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index feb4f164fd1b..7916bd97d5c1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/io-mapping.h> | 36 | #include <linux/io-mapping.h> |
37 | #include <linux/i2c.h> | 37 | #include <linux/i2c.h> |
38 | #include <drm/intel-gtt.h> | 38 | #include <drm/intel-gtt.h> |
39 | #include <linux/backlight.h> | ||
39 | 40 | ||
40 | /* General customization: | 41 | /* General customization: |
41 | */ | 42 | */ |
@@ -690,6 +691,7 @@ typedef struct drm_i915_private { | |||
690 | int child_dev_num; | 691 | int child_dev_num; |
691 | struct child_device_config *child_dev; | 692 | struct child_device_config *child_dev; |
692 | struct drm_connector *int_lvds_connector; | 693 | struct drm_connector *int_lvds_connector; |
694 | struct drm_connector *int_edp_connector; | ||
693 | 695 | ||
694 | bool mchbar_need_disable; | 696 | bool mchbar_need_disable; |
695 | 697 | ||
@@ -723,6 +725,8 @@ typedef struct drm_i915_private { | |||
723 | /* list of fbdev register on this device */ | 725 | /* list of fbdev register on this device */ |
724 | struct intel_fbdev *fbdev; | 726 | struct intel_fbdev *fbdev; |
725 | 727 | ||
728 | struct backlight_device *backlight; | ||
729 | |||
726 | struct drm_property *broadcast_rgb_property; | 730 | struct drm_property *broadcast_rgb_property; |
727 | struct drm_property *force_audio_property; | 731 | struct drm_property *force_audio_property; |
728 | 732 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 02f96fd0d52d..9cbb0cd8f46a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -2058,8 +2058,10 @@ void intel_irq_init(struct drm_device *dev) | |||
2058 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | 2058 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; |
2059 | } | 2059 | } |
2060 | 2060 | ||
2061 | 2061 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
2062 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; | 2062 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; |
2063 | else | ||
2064 | dev->driver->get_vblank_timestamp = NULL; | ||
2063 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; | 2065 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
2064 | 2066 | ||
2065 | if (IS_IVYBRIDGE(dev)) { | 2067 | if (IS_IVYBRIDGE(dev)) { |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d1331f771e2f..542453f7498c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -375,6 +375,7 @@ | |||
375 | # define MI_FLUSH_ENABLE (1 << 11) | 375 | # define MI_FLUSH_ENABLE (1 << 11) |
376 | 376 | ||
377 | #define GFX_MODE 0x02520 | 377 | #define GFX_MODE 0x02520 |
378 | #define GFX_MODE_GEN7 0x0229c | ||
378 | #define GFX_RUN_LIST_ENABLE (1<<15) | 379 | #define GFX_RUN_LIST_ENABLE (1<<15) |
379 | #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) | 380 | #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) |
380 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) | 381 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) |
@@ -382,6 +383,9 @@ | |||
382 | #define GFX_PSMI_GRANULARITY (1<<10) | 383 | #define GFX_PSMI_GRANULARITY (1<<10) |
383 | #define GFX_PPGTT_ENABLE (1<<9) | 384 | #define GFX_PPGTT_ENABLE (1<<9) |
384 | 385 | ||
386 | #define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit)) | ||
387 | #define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0)) | ||
388 | |||
385 | #define SCPD0 0x0209c /* 915+ only */ | 389 | #define SCPD0 0x0209c /* 915+ only */ |
386 | #define IER 0x020a0 | 390 | #define IER 0x020a0 |
387 | #define IIR 0x020a4 | 391 | #define IIR 0x020a4 |
@@ -1318,6 +1322,7 @@ | |||
1318 | #define ADPA_PIPE_SELECT_MASK (1<<30) | 1322 | #define ADPA_PIPE_SELECT_MASK (1<<30) |
1319 | #define ADPA_PIPE_A_SELECT 0 | 1323 | #define ADPA_PIPE_A_SELECT 0 |
1320 | #define ADPA_PIPE_B_SELECT (1<<30) | 1324 | #define ADPA_PIPE_B_SELECT (1<<30) |
1325 | #define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) | ||
1321 | #define ADPA_USE_VGA_HVPOLARITY (1<<15) | 1326 | #define ADPA_USE_VGA_HVPOLARITY (1<<15) |
1322 | #define ADPA_SETS_HVPOLARITY 0 | 1327 | #define ADPA_SETS_HVPOLARITY 0 |
1323 | #define ADPA_VSYNC_CNTL_DISABLE (1<<11) | 1328 | #define ADPA_VSYNC_CNTL_DISABLE (1<<11) |
@@ -1460,6 +1465,7 @@ | |||
1460 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ | 1465 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ |
1461 | #define LVDS_PIPEB_SELECT (1 << 30) | 1466 | #define LVDS_PIPEB_SELECT (1 << 30) |
1462 | #define LVDS_PIPE_MASK (1 << 30) | 1467 | #define LVDS_PIPE_MASK (1 << 30) |
1468 | #define LVDS_PIPE(pipe) ((pipe) << 30) | ||
1463 | /* LVDS dithering flag on 965/g4x platform */ | 1469 | /* LVDS dithering flag on 965/g4x platform */ |
1464 | #define LVDS_ENABLE_DITHER (1 << 25) | 1470 | #define LVDS_ENABLE_DITHER (1 << 25) |
1465 | /* LVDS sync polarity flags. Set to invert (i.e. negative) */ | 1471 | /* LVDS sync polarity flags. Set to invert (i.e. negative) */ |
@@ -1499,9 +1505,6 @@ | |||
1499 | #define LVDS_B0B3_POWER_DOWN (0 << 2) | 1505 | #define LVDS_B0B3_POWER_DOWN (0 << 2) |
1500 | #define LVDS_B0B3_POWER_UP (3 << 2) | 1506 | #define LVDS_B0B3_POWER_UP (3 << 2) |
1501 | 1507 | ||
1502 | #define LVDS_PIPE_ENABLED(V, P) \ | ||
1503 | (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN)) | ||
1504 | |||
1505 | /* Video Data Island Packet control */ | 1508 | /* Video Data Island Packet control */ |
1506 | #define VIDEO_DIP_DATA 0x61178 | 1509 | #define VIDEO_DIP_DATA 0x61178 |
1507 | #define VIDEO_DIP_CTL 0x61170 | 1510 | #define VIDEO_DIP_CTL 0x61170 |
@@ -3256,14 +3259,12 @@ | |||
3256 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) | 3259 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) |
3257 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) | 3260 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) |
3258 | 3261 | ||
3259 | #define ADPA_PIPE_ENABLED(V, P) \ | ||
3260 | (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE)) | ||
3261 | |||
3262 | /* or SDVOB */ | 3262 | /* or SDVOB */ |
3263 | #define HDMIB 0xe1140 | 3263 | #define HDMIB 0xe1140 |
3264 | #define PORT_ENABLE (1 << 31) | 3264 | #define PORT_ENABLE (1 << 31) |
3265 | #define TRANSCODER_A (0) | 3265 | #define TRANSCODER_A (0) |
3266 | #define TRANSCODER_B (1 << 30) | 3266 | #define TRANSCODER_B (1 << 30) |
3267 | #define TRANSCODER(pipe) ((pipe) << 30) | ||
3267 | #define TRANSCODER_MASK (1 << 30) | 3268 | #define TRANSCODER_MASK (1 << 30) |
3268 | #define COLOR_FORMAT_8bpc (0) | 3269 | #define COLOR_FORMAT_8bpc (0) |
3269 | #define COLOR_FORMAT_12bpc (3 << 26) | 3270 | #define COLOR_FORMAT_12bpc (3 << 26) |
@@ -3280,9 +3281,6 @@ | |||
3280 | #define HSYNC_ACTIVE_HIGH (1 << 3) | 3281 | #define HSYNC_ACTIVE_HIGH (1 << 3) |
3281 | #define PORT_DETECTED (1 << 2) | 3282 | #define PORT_DETECTED (1 << 2) |
3282 | 3283 | ||
3283 | #define HDMI_PIPE_ENABLED(V, P) \ | ||
3284 | (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE)) | ||
3285 | |||
3286 | /* PCH SDVOB multiplex with HDMIB */ | 3284 | /* PCH SDVOB multiplex with HDMIB */ |
3287 | #define PCH_SDVOB HDMIB | 3285 | #define PCH_SDVOB HDMIB |
3288 | 3286 | ||
@@ -3349,6 +3347,7 @@ | |||
3349 | #define PORT_TRANS_B_SEL_CPT (1<<29) | 3347 | #define PORT_TRANS_B_SEL_CPT (1<<29) |
3350 | #define PORT_TRANS_C_SEL_CPT (2<<29) | 3348 | #define PORT_TRANS_C_SEL_CPT (2<<29) |
3351 | #define PORT_TRANS_SEL_MASK (3<<29) | 3349 | #define PORT_TRANS_SEL_MASK (3<<29) |
3350 | #define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) | ||
3352 | 3351 | ||
3353 | #define TRANS_DP_CTL_A 0xe0300 | 3352 | #define TRANS_DP_CTL_A 0xe0300 |
3354 | #define TRANS_DP_CTL_B 0xe1300 | 3353 | #define TRANS_DP_CTL_B 0xe1300 |
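
GFX_MODE on newer hardware is a masked register: the upper 16 bits of a write select which of the lower 16 bits take effect, which is exactly what the new GFX_MODE_ENABLE()/GFX_MODE_DISABLE() macros compute. A small standalone check of those values (the example bit is illustrative):

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the GFX_MODE_ENABLE/GFX_MODE_DISABLE macros above:
 * the high half is the write mask, the low half the new bit value. */
#define MODE_ENABLE(bit)  (((bit) << 16) | (bit))
#define MODE_DISABLE(bit) (((bit) << 16) | 0)

int main(void)
{
        uint32_t bit = 1u << 9;                  /* e.g. a PPGTT-enable style bit */

        printf("enable  -> 0x%08x\n", (unsigned)MODE_ENABLE(bit));
        printf("disable -> 0x%08x\n", (unsigned)MODE_DISABLE(bit));
        return 0;
}
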
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 87677d60d0df..f10742359ec9 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -871,7 +871,8 @@ int i915_restore_state(struct drm_device *dev) | |||
871 | } | 871 | } |
872 | mutex_unlock(&dev->struct_mutex); | 872 | mutex_unlock(&dev->struct_mutex); |
873 | 873 | ||
874 | intel_init_clock_gating(dev); | 874 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
875 | intel_init_clock_gating(dev); | ||
875 | 876 | ||
876 | if (IS_IRONLAKE_M(dev)) { | 877 | if (IS_IRONLAKE_M(dev)) { |
877 | ironlake_enable_drps(dev); | 878 | ironlake_enable_drps(dev); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 35364e68a091..56a8554d9039 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -878,7 +878,7 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv, | |||
878 | int pp_reg, lvds_reg; | 878 | int pp_reg, lvds_reg; |
879 | u32 val; | 879 | u32 val; |
880 | enum pipe panel_pipe = PIPE_A; | 880 | enum pipe panel_pipe = PIPE_A; |
881 | bool locked = locked; | 881 | bool locked = true; |
882 | 882 | ||
883 | if (HAS_PCH_SPLIT(dev_priv->dev)) { | 883 | if (HAS_PCH_SPLIT(dev_priv->dev)) { |
884 | pp_reg = PCH_PP_CONTROL; | 884 | pp_reg = PCH_PP_CONTROL; |
@@ -980,8 +980,8 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, | |||
980 | pipe_name(pipe)); | 980 | pipe_name(pipe)); |
981 | } | 981 | } |
982 | 982 | ||
983 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, | 983 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, |
984 | int reg, u32 port_sel, u32 val) | 984 | enum pipe pipe, u32 port_sel, u32 val) |
985 | { | 985 | { |
986 | if ((val & DP_PORT_EN) == 0) | 986 | if ((val & DP_PORT_EN) == 0) |
987 | return false; | 987 | return false; |
@@ -998,11 +998,58 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, | |||
998 | return true; | 998 | return true; |
999 | } | 999 | } |
1000 | 1000 | ||
1001 | static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, | ||
1002 | enum pipe pipe, u32 val) | ||
1003 | { | ||
1004 | if ((val & PORT_ENABLE) == 0) | ||
1005 | return false; | ||
1006 | |||
1007 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
1008 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | ||
1009 | return false; | ||
1010 | } else { | ||
1011 | if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) | ||
1012 | return false; | ||
1013 | } | ||
1014 | return true; | ||
1015 | } | ||
1016 | |||
1017 | static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, | ||
1018 | enum pipe pipe, u32 val) | ||
1019 | { | ||
1020 | if ((val & LVDS_PORT_EN) == 0) | ||
1021 | return false; | ||
1022 | |||
1023 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
1024 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | ||
1025 | return false; | ||
1026 | } else { | ||
1027 | if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) | ||
1028 | return false; | ||
1029 | } | ||
1030 | return true; | ||
1031 | } | ||
1032 | |||
1033 | static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, | ||
1034 | enum pipe pipe, u32 val) | ||
1035 | { | ||
1036 | if ((val & ADPA_DAC_ENABLE) == 0) | ||
1037 | return false; | ||
1038 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
1039 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | ||
1040 | return false; | ||
1041 | } else { | ||
1042 | if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) | ||
1043 | return false; | ||
1044 | } | ||
1045 | return true; | ||
1046 | } | ||
1047 | |||
1001 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, | 1048 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, |
1002 | enum pipe pipe, int reg, u32 port_sel) | 1049 | enum pipe pipe, int reg, u32 port_sel) |
1003 | { | 1050 | { |
1004 | u32 val = I915_READ(reg); | 1051 | u32 val = I915_READ(reg); |
1005 | WARN(dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val), | 1052 | WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), |
1006 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | 1053 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", |
1007 | reg, pipe_name(pipe)); | 1054 | reg, pipe_name(pipe)); |
1008 | } | 1055 | } |
@@ -1011,7 +1058,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | |||
1011 | enum pipe pipe, int reg) | 1058 | enum pipe pipe, int reg) |
1012 | { | 1059 | { |
1013 | u32 val = I915_READ(reg); | 1060 | u32 val = I915_READ(reg); |
1014 | WARN(HDMI_PIPE_ENABLED(val, pipe), | 1061 | WARN(hdmi_pipe_enabled(dev_priv, pipe, val), |
1015 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | 1062 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", |
1016 | reg, pipe_name(pipe)); | 1063 | reg, pipe_name(pipe)); |
1017 | } | 1064 | } |
@@ -1028,13 +1075,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |||
1028 | 1075 | ||
1029 | reg = PCH_ADPA; | 1076 | reg = PCH_ADPA; |
1030 | val = I915_READ(reg); | 1077 | val = I915_READ(reg); |
1031 | WARN(ADPA_PIPE_ENABLED(val, pipe), | 1078 | WARN(adpa_pipe_enabled(dev_priv, pipe, val), |
1032 | "PCH VGA enabled on transcoder %c, should be disabled\n", | 1079 | "PCH VGA enabled on transcoder %c, should be disabled\n", |
1033 | pipe_name(pipe)); | 1080 | pipe_name(pipe)); |
1034 | 1081 | ||
1035 | reg = PCH_LVDS; | 1082 | reg = PCH_LVDS; |
1036 | val = I915_READ(reg); | 1083 | val = I915_READ(reg); |
1037 | WARN(LVDS_PIPE_ENABLED(val, pipe), | 1084 | WARN(lvds_pipe_enabled(dev_priv, pipe, val), |
1038 | "PCH LVDS enabled on transcoder %c, should be disabled\n", | 1085 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
1039 | pipe_name(pipe)); | 1086 | pipe_name(pipe)); |
1040 | 1087 | ||
@@ -1360,7 +1407,7 @@ static void disable_pch_dp(struct drm_i915_private *dev_priv, | |||
1360 | enum pipe pipe, int reg, u32 port_sel) | 1407 | enum pipe pipe, int reg, u32 port_sel) |
1361 | { | 1408 | { |
1362 | u32 val = I915_READ(reg); | 1409 | u32 val = I915_READ(reg); |
1363 | if (dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val)) { | 1410 | if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) { |
1364 | DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); | 1411 | DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); |
1365 | I915_WRITE(reg, val & ~DP_PORT_EN); | 1412 | I915_WRITE(reg, val & ~DP_PORT_EN); |
1366 | } | 1413 | } |
@@ -1370,7 +1417,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv, | |||
1370 | enum pipe pipe, int reg) | 1417 | enum pipe pipe, int reg) |
1371 | { | 1418 | { |
1372 | u32 val = I915_READ(reg); | 1419 | u32 val = I915_READ(reg); |
1373 | if (HDMI_PIPE_ENABLED(val, pipe)) { | 1420 | if (hdmi_pipe_enabled(dev_priv, pipe, val)) { |
1374 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", | 1421 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", |
1375 | reg, pipe); | 1422 | reg, pipe); |
1376 | I915_WRITE(reg, val & ~PORT_ENABLE); | 1423 | I915_WRITE(reg, val & ~PORT_ENABLE); |
@@ -1392,12 +1439,13 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, | |||
1392 | 1439 | ||
1393 | reg = PCH_ADPA; | 1440 | reg = PCH_ADPA; |
1394 | val = I915_READ(reg); | 1441 | val = I915_READ(reg); |
1395 | if (ADPA_PIPE_ENABLED(val, pipe)) | 1442 | if (adpa_pipe_enabled(dev_priv, pipe, val)) |
1396 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); | 1443 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); |
1397 | 1444 | ||
1398 | reg = PCH_LVDS; | 1445 | reg = PCH_LVDS; |
1399 | val = I915_READ(reg); | 1446 | val = I915_READ(reg); |
1400 | if (LVDS_PIPE_ENABLED(val, pipe)) { | 1447 | if (lvds_pipe_enabled(dev_priv, pipe, val)) { |
1448 | DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); | ||
1401 | I915_WRITE(reg, val & ~LVDS_PORT_EN); | 1449 | I915_WRITE(reg, val & ~LVDS_PORT_EN); |
1402 | POSTING_READ(reg); | 1450 | POSTING_READ(reg); |
1403 | udelay(100); | 1451 | udelay(100); |
@@ -5049,6 +5097,81 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
5049 | return ret; | 5097 | return ret; |
5050 | } | 5098 | } |
5051 | 5099 | ||
5100 | static void ironlake_update_pch_refclk(struct drm_device *dev) | ||
5101 | { | ||
5102 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5103 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
5104 | struct drm_crtc *crtc; | ||
5105 | struct intel_encoder *encoder; | ||
5106 | struct intel_encoder *has_edp_encoder = NULL; | ||
5107 | u32 temp; | ||
5108 | bool has_lvds = false; | ||
5109 | |||
5110 | /* We need to take the global config into account */ | ||
5111 | list_for_each_entry(crtc, &mode_config->crtc_list, head) { | ||
5112 | if (!crtc->enabled) | ||
5113 | continue; | ||
5114 | |||
5115 | list_for_each_entry(encoder, &mode_config->encoder_list, | ||
5116 | base.head) { | ||
5117 | if (encoder->base.crtc != crtc) | ||
5118 | continue; | ||
5119 | |||
5120 | switch (encoder->type) { | ||
5121 | case INTEL_OUTPUT_LVDS: | ||
5122 | has_lvds = true; | ||
5123 | case INTEL_OUTPUT_EDP: | ||
5124 | has_edp_encoder = encoder; | ||
5125 | break; | ||
5126 | } | ||
5127 | } | ||
5128 | } | ||
5129 | |||
5130 | /* Ironlake: try to setup display ref clock before DPLL | ||
5131 | * enabling. This is only under driver's control after | ||
5132 | * PCH B stepping, previous chipset stepping should be | ||
5133 | * ignoring this setting. | ||
5134 | */ | ||
5135 | temp = I915_READ(PCH_DREF_CONTROL); | ||
5136 | /* Always enable nonspread source */ | ||
5137 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
5138 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
5139 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
5140 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
5141 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5142 | |||
5143 | POSTING_READ(PCH_DREF_CONTROL); | ||
5144 | udelay(200); | ||
5145 | |||
5146 | if (has_edp_encoder) { | ||
5147 | if (intel_panel_use_ssc(dev_priv)) { | ||
5148 | temp |= DREF_SSC1_ENABLE; | ||
5149 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5150 | |||
5151 | POSTING_READ(PCH_DREF_CONTROL); | ||
5152 | udelay(200); | ||
5153 | } | ||
5154 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
5155 | |||
5156 | /* Enable CPU source on CPU attached eDP */ | ||
5157 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
5158 | if (intel_panel_use_ssc(dev_priv)) | ||
5159 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
5160 | else | ||
5161 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
5162 | } else { | ||
5163 | /* Enable SSC on PCH eDP if needed */ | ||
5164 | if (intel_panel_use_ssc(dev_priv)) { | ||
5165 | DRM_ERROR("enabling SSC on PCH\n"); | ||
5166 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | ||
5167 | } | ||
5168 | } | ||
5169 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5170 | POSTING_READ(PCH_DREF_CONTROL); | ||
5171 | udelay(200); | ||
5172 | } | ||
5173 | } | ||
5174 | |||
5052 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | 5175 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
5053 | struct drm_display_mode *mode, | 5176 | struct drm_display_mode *mode, |
5054 | struct drm_display_mode *adjusted_mode, | 5177 | struct drm_display_mode *adjusted_mode, |
@@ -5244,49 +5367,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5244 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, | 5367 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, |
5245 | &m_n); | 5368 | &m_n); |
5246 | 5369 | ||
5247 | /* Ironlake: try to setup display ref clock before DPLL | 5370 | ironlake_update_pch_refclk(dev); |
5248 | * enabling. This is only under driver's control after | ||
5249 | * PCH B stepping, previous chipset stepping should be | ||
5250 | * ignoring this setting. | ||
5251 | */ | ||
5252 | temp = I915_READ(PCH_DREF_CONTROL); | ||
5253 | /* Always enable nonspread source */ | ||
5254 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
5255 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
5256 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
5257 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
5258 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5259 | |||
5260 | POSTING_READ(PCH_DREF_CONTROL); | ||
5261 | udelay(200); | ||
5262 | |||
5263 | if (has_edp_encoder) { | ||
5264 | if (intel_panel_use_ssc(dev_priv)) { | ||
5265 | temp |= DREF_SSC1_ENABLE; | ||
5266 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5267 | |||
5268 | POSTING_READ(PCH_DREF_CONTROL); | ||
5269 | udelay(200); | ||
5270 | } | ||
5271 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
5272 | |||
5273 | /* Enable CPU source on CPU attached eDP */ | ||
5274 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
5275 | if (intel_panel_use_ssc(dev_priv)) | ||
5276 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
5277 | else | ||
5278 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
5279 | } else { | ||
5280 | /* Enable SSC on PCH eDP if needed */ | ||
5281 | if (intel_panel_use_ssc(dev_priv)) { | ||
5282 | DRM_ERROR("enabling SSC on PCH\n"); | ||
5283 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | ||
5284 | } | ||
5285 | } | ||
5286 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5287 | POSTING_READ(PCH_DREF_CONTROL); | ||
5288 | udelay(200); | ||
5289 | } | ||
5290 | 5371 | ||
5291 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | 5372 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
5292 | if (has_reduced_clock) | 5373 | if (has_reduced_clock) |
@@ -7157,8 +7238,6 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
7157 | intel_encoder_clones(dev, encoder->clone_mask); | 7238 | intel_encoder_clones(dev, encoder->clone_mask); |
7158 | } | 7239 | } |
7159 | 7240 | ||
7160 | intel_panel_setup_backlight(dev); | ||
7161 | |||
7162 | /* disable all the possible outputs/crtcs before entering KMS mode */ | 7241 | /* disable all the possible outputs/crtcs before entering KMS mode */ |
7163 | drm_helper_disable_unused_functions(dev); | 7242 | drm_helper_disable_unused_functions(dev); |
7164 | } | 7243 | } |
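The two intel_display.c hunks above are a code motion: the PCH_DREF_CONTROL programming that used to sit inside ironlake_crtc_mode_set() now lives in the new ironlake_update_pch_refclk() helper, and mode setting simply calls it. Every step of that helper follows the same write-then-settle idiom, sketched below with a made-up helper name (the driver open-codes the sequence with the DREF_* masks shown in the hunk; I915_READ/I915_WRITE/POSTING_READ are the usual driver-local MMIO macros).

/* illustrative only: pch_dref_rmw() does not exist in the driver */
static void pch_dref_rmw(struct drm_i915_private *dev_priv, u32 clear, u32 set)
{
	u32 temp = I915_READ(PCH_DREF_CONTROL);

	temp &= ~clear;
	temp |= set;
	I915_WRITE(PCH_DREF_CONTROL, temp);
	POSTING_READ(PCH_DREF_CONTROL);	/* flush the posted write */
	udelay(200);			/* give the reference clock time to settle */
}

The removal of intel_panel_setup_backlight() from intel_setup_outputs() pairs with the eDP and LVDS hunks below, which now register the backlight from the connector that actually owns the panel.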
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 0feae908bb37..44fef5e1c490 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1841,6 +1841,11 @@ done: | |||
1841 | static void | 1841 | static void |
1842 | intel_dp_destroy (struct drm_connector *connector) | 1842 | intel_dp_destroy (struct drm_connector *connector) |
1843 | { | 1843 | { |
1844 | struct drm_device *dev = connector->dev; | ||
1845 | |||
1846 | if (intel_dpd_is_edp(dev)) | ||
1847 | intel_panel_destroy_backlight(dev); | ||
1848 | |||
1844 | drm_sysfs_connector_remove(connector); | 1849 | drm_sysfs_connector_remove(connector); |
1845 | drm_connector_cleanup(connector); | 1850 | drm_connector_cleanup(connector); |
1846 | kfree(connector); | 1851 | kfree(connector); |
@@ -2072,6 +2077,8 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2072 | DRM_MODE_TYPE_PREFERRED; | 2077 | DRM_MODE_TYPE_PREFERRED; |
2073 | } | 2078 | } |
2074 | } | 2079 | } |
2080 | dev_priv->int_edp_connector = connector; | ||
2081 | intel_panel_setup_backlight(dev); | ||
2075 | } | 2082 | } |
2076 | 2083 | ||
2077 | intel_dp_add_properties(intel_dp, connector); | 2084 | intel_dp_add_properties(intel_dp, connector); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 7b330e76a435..0b2ee9d39980 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -297,9 +297,10 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, | |||
297 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); | 297 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); |
298 | extern u32 intel_panel_get_backlight(struct drm_device *dev); | 298 | extern u32 intel_panel_get_backlight(struct drm_device *dev); |
299 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); | 299 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); |
300 | extern void intel_panel_setup_backlight(struct drm_device *dev); | 300 | extern int intel_panel_setup_backlight(struct drm_device *dev); |
301 | extern void intel_panel_enable_backlight(struct drm_device *dev); | 301 | extern void intel_panel_enable_backlight(struct drm_device *dev); |
302 | extern void intel_panel_disable_backlight(struct drm_device *dev); | 302 | extern void intel_panel_disable_backlight(struct drm_device *dev); |
303 | extern void intel_panel_destroy_backlight(struct drm_device *dev); | ||
303 | extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); | 304 | extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); |
304 | 305 | ||
305 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 306 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 2e8ddfcba40c..31da77f5c051 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -72,14 +72,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds) | |||
72 | { | 72 | { |
73 | struct drm_device *dev = intel_lvds->base.base.dev; | 73 | struct drm_device *dev = intel_lvds->base.base.dev; |
74 | struct drm_i915_private *dev_priv = dev->dev_private; | 74 | struct drm_i915_private *dev_priv = dev->dev_private; |
75 | u32 ctl_reg, lvds_reg; | 75 | u32 ctl_reg, lvds_reg, stat_reg; |
76 | 76 | ||
77 | if (HAS_PCH_SPLIT(dev)) { | 77 | if (HAS_PCH_SPLIT(dev)) { |
78 | ctl_reg = PCH_PP_CONTROL; | 78 | ctl_reg = PCH_PP_CONTROL; |
79 | lvds_reg = PCH_LVDS; | 79 | lvds_reg = PCH_LVDS; |
80 | stat_reg = PCH_PP_STATUS; | ||
80 | } else { | 81 | } else { |
81 | ctl_reg = PP_CONTROL; | 82 | ctl_reg = PP_CONTROL; |
82 | lvds_reg = LVDS; | 83 | lvds_reg = LVDS; |
84 | stat_reg = PP_STATUS; | ||
83 | } | 85 | } |
84 | 86 | ||
85 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); | 87 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); |
@@ -94,17 +96,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds) | |||
94 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", | 96 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", |
95 | intel_lvds->pfit_control, | 97 | intel_lvds->pfit_control, |
96 | intel_lvds->pfit_pgm_ratios); | 98 | intel_lvds->pfit_pgm_ratios); |
97 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) { | 99 | |
98 | DRM_ERROR("timed out waiting for panel to power off\n"); | 100 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); |
99 | } else { | 101 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); |
100 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); | 102 | intel_lvds->pfit_dirty = false; |
101 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); | ||
102 | intel_lvds->pfit_dirty = false; | ||
103 | } | ||
104 | } | 103 | } |
105 | 104 | ||
106 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); | 105 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); |
107 | POSTING_READ(lvds_reg); | 106 | POSTING_READ(lvds_reg); |
107 | if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) | ||
108 | DRM_ERROR("timed out waiting for panel to power on\n"); | ||
108 | 109 | ||
109 | intel_panel_enable_backlight(dev); | 110 | intel_panel_enable_backlight(dev); |
110 | } | 111 | } |
@@ -113,24 +114,25 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds) | |||
113 | { | 114 | { |
114 | struct drm_device *dev = intel_lvds->base.base.dev; | 115 | struct drm_device *dev = intel_lvds->base.base.dev; |
115 | struct drm_i915_private *dev_priv = dev->dev_private; | 116 | struct drm_i915_private *dev_priv = dev->dev_private; |
116 | u32 ctl_reg, lvds_reg; | 117 | u32 ctl_reg, lvds_reg, stat_reg; |
117 | 118 | ||
118 | if (HAS_PCH_SPLIT(dev)) { | 119 | if (HAS_PCH_SPLIT(dev)) { |
119 | ctl_reg = PCH_PP_CONTROL; | 120 | ctl_reg = PCH_PP_CONTROL; |
120 | lvds_reg = PCH_LVDS; | 121 | lvds_reg = PCH_LVDS; |
122 | stat_reg = PCH_PP_STATUS; | ||
121 | } else { | 123 | } else { |
122 | ctl_reg = PP_CONTROL; | 124 | ctl_reg = PP_CONTROL; |
123 | lvds_reg = LVDS; | 125 | lvds_reg = LVDS; |
126 | stat_reg = PP_STATUS; | ||
124 | } | 127 | } |
125 | 128 | ||
126 | intel_panel_disable_backlight(dev); | 129 | intel_panel_disable_backlight(dev); |
127 | 130 | ||
128 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); | 131 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); |
132 | if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) | ||
133 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
129 | 134 | ||
130 | if (intel_lvds->pfit_control) { | 135 | if (intel_lvds->pfit_control) { |
131 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) | ||
132 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
133 | |||
134 | I915_WRITE(PFIT_CONTROL, 0); | 136 | I915_WRITE(PFIT_CONTROL, 0); |
135 | intel_lvds->pfit_dirty = true; | 137 | intel_lvds->pfit_dirty = true; |
136 | } | 138 | } |
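Both LVDS hunks above replace an open-coded poll of PP_STATUS with a poll of the per-platform stat_reg (PCH_PP_STATUS on PCH-split hardware), and move it so the code waits for the state it has just requested: PP_ON set after turning the panel on, PP_ON clear after turning it off. The waiting step amounts to the sketch below, assuming i915's wait_for() macro (condition plus timeout in milliseconds); the helper name is hypothetical.

/* hypothetical helper restating the wait added in both hunks */
static int panel_wait_power(struct drm_i915_private *dev_priv, u32 stat_reg, bool on)
{
	if (wait_for(((I915_READ(stat_reg) & PP_ON) != 0) == on, 1000)) {
		DRM_ERROR("timed out waiting for panel to power %s\n",
			  on ? "on" : "off");
		return -ETIMEDOUT;
	}
	return 0;
}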
@@ -398,53 +400,21 @@ out: | |||
398 | 400 | ||
399 | static void intel_lvds_prepare(struct drm_encoder *encoder) | 401 | static void intel_lvds_prepare(struct drm_encoder *encoder) |
400 | { | 402 | { |
401 | struct drm_device *dev = encoder->dev; | ||
402 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
403 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 403 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
404 | 404 | ||
405 | /* We try to do the minimum that is necessary in order to unlock | 405 | /* |
406 | * the registers for mode setting. | ||
407 | * | ||
408 | * On Ironlake, this is quite simple as we just set the unlock key | ||
409 | * and ignore all subtleties. (This may cause some issues...) | ||
410 | * | ||
411 | * Prior to Ironlake, we must disable the pipe if we want to adjust | 406 | * Prior to Ironlake, we must disable the pipe if we want to adjust |
412 | * the panel fitter. However at all other times we can just reset | 407 | * the panel fitter. However at all other times we can just reset |
413 | * the registers regardless. | 408 | * the registers regardless. |
414 | */ | 409 | */ |
415 | 410 | if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty) | |
416 | if (HAS_PCH_SPLIT(dev)) { | 411 | intel_lvds_disable(intel_lvds); |
417 | I915_WRITE(PCH_PP_CONTROL, | ||
418 | I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
419 | } else if (intel_lvds->pfit_dirty) { | ||
420 | I915_WRITE(PP_CONTROL, | ||
421 | (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS) | ||
422 | & ~POWER_TARGET_ON); | ||
423 | } else { | ||
424 | I915_WRITE(PP_CONTROL, | ||
425 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
426 | } | ||
427 | } | 412 | } |
428 | 413 | ||
429 | static void intel_lvds_commit(struct drm_encoder *encoder) | 414 | static void intel_lvds_commit(struct drm_encoder *encoder) |
430 | { | 415 | { |
431 | struct drm_device *dev = encoder->dev; | ||
432 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
433 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 416 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
434 | 417 | ||
435 | /* Undo any unlocking done in prepare to prevent accidental | ||
436 | * adjustment of the registers. | ||
437 | */ | ||
438 | if (HAS_PCH_SPLIT(dev)) { | ||
439 | u32 val = I915_READ(PCH_PP_CONTROL); | ||
440 | if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) | ||
441 | I915_WRITE(PCH_PP_CONTROL, val & 0x3); | ||
442 | } else { | ||
443 | u32 val = I915_READ(PP_CONTROL); | ||
444 | if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) | ||
445 | I915_WRITE(PP_CONTROL, val & 0x3); | ||
446 | } | ||
447 | |||
448 | /* Always do a full power on as we do not know what state | 418 | /* Always do a full power on as we do not know what state |
449 | * we were left in. | 419 | * we were left in. |
450 | */ | 420 | */ |
@@ -582,6 +552,8 @@ static void intel_lvds_destroy(struct drm_connector *connector) | |||
582 | struct drm_device *dev = connector->dev; | 552 | struct drm_device *dev = connector->dev; |
583 | struct drm_i915_private *dev_priv = dev->dev_private; | 553 | struct drm_i915_private *dev_priv = dev->dev_private; |
584 | 554 | ||
555 | intel_panel_destroy_backlight(dev); | ||
556 | |||
585 | if (dev_priv->lid_notifier.notifier_call) | 557 | if (dev_priv->lid_notifier.notifier_call) |
586 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); | 558 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); |
587 | drm_sysfs_connector_remove(connector); | 559 | drm_sysfs_connector_remove(connector); |
@@ -1040,6 +1012,19 @@ out: | |||
1040 | pwm = I915_READ(BLC_PWM_PCH_CTL1); | 1012 | pwm = I915_READ(BLC_PWM_PCH_CTL1); |
1041 | pwm |= PWM_PCH_ENABLE; | 1013 | pwm |= PWM_PCH_ENABLE; |
1042 | I915_WRITE(BLC_PWM_PCH_CTL1, pwm); | 1014 | I915_WRITE(BLC_PWM_PCH_CTL1, pwm); |
1015 | /* | ||
1016 | * Unlock registers and just | ||
1017 | * leave them unlocked | ||
1018 | */ | ||
1019 | I915_WRITE(PCH_PP_CONTROL, | ||
1020 | I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
1021 | } else { | ||
1022 | /* | ||
1023 | * Unlock registers and just | ||
1024 | * leave them unlocked | ||
1025 | */ | ||
1026 | I915_WRITE(PP_CONTROL, | ||
1027 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
1043 | } | 1028 | } |
1044 | dev_priv->lid_notifier.notifier_call = intel_lid_notify; | 1029 | dev_priv->lid_notifier.notifier_call = intel_lid_notify; |
1045 | if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { | 1030 | if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { |
@@ -1049,6 +1034,9 @@ out: | |||
1049 | /* keep the LVDS connector */ | 1034 | /* keep the LVDS connector */ |
1050 | dev_priv->int_lvds_connector = connector; | 1035 | dev_priv->int_lvds_connector = connector; |
1051 | drm_sysfs_connector_add(connector); | 1036 | drm_sysfs_connector_add(connector); |
1037 | |||
1038 | intel_panel_setup_backlight(dev); | ||
1039 | |||
1052 | return true; | 1040 | return true; |
1053 | 1041 | ||
1054 | failed: | 1042 | failed: |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b7c5ddb564d1..b8e8158bb16e 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -227,7 +227,6 @@ void intel_opregion_asle_intr(struct drm_device *dev) | |||
227 | asle->aslc = asle_stat; | 227 | asle->aslc = asle_stat; |
228 | } | 228 | } |
229 | 229 | ||
230 | /* Only present on Ironlake+ */ | ||
231 | void intel_opregion_gse_intr(struct drm_device *dev) | 230 | void intel_opregion_gse_intr(struct drm_device *dev) |
232 | { | 231 | { |
233 | struct drm_i915_private *dev_priv = dev->dev_private; | 232 | struct drm_i915_private *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 05f500cd9c24..a9e0c7bcd317 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -277,7 +277,7 @@ void intel_panel_enable_backlight(struct drm_device *dev) | |||
277 | dev_priv->backlight_enabled = true; | 277 | dev_priv->backlight_enabled = true; |
278 | } | 278 | } |
279 | 279 | ||
280 | void intel_panel_setup_backlight(struct drm_device *dev) | 280 | static void intel_panel_init_backlight(struct drm_device *dev) |
281 | { | 281 | { |
282 | struct drm_i915_private *dev_priv = dev->dev_private; | 282 | struct drm_i915_private *dev_priv = dev->dev_private; |
283 | 283 | ||
@@ -309,3 +309,73 @@ intel_panel_detect(struct drm_device *dev) | |||
309 | 309 | ||
310 | return connector_status_unknown; | 310 | return connector_status_unknown; |
311 | } | 311 | } |
312 | |||
313 | #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE | ||
314 | static int intel_panel_update_status(struct backlight_device *bd) | ||
315 | { | ||
316 | struct drm_device *dev = bl_get_data(bd); | ||
317 | intel_panel_set_backlight(dev, bd->props.brightness); | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | static int intel_panel_get_brightness(struct backlight_device *bd) | ||
322 | { | ||
323 | struct drm_device *dev = bl_get_data(bd); | ||
324 | return intel_panel_get_backlight(dev); | ||
325 | } | ||
326 | |||
327 | static const struct backlight_ops intel_panel_bl_ops = { | ||
328 | .update_status = intel_panel_update_status, | ||
329 | .get_brightness = intel_panel_get_brightness, | ||
330 | }; | ||
331 | |||
332 | int intel_panel_setup_backlight(struct drm_device *dev) | ||
333 | { | ||
334 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
335 | struct backlight_properties props; | ||
336 | struct drm_connector *connector; | ||
337 | |||
338 | intel_panel_init_backlight(dev); | ||
339 | |||
340 | if (dev_priv->int_lvds_connector) | ||
341 | connector = dev_priv->int_lvds_connector; | ||
342 | else if (dev_priv->int_edp_connector) | ||
343 | connector = dev_priv->int_edp_connector; | ||
344 | else | ||
345 | return -ENODEV; | ||
346 | |||
347 | props.type = BACKLIGHT_RAW; | ||
348 | props.max_brightness = intel_panel_get_max_backlight(dev); | ||
349 | dev_priv->backlight = | ||
350 | backlight_device_register("intel_backlight", | ||
351 | &connector->kdev, dev, | ||
352 | &intel_panel_bl_ops, &props); | ||
353 | |||
354 | if (IS_ERR(dev_priv->backlight)) { | ||
355 | DRM_ERROR("Failed to register backlight: %ld\n", | ||
356 | PTR_ERR(dev_priv->backlight)); | ||
357 | dev_priv->backlight = NULL; | ||
358 | return -ENODEV; | ||
359 | } | ||
360 | dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev); | ||
361 | return 0; | ||
362 | } | ||
363 | |||
364 | void intel_panel_destroy_backlight(struct drm_device *dev) | ||
365 | { | ||
366 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
367 | if (dev_priv->backlight) | ||
368 | backlight_device_unregister(dev_priv->backlight); | ||
369 | } | ||
370 | #else | ||
371 | int intel_panel_setup_backlight(struct drm_device *dev) | ||
372 | { | ||
373 | intel_panel_init_backlight(dev); | ||
374 | return 0; | ||
375 | } | ||
376 | |||
377 | void intel_panel_destroy_backlight(struct drm_device *dev) | ||
378 | { | ||
379 | return; | ||
380 | } | ||
381 | #endif | ||
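With this hunk the panel is exposed as a standard backlight class device instead of only being driven through i915-internal paths, and the #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE stubs keep the callers unconditional. The registration boils down to the stock backlight API, trimmed here to its essentials (error handling and the LVDS/eDP connector selection are omitted; names other than the API calls come from the hunk above).

#include <linux/backlight.h>

static const struct backlight_ops intel_panel_bl_ops = {
	.update_status	= intel_panel_update_status,	/* push bd->props.brightness to hw */
	.get_brightness	= intel_panel_get_brightness,	/* read the current hw level */
};

struct backlight_properties props = {
	.type		= BACKLIGHT_RAW,
	.max_brightness	= intel_panel_get_max_backlight(dev),
};
dev_priv->backlight = backlight_device_register("intel_backlight",
						 &connector->kdev, dev,
						 &intel_panel_bl_ops, &props);

Once registered, the device appears under /sys/class/backlight/intel_backlight/, so generic brightness tools can drive it without a driver-specific interface.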
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 47b9b2777038..c30626ea9f93 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -290,6 +290,10 @@ static int init_render_ring(struct intel_ring_buffer *ring) | |||
290 | if (IS_GEN6(dev) || IS_GEN7(dev)) | 290 | if (IS_GEN6(dev) || IS_GEN7(dev)) |
291 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; | 291 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; |
292 | I915_WRITE(MI_MODE, mode); | 292 | I915_WRITE(MI_MODE, mode); |
293 | if (IS_GEN7(dev)) | ||
294 | I915_WRITE(GFX_MODE_GEN7, | ||
295 | GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | | ||
296 | GFX_MODE_ENABLE(GFX_REPLAY_MODE)); | ||
293 | } | 297 | } |
294 | 298 | ||
295 | if (INTEL_INFO(dev)->gen >= 6) { | 299 | if (INTEL_INFO(dev)->gen >= 6) { |
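GFX_MODE_GEN7 is one of the "masked" registers on these GPUs: the upper 16 bits of the written value select which of the lower 16 bits the write is allowed to change, so an enable and a disable can be combined in one write with no read-modify-write. The two macros are believed to expand roughly as below; this is an assumption stated for readability, i915_reg.h is authoritative.

/* assumed expansion, not copied from i915_reg.h */
#define GFX_MODE_ENABLE(bit)	(((bit) << 16) | (bit))	/* unmask the bit and set it */
#define GFX_MODE_DISABLE(bit)	(((bit) << 16) | 0)	/* unmask the bit and clear it */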
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 8d02d875376d..c919cfc8f2fd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -530,7 +530,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) | |||
530 | nouveau_gpuobj_ref(NULL, &obj); | 530 | nouveau_gpuobj_ref(NULL, &obj); |
531 | if (ret) | 531 | if (ret) |
532 | return ret; | 532 | return ret; |
533 | } else { | 533 | } else |
534 | if (USE_SEMA(dev)) { | ||
534 | /* map fence bo into channel's vm */ | 535 | /* map fence bo into channel's vm */ |
535 | ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm, | 536 | ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm, |
536 | &chan->fence.vma); | 537 | &chan->fence.vma); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index c444cadbf849..2706cb3d871a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, | |||
37 | return -ENOMEM; | 37 | return -ENOMEM; |
38 | 38 | ||
39 | nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); | 39 | nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); |
40 | if (!nvbe->ttm_alloced) | 40 | if (!nvbe->ttm_alloced) { |
41 | kfree(nvbe->pages); | ||
42 | nvbe->pages = NULL; | ||
41 | return -ENOMEM; | 43 | return -ENOMEM; |
44 | } | ||
42 | 45 | ||
43 | nvbe->nr_pages = 0; | 46 | nvbe->nr_pages = 0; |
44 | while (num_pages--) { | 47 | while (num_pages--) { |
@@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) | |||
126 | 129 | ||
127 | for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { | 130 | for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { |
128 | nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); | 131 | nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); |
129 | dma_offset += NV_CTXDMA_PAGE_SIZE; | 132 | offset_l += NV_CTXDMA_PAGE_SIZE; |
130 | } | 133 | } |
131 | } | 134 | } |
132 | 135 | ||
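The first nouveau_sgdma.c hunk closes a small leak: when the second allocation failed, the function returned -ENOMEM while still holding nvbe->pages. The shape of the fix, as a self-contained sketch with generic names (not nouveau code):

#include <linux/slab.h>

struct two_bufs {
	struct page **pages;
	bool *flags;
};

static int two_bufs_alloc(struct two_bufs *b, unsigned long num_pages)
{
	b->pages = kcalloc(num_pages, sizeof(*b->pages), GFP_KERNEL);
	if (!b->pages)
		return -ENOMEM;

	b->flags = kcalloc(num_pages, sizeof(*b->flags), GFP_KERNEL);
	if (!b->flags) {
		kfree(b->pages);	/* undo the first allocation before bailing */
		b->pages = NULL;
		return -ENOMEM;
	}
	return 0;
}

The second hunk is unrelated but equally small: the nv04 page-table fill now advances offset_l, the value actually written into each PTE, rather than dma_offset, so consecutive sub-page entries no longer point at the same address.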
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 118261d4927a..5e45398a9e2d 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c | |||
@@ -781,11 +781,20 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
781 | struct drm_device *dev = crtc->dev; | 781 | struct drm_device *dev = crtc->dev; |
782 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 782 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
783 | struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; | 783 | struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; |
784 | struct drm_framebuffer *drm_fb = nv_crtc->base.fb; | 784 | struct drm_framebuffer *drm_fb; |
785 | struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); | 785 | struct nouveau_framebuffer *fb; |
786 | int arb_burst, arb_lwm; | 786 | int arb_burst, arb_lwm; |
787 | int ret; | 787 | int ret; |
788 | 788 | ||
789 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); | ||
790 | |||
791 | /* no fb bound */ | ||
792 | if (!atomic && !crtc->fb) { | ||
793 | NV_DEBUG_KMS(dev, "No FB bound\n"); | ||
794 | return 0; | ||
795 | } | ||
796 | |||
797 | |||
789 | /* If atomic, we want to switch to the fb we were passed, so | 798 | /* If atomic, we want to switch to the fb we were passed, so |
790 | * now we update pointers to do that. (We don't pin; just | 799 | * now we update pointers to do that. (We don't pin; just |
791 | * assume we're already pinned and update the base address.) | 800 | * assume we're already pinned and update the base address.) |
@@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
794 | drm_fb = passed_fb; | 803 | drm_fb = passed_fb; |
795 | fb = nouveau_framebuffer(passed_fb); | 804 | fb = nouveau_framebuffer(passed_fb); |
796 | } else { | 805 | } else { |
806 | drm_fb = crtc->fb; | ||
807 | fb = nouveau_framebuffer(crtc->fb); | ||
797 | /* If not atomic, we can go ahead and pin, and unpin the | 808 | /* If not atomic, we can go ahead and pin, and unpin the |
798 | * old fb we were passed. | 809 | * old fb we were passed. |
799 | */ | 810 | */ |
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 46ad59ea2185..5d989073ba6e 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
@@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
519 | struct drm_device *dev = nv_crtc->base.dev; | 519 | struct drm_device *dev = nv_crtc->base.dev; |
520 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 520 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
521 | struct nouveau_channel *evo = nv50_display(dev)->master; | 521 | struct nouveau_channel *evo = nv50_display(dev)->master; |
522 | struct drm_framebuffer *drm_fb = nv_crtc->base.fb; | 522 | struct drm_framebuffer *drm_fb; |
523 | struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); | 523 | struct nouveau_framebuffer *fb; |
524 | int ret; | 524 | int ret; |
525 | 525 | ||
526 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); | 526 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); |
527 | 527 | ||
528 | /* no fb bound */ | ||
529 | if (!atomic && !crtc->fb) { | ||
530 | NV_DEBUG_KMS(dev, "No FB bound\n"); | ||
531 | return 0; | ||
532 | } | ||
533 | |||
528 | /* If atomic, we want to switch to the fb we were passed, so | 534 | /* If atomic, we want to switch to the fb we were passed, so |
529 | * now we update pointers to do that. (We don't pin; just | 535 | * now we update pointers to do that. (We don't pin; just |
530 | * assume we're already pinned and update the base address.) | 536 | * assume we're already pinned and update the base address.) |
@@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
533 | drm_fb = passed_fb; | 539 | drm_fb = passed_fb; |
534 | fb = nouveau_framebuffer(passed_fb); | 540 | fb = nouveau_framebuffer(passed_fb); |
535 | } else { | 541 | } else { |
542 | drm_fb = crtc->fb; | ||
543 | fb = nouveau_framebuffer(crtc->fb); | ||
536 | /* If not atomic, we can go ahead and pin, and unpin the | 544 | /* If not atomic, we can go ahead and pin, and unpin the |
537 | * old fb we were passed. | 545 | * old fb we were passed. |
538 | */ | 546 | */ |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 645b84b3d203..7ad43c6b1db7 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -613,6 +613,18 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector, | |||
613 | return true; | 613 | return true; |
614 | } | 614 | } |
615 | 615 | ||
616 | bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) | ||
617 | { | ||
618 | u8 link_status[DP_LINK_STATUS_SIZE]; | ||
619 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; | ||
620 | |||
621 | if (!radeon_dp_get_link_status(radeon_connector, link_status)) | ||
622 | return false; | ||
623 | if (dp_channel_eq_ok(link_status, dig->dp_lane_count)) | ||
624 | return false; | ||
625 | return true; | ||
626 | } | ||
627 | |||
616 | struct radeon_dp_link_train_info { | 628 | struct radeon_dp_link_train_info { |
617 | struct radeon_device *rdev; | 629 | struct radeon_device *rdev; |
618 | struct drm_encoder *encoder; | 630 | struct drm_encoder *encoder; |
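radeon_dp_needs_link_train() gives the hotplug path a way to distinguish a sink that has dropped link training (for example after a monitor power cycle) from an actual unplug. Its intended caller appears in the radeon_connectors.c hunk near the end of this patch; the call site is reproduced here for reference:

/* from radeon_connector_hotplug(), later in this diff */
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
    radeon_dp_needs_link_train(radeon_connector))
	drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
else
	drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);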
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 14dce9f22172..e8a746712b5b 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev); | |||
41 | void evergreen_fini(struct radeon_device *rdev); | 41 | void evergreen_fini(struct radeon_device *rdev); |
42 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); | 42 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); |
43 | 43 | ||
44 | void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) | ||
45 | { | ||
46 | u16 ctl, v; | ||
47 | int cap, err; | ||
48 | |||
49 | cap = pci_pcie_cap(rdev->pdev); | ||
50 | if (!cap) | ||
51 | return; | ||
52 | |||
53 | err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl); | ||
54 | if (err) | ||
55 | return; | ||
56 | |||
57 | v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12; | ||
58 | |||
59 | /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it | ||
60 | * to avoid hangs or perfomance issues | ||
61 | */ | ||
62 | if ((v == 0) || (v == 6) || (v == 7)) { | ||
63 | ctl &= ~PCI_EXP_DEVCTL_READRQ; | ||
64 | ctl |= (2 << 12); | ||
65 | pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl); | ||
66 | } | ||
67 | } | ||
68 | |||
44 | void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) | 69 | void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) |
45 | { | 70 | { |
46 | /* enable the pflip int */ | 71 | /* enable the pflip int */ |
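evergreen_fix_pci_max_read_req_size() rewrites the MAX_READ_REQUEST_SIZE field of the PCIe Device Control register, which encodes the size as a power of two starting at 128 bytes; the 2 written above therefore means 512 bytes, and 6/7 are reserved encodings. A small worked example of the encoding (an aside for the reader, not driver code; the PCI core's pcie_get_readrq()/pcie_set_readrq() helpers express the same setting directly in bytes and could likely achieve the same effect):

#include <linux/pci_regs.h>

/* PCI_EXP_DEVCTL_READRQ masks bits 14:12 of the Device Control word */
static unsigned int readrq_bytes(u16 devctl)
{
	return 128U << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
/* readrq_bytes(2 << 12) == 512; encodings 6 and 7 would claim 8 KiB and
 * 16 KiB, which the spec does not allow - hence the clamp above. */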
@@ -743,7 +768,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, | |||
743 | !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || | 768 | !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || |
744 | !evergreen_check_latency_hiding(&wm) || | 769 | !evergreen_check_latency_hiding(&wm) || |
745 | (rdev->disp_priority == 2)) { | 770 | (rdev->disp_priority == 2)) { |
746 | DRM_INFO("force priority to high\n"); | 771 | DRM_DEBUG_KMS("force priority to high\n"); |
747 | priority_a_cnt |= PRIORITY_ALWAYS_ON; | 772 | priority_a_cnt |= PRIORITY_ALWAYS_ON; |
748 | priority_b_cnt |= PRIORITY_ALWAYS_ON; | 773 | priority_b_cnt |= PRIORITY_ALWAYS_ON; |
749 | } | 774 | } |
@@ -1357,6 +1382,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
1357 | SOFT_RESET_PA | | 1382 | SOFT_RESET_PA | |
1358 | SOFT_RESET_SH | | 1383 | SOFT_RESET_SH | |
1359 | SOFT_RESET_VGT | | 1384 | SOFT_RESET_VGT | |
1385 | SOFT_RESET_SPI | | ||
1360 | SOFT_RESET_SX)); | 1386 | SOFT_RESET_SX)); |
1361 | RREG32(GRBM_SOFT_RESET); | 1387 | RREG32(GRBM_SOFT_RESET); |
1362 | mdelay(15); | 1388 | mdelay(15); |
@@ -1378,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
1378 | /* Initialize the ring buffer's read and write pointers */ | 1404 | /* Initialize the ring buffer's read and write pointers */ |
1379 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); | 1405 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); |
1380 | WREG32(CP_RB_RPTR_WR, 0); | 1406 | WREG32(CP_RB_RPTR_WR, 0); |
1381 | WREG32(CP_RB_WPTR, 0); | 1407 | rdev->cp.wptr = 0; |
1408 | WREG32(CP_RB_WPTR, rdev->cp.wptr); | ||
1382 | 1409 | ||
1383 | /* set the wb address wether it's enabled or not */ | 1410 | /* set the wb address wether it's enabled or not */ |
1384 | WREG32(CP_RB_RPTR_ADDR, | 1411 | WREG32(CP_RB_RPTR_ADDR, |
@@ -1400,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
1400 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); | 1427 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); |
1401 | 1428 | ||
1402 | rdev->cp.rptr = RREG32(CP_RB_RPTR); | 1429 | rdev->cp.rptr = RREG32(CP_RB_RPTR); |
1403 | rdev->cp.wptr = RREG32(CP_RB_WPTR); | ||
1404 | 1430 | ||
1405 | evergreen_cp_start(rdev); | 1431 | evergreen_cp_start(rdev); |
1406 | rdev->cp.ready = true; | 1432 | rdev->cp.ready = true; |
@@ -1862,6 +1888,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1862 | 1888 | ||
1863 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | 1889 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
1864 | 1890 | ||
1891 | evergreen_fix_pci_max_read_req_size(rdev); | ||
1892 | |||
1865 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; | 1893 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; |
1866 | 1894 | ||
1867 | cc_gc_shader_pipe_config |= | 1895 | cc_gc_shader_pipe_config |= |
@@ -3143,21 +3171,23 @@ int evergreen_suspend(struct radeon_device *rdev) | |||
3143 | } | 3171 | } |
3144 | 3172 | ||
3145 | int evergreen_copy_blit(struct radeon_device *rdev, | 3173 | int evergreen_copy_blit(struct radeon_device *rdev, |
3146 | uint64_t src_offset, uint64_t dst_offset, | 3174 | uint64_t src_offset, |
3147 | unsigned num_pages, struct radeon_fence *fence) | 3175 | uint64_t dst_offset, |
3176 | unsigned num_gpu_pages, | ||
3177 | struct radeon_fence *fence) | ||
3148 | { | 3178 | { |
3149 | int r; | 3179 | int r; |
3150 | 3180 | ||
3151 | mutex_lock(&rdev->r600_blit.mutex); | 3181 | mutex_lock(&rdev->r600_blit.mutex); |
3152 | rdev->r600_blit.vb_ib = NULL; | 3182 | rdev->r600_blit.vb_ib = NULL; |
3153 | r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); | 3183 | r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE); |
3154 | if (r) { | 3184 | if (r) { |
3155 | if (rdev->r600_blit.vb_ib) | 3185 | if (rdev->r600_blit.vb_ib) |
3156 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | 3186 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); |
3157 | mutex_unlock(&rdev->r600_blit.mutex); | 3187 | mutex_unlock(&rdev->r600_blit.mutex); |
3158 | return r; | 3188 | return r; |
3159 | } | 3189 | } |
3160 | evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); | 3190 | evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE); |
3161 | evergreen_blit_done_copy(rdev, fence); | 3191 | evergreen_blit_done_copy(rdev, fence); |
3162 | mutex_unlock(&rdev->r600_blit.mutex); | 3192 | mutex_unlock(&rdev->r600_blit.mutex); |
3163 | return 0; | 3193 | return 0; |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 44c4750f4518..99fbd793c08c 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev); | |||
39 | extern void evergreen_mc_program(struct radeon_device *rdev); | 39 | extern void evergreen_mc_program(struct radeon_device *rdev); |
40 | extern void evergreen_irq_suspend(struct radeon_device *rdev); | 40 | extern void evergreen_irq_suspend(struct radeon_device *rdev); |
41 | extern int evergreen_mc_init(struct radeon_device *rdev); | 41 | extern int evergreen_mc_init(struct radeon_device *rdev); |
42 | extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); | ||
42 | 43 | ||
43 | #define EVERGREEN_PFP_UCODE_SIZE 1120 | 44 | #define EVERGREEN_PFP_UCODE_SIZE 1120 |
44 | #define EVERGREEN_PM4_UCODE_SIZE 1376 | 45 | #define EVERGREEN_PM4_UCODE_SIZE 1376 |
@@ -669,6 +670,8 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
669 | 670 | ||
670 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | 671 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
671 | 672 | ||
673 | evergreen_fix_pci_max_read_req_size(rdev); | ||
674 | |||
672 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); | 675 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); |
673 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | 676 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); |
674 | 677 | ||
@@ -1159,6 +1162,7 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1159 | SOFT_RESET_PA | | 1162 | SOFT_RESET_PA | |
1160 | SOFT_RESET_SH | | 1163 | SOFT_RESET_SH | |
1161 | SOFT_RESET_VGT | | 1164 | SOFT_RESET_VGT | |
1165 | SOFT_RESET_SPI | | ||
1162 | SOFT_RESET_SX)); | 1166 | SOFT_RESET_SX)); |
1163 | RREG32(GRBM_SOFT_RESET); | 1167 | RREG32(GRBM_SOFT_RESET); |
1164 | mdelay(15); | 1168 | mdelay(15); |
@@ -1183,7 +1187,8 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1183 | 1187 | ||
1184 | /* Initialize the ring buffer's read and write pointers */ | 1188 | /* Initialize the ring buffer's read and write pointers */ |
1185 | WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); | 1189 | WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); |
1186 | WREG32(CP_RB0_WPTR, 0); | 1190 | rdev->cp.wptr = 0; |
1191 | WREG32(CP_RB0_WPTR, rdev->cp.wptr); | ||
1187 | 1192 | ||
1188 | /* set the wb address wether it's enabled or not */ | 1193 | /* set the wb address wether it's enabled or not */ |
1189 | WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); | 1194 | WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); |
@@ -1203,7 +1208,6 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1203 | WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8); | 1208 | WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8); |
1204 | 1209 | ||
1205 | rdev->cp.rptr = RREG32(CP_RB0_RPTR); | 1210 | rdev->cp.rptr = RREG32(CP_RB0_RPTR); |
1206 | rdev->cp.wptr = RREG32(CP_RB0_WPTR); | ||
1207 | 1211 | ||
1208 | /* ring1 - compute only */ | 1212 | /* ring1 - compute only */ |
1209 | /* Set ring buffer size */ | 1213 | /* Set ring buffer size */ |
@@ -1216,7 +1220,8 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1216 | 1220 | ||
1217 | /* Initialize the ring buffer's read and write pointers */ | 1221 | /* Initialize the ring buffer's read and write pointers */ |
1218 | WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); | 1222 | WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); |
1219 | WREG32(CP_RB1_WPTR, 0); | 1223 | rdev->cp1.wptr = 0; |
1224 | WREG32(CP_RB1_WPTR, rdev->cp1.wptr); | ||
1220 | 1225 | ||
1221 | /* set the wb address wether it's enabled or not */ | 1226 | /* set the wb address wether it's enabled or not */ |
1222 | WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); | 1227 | WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); |
@@ -1228,7 +1233,6 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1228 | WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8); | 1233 | WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8); |
1229 | 1234 | ||
1230 | rdev->cp1.rptr = RREG32(CP_RB1_RPTR); | 1235 | rdev->cp1.rptr = RREG32(CP_RB1_RPTR); |
1231 | rdev->cp1.wptr = RREG32(CP_RB1_WPTR); | ||
1232 | 1236 | ||
1233 | /* ring2 - compute only */ | 1237 | /* ring2 - compute only */ |
1234 | /* Set ring buffer size */ | 1238 | /* Set ring buffer size */ |
@@ -1241,7 +1245,8 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1241 | 1245 | ||
1242 | /* Initialize the ring buffer's read and write pointers */ | 1246 | /* Initialize the ring buffer's read and write pointers */ |
1243 | WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); | 1247 | WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); |
1244 | WREG32(CP_RB2_WPTR, 0); | 1248 | rdev->cp2.wptr = 0; |
1249 | WREG32(CP_RB2_WPTR, rdev->cp2.wptr); | ||
1245 | 1250 | ||
1246 | /* set the wb address wether it's enabled or not */ | 1251 | /* set the wb address wether it's enabled or not */ |
1247 | WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); | 1252 | WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); |
@@ -1253,7 +1258,6 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1253 | WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8); | 1258 | WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8); |
1254 | 1259 | ||
1255 | rdev->cp2.rptr = RREG32(CP_RB2_RPTR); | 1260 | rdev->cp2.rptr = RREG32(CP_RB2_RPTR); |
1256 | rdev->cp2.wptr = RREG32(CP_RB2_WPTR); | ||
1257 | 1261 | ||
1258 | /* start the rings */ | 1262 | /* start the rings */ |
1259 | cayman_cp_start(rdev); | 1263 | cayman_cp_start(rdev); |
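Every CP resume path touched above (evergreen, and the three cayman rings here) stops reading the write pointer back from hardware after reset, where it may contain garbage, and instead seeds the driver's copy and programs the register from it; only the read pointer is still taken from hardware. Condensed, the repeated pattern is:

rdev->cp.wptr = 0;			/* seed the software copy           */
WREG32(CP_RB0_WPTR, rdev->cp.wptr);	/* ...and program the ring from it  */
/* wb address and RB_CNTL setup unchanged */
rdev->cp.rptr = RREG32(CP_RB0_RPTR);	/* rptr is still read from the hw   */

The r100 hunk further down removes the "protect against crazy HW on resume" masking for the same reason: once the driver owns the write pointer, there is nothing to sanitize.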
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index f2204cb1ccdf..5b1837b4aacf 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev, | |||
721 | int r100_copy_blit(struct radeon_device *rdev, | 721 | int r100_copy_blit(struct radeon_device *rdev, |
722 | uint64_t src_offset, | 722 | uint64_t src_offset, |
723 | uint64_t dst_offset, | 723 | uint64_t dst_offset, |
724 | unsigned num_pages, | 724 | unsigned num_gpu_pages, |
725 | struct radeon_fence *fence) | 725 | struct radeon_fence *fence) |
726 | { | 726 | { |
727 | uint32_t cur_pages; | 727 | uint32_t cur_pages; |
728 | uint32_t stride_bytes = PAGE_SIZE; | 728 | uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; |
729 | uint32_t pitch; | 729 | uint32_t pitch; |
730 | uint32_t stride_pixels; | 730 | uint32_t stride_pixels; |
731 | unsigned ndw; | 731 | unsigned ndw; |
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
737 | /* radeon pitch is /64 */ | 737 | /* radeon pitch is /64 */ |
738 | pitch = stride_bytes / 64; | 738 | pitch = stride_bytes / 64; |
739 | stride_pixels = stride_bytes / 4; | 739 | stride_pixels = stride_bytes / 4; |
740 | num_loops = DIV_ROUND_UP(num_pages, 8191); | 740 | num_loops = DIV_ROUND_UP(num_gpu_pages, 8191); |
741 | 741 | ||
742 | /* Ask for enough room for blit + flush + fence */ | 742 | /* Ask for enough room for blit + flush + fence */ |
743 | ndw = 64 + (10 * num_loops); | 743 | ndw = 64 + (10 * num_loops); |
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
746 | DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); | 746 | DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); |
747 | return -EINVAL; | 747 | return -EINVAL; |
748 | } | 748 | } |
749 | while (num_pages > 0) { | 749 | while (num_gpu_pages > 0) { |
750 | cur_pages = num_pages; | 750 | cur_pages = num_gpu_pages; |
751 | if (cur_pages > 8191) { | 751 | if (cur_pages > 8191) { |
752 | cur_pages = 8191; | 752 | cur_pages = 8191; |
753 | } | 753 | } |
754 | num_pages -= cur_pages; | 754 | num_gpu_pages -= cur_pages; |
755 | 755 | ||
756 | /* pages are in Y direction - height | 756 | /* pages are in Y direction - height |
757 | page width in X direction - width */ | 757 | page width in X direction - width */ |
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
773 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); | 773 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); |
774 | radeon_ring_write(rdev, 0); | 774 | radeon_ring_write(rdev, 0); |
775 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); | 775 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); |
776 | radeon_ring_write(rdev, num_pages); | 776 | radeon_ring_write(rdev, cur_pages); |
777 | radeon_ring_write(rdev, num_pages); | 777 | radeon_ring_write(rdev, cur_pages); |
778 | radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); | 778 | radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); |
779 | } | 779 | } |
780 | radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); | 780 | radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); |
@@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
990 | /* Force read & write ptr to 0 */ | 990 | /* Force read & write ptr to 0 */ |
991 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); | 991 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); |
992 | WREG32(RADEON_CP_RB_RPTR_WR, 0); | 992 | WREG32(RADEON_CP_RB_RPTR_WR, 0); |
993 | WREG32(RADEON_CP_RB_WPTR, 0); | 993 | rdev->cp.wptr = 0; |
994 | WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); | ||
994 | 995 | ||
995 | /* set the wb address whether it's enabled or not */ | 996 | /* set the wb address whether it's enabled or not */ |
996 | WREG32(R_00070C_CP_RB_RPTR_ADDR, | 997 | WREG32(R_00070C_CP_RB_RPTR_ADDR, |
@@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1007 | WREG32(RADEON_CP_RB_CNTL, tmp); | 1008 | WREG32(RADEON_CP_RB_CNTL, tmp); |
1008 | udelay(10); | 1009 | udelay(10); |
1009 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); | 1010 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); |
1010 | rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); | ||
1011 | /* protect against crazy HW on resume */ | ||
1012 | rdev->cp.wptr &= rdev->cp.ptr_mask; | ||
1013 | /* Set cp mode to bus mastering & enable cp*/ | 1011 | /* Set cp mode to bus mastering & enable cp*/ |
1014 | WREG32(RADEON_CP_CSQ_MODE, | 1012 | WREG32(RADEON_CP_CSQ_MODE, |
1015 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | | 1013 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index f24058300413..a1f3ba063c2d 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0) | |||
84 | int r200_copy_dma(struct radeon_device *rdev, | 84 | int r200_copy_dma(struct radeon_device *rdev, |
85 | uint64_t src_offset, | 85 | uint64_t src_offset, |
86 | uint64_t dst_offset, | 86 | uint64_t dst_offset, |
87 | unsigned num_pages, | 87 | unsigned num_gpu_pages, |
88 | struct radeon_fence *fence) | 88 | struct radeon_fence *fence) |
89 | { | 89 | { |
90 | uint32_t size; | 90 | uint32_t size; |
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev, | |||
93 | int r = 0; | 93 | int r = 0; |
94 | 94 | ||
95 | /* radeon pitch is /64 */ | 95 | /* radeon pitch is /64 */ |
96 | size = num_pages << PAGE_SHIFT; | 96 | size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT; |
97 | num_loops = DIV_ROUND_UP(size, 0x1FFFFF); | 97 | num_loops = DIV_ROUND_UP(size, 0x1FFFFF); |
98 | r = radeon_ring_lock(rdev, num_loops * 4 + 64); | 98 | r = radeon_ring_lock(rdev, num_loops * 4 + 64); |
99 | if (r) { | 99 | if (r) { |
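The num_pages -> num_gpu_pages rename that runs through r100, r200, r600, evergreen, radeon.h and radeon_asic.h is not cosmetic: the counts passed to these copy hooks are in RADEON_GPU_PAGE_SIZE (4 KiB) units, while the CPU PAGE_SIZE may be larger (64 KiB is common on some powerpc configurations), so shifting by PAGE_SHIFT or using PAGE_SIZE as the stride overstated the transfer. The new RADEON_GPU_PAGE_SHIFT makes the arithmetic explicit; a worked example under an assumed 64 KiB CPU page:

/* illustrative arithmetic only */
unsigned int num_gpu_pages = 32;

size_t right = (size_t)num_gpu_pages << RADEON_GPU_PAGE_SHIFT;	/* 32 * 4096  = 128 KiB */
size_t wrong = (size_t)num_gpu_pages << 16;			/* 32 * 65536 = 2 MiB if PAGE_SHIFT were 16 */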
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index aa5571b73aa0..720dd99163f8 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2209 | /* Initialize the ring buffer's read and write pointers */ | 2209 | /* Initialize the ring buffer's read and write pointers */ |
2210 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); | 2210 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); |
2211 | WREG32(CP_RB_RPTR_WR, 0); | 2211 | WREG32(CP_RB_RPTR_WR, 0); |
2212 | WREG32(CP_RB_WPTR, 0); | 2212 | rdev->cp.wptr = 0; |
2213 | WREG32(CP_RB_WPTR, rdev->cp.wptr); | ||
2213 | 2214 | ||
2214 | /* set the wb address whether it's enabled or not */ | 2215 | /* set the wb address whether it's enabled or not */ |
2215 | WREG32(CP_RB_RPTR_ADDR, | 2216 | WREG32(CP_RB_RPTR_ADDR, |
@@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2231 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); | 2232 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); |
2232 | 2233 | ||
2233 | rdev->cp.rptr = RREG32(CP_RB_RPTR); | 2234 | rdev->cp.rptr = RREG32(CP_RB_RPTR); |
2234 | rdev->cp.wptr = RREG32(CP_RB_WPTR); | ||
2235 | 2235 | ||
2236 | r600_cp_start(rdev); | 2236 | r600_cp_start(rdev); |
2237 | rdev->cp.ready = true; | 2237 | rdev->cp.ready = true; |
@@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev, | |||
2353 | } | 2353 | } |
2354 | 2354 | ||
2355 | int r600_copy_blit(struct radeon_device *rdev, | 2355 | int r600_copy_blit(struct radeon_device *rdev, |
2356 | uint64_t src_offset, uint64_t dst_offset, | 2356 | uint64_t src_offset, |
2357 | unsigned num_pages, struct radeon_fence *fence) | 2357 | uint64_t dst_offset, |
2358 | unsigned num_gpu_pages, | ||
2359 | struct radeon_fence *fence) | ||
2358 | { | 2360 | { |
2359 | int r; | 2361 | int r; |
2360 | 2362 | ||
2361 | mutex_lock(&rdev->r600_blit.mutex); | 2363 | mutex_lock(&rdev->r600_blit.mutex); |
2362 | rdev->r600_blit.vb_ib = NULL; | 2364 | rdev->r600_blit.vb_ib = NULL; |
2363 | r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); | 2365 | r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE); |
2364 | if (r) { | 2366 | if (r) { |
2365 | if (rdev->r600_blit.vb_ib) | 2367 | if (rdev->r600_blit.vb_ib) |
2366 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | 2368 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); |
2367 | mutex_unlock(&rdev->r600_blit.mutex); | 2369 | mutex_unlock(&rdev->r600_blit.mutex); |
2368 | return r; | 2370 | return r; |
2369 | } | 2371 | } |
2370 | r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); | 2372 | r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE); |
2371 | r600_blit_done_copy(rdev, fence); | 2373 | r600_blit_done_copy(rdev, fence); |
2372 | mutex_unlock(&rdev->r600_blit.mutex); | 2374 | mutex_unlock(&rdev->r600_blit.mutex); |
2373 | return 0; | 2375 | return 0; |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 32807baf55e2..c1e056b35b29 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -322,6 +322,7 @@ union radeon_gart_table { | |||
322 | 322 | ||
323 | #define RADEON_GPU_PAGE_SIZE 4096 | 323 | #define RADEON_GPU_PAGE_SIZE 4096 |
324 | #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) | 324 | #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) |
325 | #define RADEON_GPU_PAGE_SHIFT 12 | ||
325 | 326 | ||
326 | struct radeon_gart { | 327 | struct radeon_gart { |
327 | dma_addr_t table_addr; | 328 | dma_addr_t table_addr; |
@@ -914,17 +915,17 @@ struct radeon_asic { | |||
914 | int (*copy_blit)(struct radeon_device *rdev, | 915 | int (*copy_blit)(struct radeon_device *rdev, |
915 | uint64_t src_offset, | 916 | uint64_t src_offset, |
916 | uint64_t dst_offset, | 917 | uint64_t dst_offset, |
917 | unsigned num_pages, | 918 | unsigned num_gpu_pages, |
918 | struct radeon_fence *fence); | 919 | struct radeon_fence *fence); |
919 | int (*copy_dma)(struct radeon_device *rdev, | 920 | int (*copy_dma)(struct radeon_device *rdev, |
920 | uint64_t src_offset, | 921 | uint64_t src_offset, |
921 | uint64_t dst_offset, | 922 | uint64_t dst_offset, |
922 | unsigned num_pages, | 923 | unsigned num_gpu_pages, |
923 | struct radeon_fence *fence); | 924 | struct radeon_fence *fence); |
924 | int (*copy)(struct radeon_device *rdev, | 925 | int (*copy)(struct radeon_device *rdev, |
925 | uint64_t src_offset, | 926 | uint64_t src_offset, |
926 | uint64_t dst_offset, | 927 | uint64_t dst_offset, |
927 | unsigned num_pages, | 928 | unsigned num_gpu_pages, |
928 | struct radeon_fence *fence); | 929 | struct radeon_fence *fence); |
929 | uint32_t (*get_engine_clock)(struct radeon_device *rdev); | 930 | uint32_t (*get_engine_clock)(struct radeon_device *rdev); |
930 | void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); | 931 | void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 3d7a0d7c6a9a..3dedaa07aac1 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg); | |||
75 | int r100_copy_blit(struct radeon_device *rdev, | 75 | int r100_copy_blit(struct radeon_device *rdev, |
76 | uint64_t src_offset, | 76 | uint64_t src_offset, |
77 | uint64_t dst_offset, | 77 | uint64_t dst_offset, |
78 | unsigned num_pages, | 78 | unsigned num_gpu_pages, |
79 | struct radeon_fence *fence); | 79 | struct radeon_fence *fence); |
80 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, | 80 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, |
81 | uint32_t tiling_flags, uint32_t pitch, | 81 | uint32_t tiling_flags, uint32_t pitch, |
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); | |||
143 | extern int r200_copy_dma(struct radeon_device *rdev, | 143 | extern int r200_copy_dma(struct radeon_device *rdev, |
144 | uint64_t src_offset, | 144 | uint64_t src_offset, |
145 | uint64_t dst_offset, | 145 | uint64_t dst_offset, |
146 | unsigned num_pages, | 146 | unsigned num_gpu_pages, |
147 | struct radeon_fence *fence); | 147 | struct radeon_fence *fence); |
148 | void r200_set_safe_registers(struct radeon_device *rdev); | 148 | void r200_set_safe_registers(struct radeon_device *rdev); |
149 | 149 | ||
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | |||
311 | int r600_ring_test(struct radeon_device *rdev); | 311 | int r600_ring_test(struct radeon_device *rdev); |
312 | int r600_copy_blit(struct radeon_device *rdev, | 312 | int r600_copy_blit(struct radeon_device *rdev, |
313 | uint64_t src_offset, uint64_t dst_offset, | 313 | uint64_t src_offset, uint64_t dst_offset, |
314 | unsigned num_pages, struct radeon_fence *fence); | 314 | unsigned num_gpu_pages, struct radeon_fence *fence); |
315 | void r600_hpd_init(struct radeon_device *rdev); | 315 | void r600_hpd_init(struct radeon_device *rdev); |
316 | void r600_hpd_fini(struct radeon_device *rdev); | 316 | void r600_hpd_fini(struct radeon_device *rdev); |
317 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 317 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev); | |||
403 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 403 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
404 | int evergreen_copy_blit(struct radeon_device *rdev, | 404 | int evergreen_copy_blit(struct radeon_device *rdev, |
405 | uint64_t src_offset, uint64_t dst_offset, | 405 | uint64_t src_offset, uint64_t dst_offset, |
406 | unsigned num_pages, struct radeon_fence *fence); | 406 | unsigned num_gpu_pages, struct radeon_fence *fence); |
407 | void evergreen_hpd_init(struct radeon_device *rdev); | 407 | void evergreen_hpd_init(struct radeon_device *rdev); |
408 | void evergreen_hpd_fini(struct radeon_device *rdev); | 408 | void evergreen_hpd_fini(struct radeon_device *rdev); |
409 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 409 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index dcd0863e31ae..b6e18c8db9f5 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
@@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev) | |||
219 | } else { | 219 | } else { |
220 | DRM_INFO("Using generic clock info\n"); | 220 | DRM_INFO("Using generic clock info\n"); |
221 | 221 | ||
222 | /* may need to be per card */ | ||
223 | rdev->clock.max_pixel_clock = 35000; | ||
224 | |||
222 | if (rdev->flags & RADEON_IS_IGP) { | 225 | if (rdev->flags & RADEON_IS_IGP) { |
223 | p1pll->reference_freq = 1432; | 226 | p1pll->reference_freq = 1432; |
224 | p2pll->reference_freq = 1432; | 227 | p2pll->reference_freq = 1432; |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index e0138b674aca..63675241c7ff 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -3298,6 +3298,14 @@ void radeon_combios_asic_init(struct drm_device *dev) | |||
3298 | rdev->pdev->subsystem_device == 0x30a4) | 3298 | rdev->pdev->subsystem_device == 0x30a4) |
3299 | return; | 3299 | return; |
3300 | 3300 | ||
3301 | /* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume | ||
3302 | * - it hangs on resume inside the dynclk 1 table. | ||
3303 | */ | ||
3304 | if (rdev->family == CHIP_RS480 && | ||
3305 | rdev->pdev->subsystem_vendor == 0x103c && | ||
3306 | rdev->pdev->subsystem_device == 0x30ae) | ||
3307 | return; | ||
3308 | |||
3301 | /* DYN CLK 1 */ | 3309 | /* DYN CLK 1 */ |
3302 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); | 3310 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); |
3303 | if (table) | 3311 | if (table) |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 6d6b5f16bc09..c4b8741dbf58 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -60,18 +60,20 @@ void radeon_connector_hotplug(struct drm_connector *connector) | |||
60 | 60 | ||
61 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | 61 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); |
62 | 62 | ||
63 | /* powering up/down the eDP panel generates hpd events which | 63 | /* if the connector is already off, don't turn it back on */ |
64 | * can interfere with modesetting. | 64 | if (connector->dpms != DRM_MODE_DPMS_ON) |
65 | */ | ||
66 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) | ||
67 | return; | 65 | return; |
68 | 66 | ||
69 | /* pre-r600 did not always have the hpd pins mapped accurately to connectors */ | 67 | /* just deal with DP (not eDP) here. */ |
70 | if (rdev->family >= CHIP_R600) { | 68 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { |
71 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) | 69 | int saved_dpms = connector->dpms; |
70 | |||
71 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && | ||
72 | radeon_dp_needs_link_train(radeon_connector)) | ||
72 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | 73 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); |
73 | else | 74 | else |
74 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | 75 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); |
76 | connector->dpms = saved_dpms; | ||
75 | } | 77 | } |
76 | } | 78 | } |
77 | 79 | ||
@@ -464,6 +466,16 @@ static bool radeon_connector_needs_extended_probe(struct radeon_device *dev, | |||
464 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | 466 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) |
465 | return true; | 467 | return true; |
466 | } | 468 | } |
469 | /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100 | ||
470 | * (RS690M) sends data to i2c bus for a HDMI connector that | ||
471 | * is not implemented */ | ||
472 | if ((dev->pdev->device == 0x791f) && | ||
473 | (dev->pdev->subsystem_vendor == 0x1179) && | ||
474 | (dev->pdev->subsystem_device == 0xff68)) { | ||
475 | if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) && | ||
476 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | ||
477 | return true; | ||
478 | } | ||
467 | 479 | ||
468 | /* Default: no EDID header probe required for DDC probing */ | 480 | /* Default: no EDID header probe required for DDC probing */ |
469 | return false; | 481 | return false; |
@@ -474,11 +486,19 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, | |||
474 | { | 486 | { |
475 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 487 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
476 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | 488 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; |
489 | struct drm_display_mode *t, *mode; | ||
490 | |||
491 | /* If the EDID preferred mode doesn't match the native mode, use it */ | ||
492 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { | ||
493 | if (mode->type & DRM_MODE_TYPE_PREFERRED) { | ||
494 | if (mode->hdisplay != native_mode->hdisplay || | ||
495 | mode->vdisplay != native_mode->vdisplay) | ||
496 | memcpy(native_mode, mode, sizeof(*mode)); | ||
497 | } | ||
498 | } | ||
477 | 499 | ||
478 | /* Try to get native mode details from EDID if necessary */ | 500 | /* Try to get native mode details from EDID if necessary */ |
479 | if (!native_mode->clock) { | 501 | if (!native_mode->clock) { |
480 | struct drm_display_mode *t, *mode; | ||
481 | |||
482 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { | 502 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { |
483 | if (mode->hdisplay == native_mode->hdisplay && | 503 | if (mode->hdisplay == native_mode->hdisplay && |
484 | mode->vdisplay == native_mode->vdisplay) { | 504 | mode->vdisplay == native_mode->vdisplay) { |
@@ -489,6 +509,7 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, | |||
489 | } | 509 | } |
490 | } | 510 | } |
491 | } | 511 | } |
512 | |||
492 | if (!native_mode->clock) { | 513 | if (!native_mode->clock) { |
493 | DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); | 514 | DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); |
494 | radeon_encoder->rmx_type = RMX_OFF; | 515 | radeon_encoder->rmx_type = RMX_OFF; |
@@ -1276,12 +1297,33 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1276 | if (!radeon_dig_connector->edp_on) | 1297 | if (!radeon_dig_connector->edp_on) |
1277 | atombios_set_edp_panel_power(connector, | 1298 | atombios_set_edp_panel_power(connector, |
1278 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | 1299 | ATOM_TRANSMITTER_ACTION_POWER_OFF); |
1279 | } else { | 1300 | } else if (radeon_connector_encoder_is_dp_bridge(connector)) { |
1280 | /* need to setup ddc on the bridge */ | 1301 | /* DP bridges are always DP */ |
1281 | if (radeon_connector_encoder_is_dp_bridge(connector)) { | 1302 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; |
1303 | /* get the DPCD from the bridge */ | ||
1304 | radeon_dp_getdpcd(radeon_connector); | ||
1305 | |||
1306 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) | ||
1307 | ret = connector_status_connected; | ||
1308 | else { | ||
1309 | /* need to setup ddc on the bridge */ | ||
1282 | if (encoder) | 1310 | if (encoder) |
1283 | radeon_atom_ext_encoder_setup_ddc(encoder); | 1311 | radeon_atom_ext_encoder_setup_ddc(encoder); |
1312 | if (radeon_ddc_probe(radeon_connector, | ||
1313 | radeon_connector->requires_extended_probe)) | ||
1314 | ret = connector_status_connected; | ||
1315 | } | ||
1316 | |||
1317 | if ((ret == connector_status_disconnected) && | ||
1318 | radeon_connector->dac_load_detect) { | ||
1319 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | ||
1320 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
1321 | if (encoder) { | ||
1322 | encoder_funcs = encoder->helper_private; | ||
1323 | ret = encoder_funcs->detect(encoder, connector); | ||
1324 | } | ||
1284 | } | 1325 | } |
1326 | } else { | ||
1285 | radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); | 1327 | radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); |
1286 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { | 1328 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { |
1287 | ret = connector_status_connected; | 1329 | ret = connector_status_connected; |
@@ -1297,16 +1339,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1297 | ret = connector_status_connected; | 1339 | ret = connector_status_connected; |
1298 | } | 1340 | } |
1299 | } | 1341 | } |
1300 | |||
1301 | if ((ret == connector_status_disconnected) && | ||
1302 | radeon_connector->dac_load_detect) { | ||
1303 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | ||
1304 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
1305 | if (encoder) { | ||
1306 | encoder_funcs = encoder->helper_private; | ||
1307 | ret = encoder_funcs->detect(encoder, connector); | ||
1308 | } | ||
1309 | } | ||
1310 | } | 1342 | } |
1311 | 1343 | ||
1312 | radeon_connector_update_scratch_regs(connector, ret); | 1344 | radeon_connector_update_scratch_regs(connector, ret); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 440e6ecccc40..b51e15725c6e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <drm/radeon_drm.h> | 32 | #include <drm/radeon_drm.h> |
33 | #include <linux/vgaarb.h> | 33 | #include <linux/vgaarb.h> |
34 | #include <linux/vga_switcheroo.h> | 34 | #include <linux/vga_switcheroo.h> |
35 | #include <linux/efi.h> | ||
35 | #include "radeon_reg.h" | 36 | #include "radeon_reg.h" |
36 | #include "radeon.h" | 37 | #include "radeon.h" |
37 | #include "atom.h" | 38 | #include "atom.h" |
@@ -300,6 +301,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 | |||
300 | mc->mc_vram_size = mc->aper_size; | 301 | mc->mc_vram_size = mc->aper_size; |
301 | } | 302 | } |
302 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | 303 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; |
304 | if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size) | ||
305 | mc->real_vram_size = radeon_vram_limit; | ||
303 | dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", | 306 | dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", |
304 | mc->mc_vram_size >> 20, mc->vram_start, | 307 | mc->mc_vram_size >> 20, mc->vram_start, |
305 | mc->vram_end, mc->real_vram_size >> 20); | 308 | mc->vram_end, mc->real_vram_size >> 20); |
@@ -348,6 +351,9 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
348 | { | 351 | { |
349 | uint32_t reg; | 352 | uint32_t reg; |
350 | 353 | ||
354 | if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) | ||
355 | return false; | ||
356 | |||
351 | /* first check CRTCs */ | 357 | /* first check CRTCs */ |
352 | if (ASIC_IS_DCE41(rdev)) { | 358 | if (ASIC_IS_DCE41(rdev)) { |
353 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | | 359 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 1a858944e4f3..6adb3e58affd 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -473,8 +473,8 @@ pflip_cleanup: | |||
473 | spin_lock_irqsave(&dev->event_lock, flags); | 473 | spin_lock_irqsave(&dev->event_lock, flags); |
474 | radeon_crtc->unpin_work = NULL; | 474 | radeon_crtc->unpin_work = NULL; |
475 | unlock_free: | 475 | unlock_free: |
476 | drm_gem_object_unreference_unlocked(old_radeon_fb->obj); | ||
477 | spin_unlock_irqrestore(&dev->event_lock, flags); | 476 | spin_unlock_irqrestore(&dev->event_lock, flags); |
477 | drm_gem_object_unreference_unlocked(old_radeon_fb->obj); | ||
478 | radeon_fence_unref(&work->fence); | 478 | radeon_fence_unref(&work->fence); |
479 | kfree(work); | 479 | kfree(work); |
480 | 480 | ||
@@ -707,16 +707,21 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
707 | radeon_router_select_ddc_port(radeon_connector); | 707 | radeon_router_select_ddc_port(radeon_connector); |
708 | 708 | ||
709 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || | 709 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || |
710 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { | 710 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || |
711 | radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) { | ||
711 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; | 712 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; |
713 | |||
712 | if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || | 714 | if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || |
713 | dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) | 715 | dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) |
714 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); | 716 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, |
715 | } | 717 | &dig->dp_i2c_bus->adapter); |
716 | if (!radeon_connector->ddc_bus) | 718 | else if (radeon_connector->ddc_bus && !radeon_connector->edid) |
717 | return -1; | 719 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, |
718 | if (!radeon_connector->edid) { | 720 | &radeon_connector->ddc_bus->adapter); |
719 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | 721 | } else { |
722 | if (radeon_connector->ddc_bus && !radeon_connector->edid) | ||
723 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, | ||
724 | &radeon_connector->ddc_bus->adapter); | ||
720 | } | 725 | } |
721 | 726 | ||
722 | if (!radeon_connector->edid) { | 727 | if (!radeon_connector->edid) { |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index b293487e5aa3..319d85d7e759 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -2323,6 +2323,9 @@ radeon_add_atom_encoder(struct drm_device *dev, | |||
2323 | default: | 2323 | default: |
2324 | encoder->possible_crtcs = 0x3; | 2324 | encoder->possible_crtcs = 0x3; |
2325 | break; | 2325 | break; |
2326 | case 4: | ||
2327 | encoder->possible_crtcs = 0xf; | ||
2328 | break; | ||
2326 | case 6: | 2329 | case 6: |
2327 | encoder->possible_crtcs = 0x3f; | 2330 | encoder->possible_crtcs = 0x3f; |
2328 | break; | 2331 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index d09031c03e26..68820f5f6303 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -479,6 +479,7 @@ extern void radeon_dp_set_link_config(struct drm_connector *connector, | |||
479 | struct drm_display_mode *mode); | 479 | struct drm_display_mode *mode); |
480 | extern void radeon_dp_link_train(struct drm_encoder *encoder, | 480 | extern void radeon_dp_link_train(struct drm_encoder *encoder, |
481 | struct drm_connector *connector); | 481 | struct drm_connector *connector); |
482 | extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); | ||
482 | extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); | 483 | extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); |
483 | extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); | 484 | extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); |
484 | extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); | 485 | extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); |
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index dee4a0c1b4b2..602fa3541c45 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -40,10 +40,14 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
40 | size = 1024 * 1024; | 40 | size = 1024 * 1024; |
41 | 41 | ||
42 | /* Number of tests = | 42 | /* Number of tests = |
43 | * (Total GTT - IB pool - writeback page - ring buffer) / test size | 43 | * (Total GTT - IB pool - writeback page - ring buffers) / test size |
44 | */ | 44 | */ |
45 | n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - | 45 | n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size; |
46 | rdev->cp.ring_size)) / size; | 46 | if (rdev->wb.wb_obj) |
47 | n -= RADEON_GPU_PAGE_SIZE; | ||
48 | if (rdev->ih.ring_obj) | ||
49 | n -= rdev->ih.ring_size; | ||
50 | n /= size; | ||
47 | 51 | ||
48 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); | 52 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); |
49 | if (!gtt_obj) { | 53 | if (!gtt_obj) { |
@@ -132,9 +136,15 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
132 | gtt_start++, vram_start++) { | 136 | gtt_start++, vram_start++) { |
133 | if (*vram_start != gtt_start) { | 137 | if (*vram_start != gtt_start) { |
134 | DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " | 138 | DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " |
135 | "expected 0x%p (GTT map 0x%p-0x%p)\n", | 139 | "expected 0x%p (GTT/VRAM offset " |
136 | i, *vram_start, gtt_start, gtt_map, | 140 | "0x%16llx/0x%16llx)\n", |
137 | gtt_end); | 141 | i, *vram_start, gtt_start, |
142 | (unsigned long long) | ||
143 | (gtt_addr - rdev->mc.gtt_start + | ||
144 | (void*)gtt_start - gtt_map), | ||
145 | (unsigned long long) | ||
146 | (vram_addr - rdev->mc.vram_start + | ||
147 | (void*)gtt_start - gtt_map)); | ||
138 | radeon_bo_kunmap(vram_obj); | 148 | radeon_bo_kunmap(vram_obj); |
139 | goto out_cleanup; | 149 | goto out_cleanup; |
140 | } | 150 | } |
@@ -175,9 +185,15 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
175 | gtt_start++, vram_start++) { | 185 | gtt_start++, vram_start++) { |
176 | if (*gtt_start != vram_start) { | 186 | if (*gtt_start != vram_start) { |
177 | DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " | 187 | DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " |
178 | "expected 0x%p (VRAM map 0x%p-0x%p)\n", | 188 | "expected 0x%p (VRAM/GTT offset " |
179 | i, *gtt_start, vram_start, vram_map, | 189 | "0x%16llx/0x%16llx)\n", |
180 | vram_end); | 190 | i, *gtt_start, vram_start, |
191 | (unsigned long long) | ||
192 | (vram_addr - rdev->mc.vram_start + | ||
193 | (void*)vram_start - vram_map), | ||
194 | (unsigned long long) | ||
195 | (gtt_addr - rdev->mc.gtt_start + | ||
196 | (void*)vram_start - vram_map)); | ||
181 | radeon_bo_kunmap(gtt_obj[i]); | 197 | radeon_bo_kunmap(gtt_obj[i]); |
182 | goto out_cleanup; | 198 | goto out_cleanup; |
183 | } | 199 | } |
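The radeon_test.c change above derives the number of GTT test buffers by subtracting the writeback page and IH ring only when those objects actually exist. Below is a minimal user-space sketch of the same arithmetic; the pool and page sizes are illustrative assumptions, not the driver's real configuration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real values come from the radeon driver. */
#define IB_POOL_BYTES (16u * 64u * 1024u)  /* assumed IB pool size */
#define GPU_PAGE_SIZE 4096u                /* assumed writeback page size */

static unsigned int num_gtt_tests(uint64_t gtt_size, uint64_t cp_ring_size,
                                  bool has_wb, bool has_ih,
                                  uint64_t ih_ring_size, uint64_t test_size)
{
	uint64_t n = gtt_size - IB_POOL_BYTES - cp_ring_size;

	if (has_wb)
		n -= GPU_PAGE_SIZE;   /* writeback page, only if allocated */
	if (has_ih)
		n -= ih_ring_size;    /* IH ring, only on chips that have one */
	return (unsigned int)(n / test_size);
}

int main(void)
{
	/* sample: 512 MiB GTT, 1 MiB CP ring, 64 KiB IH ring, 1 MiB test buffers */
	printf("%u tests\n", num_gtt_tests(512ull << 20, 1ull << 20,
	                                   true, true, 64ull << 10, 1ull << 20));
	return 0;
}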
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 60125ddba1e9..0b5468bfaf54 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, | |||
277 | DRM_ERROR("Trying to move memory with CP turned off.\n"); | 277 | DRM_ERROR("Trying to move memory with CP turned off.\n"); |
278 | return -EINVAL; | 278 | return -EINVAL; |
279 | } | 279 | } |
280 | r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); | 280 | |
281 | BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); | ||
282 | |||
283 | r = radeon_copy(rdev, old_start, new_start, | ||
284 | new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ | ||
285 | fence); | ||
281 | /* FIXME: handle copy error */ | 286 | /* FIXME: handle copy error */ |
282 | r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, | 287 | r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, |
283 | evict, no_wait_reserve, no_wait_gpu, new_mem); | 288 | evict, no_wait_reserve, no_wait_gpu, new_mem); |
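The radeon_move_blit hunk above scales the CPU page count by PAGE_SIZE / RADEON_GPU_PAGE_SIZE because radeon_copy() counts GPU pages. A small sketch of that conversion with assumed page sizes (a 64 KiB CPU page, 4 KiB GPU pages); the divisibility check mirrors what the BUILD_BUG_ON() in the patch asserts at compile time.

#include <assert.h>
#include <stdio.h>

/* Assumed sizes: GPU pages are 4 KiB; CPU pages may be larger on some archs. */
#define CPU_PAGE_SIZE    65536u
#define GPU_PAGE_SIZE_EX 4096u

int main(void)
{
	unsigned long cpu_pages = 8;

	/* The conversion is only valid if CPU pages are a whole number of GPU pages. */
	assert(CPU_PAGE_SIZE % GPU_PAGE_SIZE_EX == 0);

	unsigned long gpu_pages = cpu_pages * (CPU_PAGE_SIZE / GPU_PAGE_SIZE_EX);
	printf("%lu CPU pages -> %lu GPU pages\n", cpu_pages, gpu_pages);
	return 0;
}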
@@ -450,6 +455,29 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ | |||
450 | return -EINVAL; | 455 | return -EINVAL; |
451 | mem->bus.base = rdev->mc.aper_base; | 456 | mem->bus.base = rdev->mc.aper_base; |
452 | mem->bus.is_iomem = true; | 457 | mem->bus.is_iomem = true; |
458 | #ifdef __alpha__ | ||
459 | /* | ||
460 | * Alpha: use bus.addr to hold the ioremap() return, | ||
461 | * so we can modify bus.base below. | ||
462 | */ | ||
463 | if (mem->placement & TTM_PL_FLAG_WC) | ||
464 | mem->bus.addr = | ||
465 | ioremap_wc(mem->bus.base + mem->bus.offset, | ||
466 | mem->bus.size); | ||
467 | else | ||
468 | mem->bus.addr = | ||
469 | ioremap_nocache(mem->bus.base + mem->bus.offset, | ||
470 | mem->bus.size); | ||
471 | |||
472 | /* | ||
473 | * Alpha: Use just the bus offset plus | ||
474 | * the hose/domain memory base for bus.base. | ||
475 | * It then can be used to build PTEs for VRAM | ||
476 | * access, as done in ttm_bo_vm_fault(). | ||
477 | */ | ||
478 | mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + | ||
479 | rdev->ddev->hose->dense_mem_base; | ||
480 | #endif | ||
453 | break; | 481 | break; |
454 | default: | 482 | default: |
455 | return -EINVAL; | 483 | return -EINVAL; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 56619f64b6bf..ef06194c5aa6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -353,8 +353,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) | |||
353 | 353 | ||
354 | ret = ttm_tt_set_user(bo->ttm, current, | 354 | ret = ttm_tt_set_user(bo->ttm, current, |
355 | bo->buffer_start, bo->num_pages); | 355 | bo->buffer_start, bo->num_pages); |
356 | if (unlikely(ret != 0)) | 356 | if (unlikely(ret != 0)) { |
357 | ttm_tt_destroy(bo->ttm); | 357 | ttm_tt_destroy(bo->ttm); |
358 | bo->ttm = NULL; | ||
359 | } | ||
358 | break; | 360 | break; |
359 | default: | 361 | default: |
360 | printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); | 362 | printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); |
@@ -390,10 +392,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
390 | * Create and bind a ttm if required. | 392 | * Create and bind a ttm if required. |
391 | */ | 393 | */ |
392 | 394 | ||
393 | if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) { | 395 | if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { |
394 | ret = ttm_bo_add_ttm(bo, false); | 396 | if (bo->ttm == NULL) { |
395 | if (ret) | 397 | bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); |
396 | goto out_err; | 398 | ret = ttm_bo_add_ttm(bo, zero); |
399 | if (ret) | ||
400 | goto out_err; | ||
401 | } | ||
397 | 402 | ||
398 | ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); | 403 | ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); |
399 | if (ret) | 404 | if (ret) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 77dbf408c0d0..ae3c6f5dd2b7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
635 | if (ret) | 635 | if (ret) |
636 | return ret; | 636 | return ret; |
637 | 637 | ||
638 | ttm_bo_free_old_node(bo); | ||
639 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && | 638 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && |
640 | (bo->ttm != NULL)) { | 639 | (bo->ttm != NULL)) { |
641 | ttm_tt_unbind(bo->ttm); | 640 | ttm_tt_unbind(bo->ttm); |
642 | ttm_tt_destroy(bo->ttm); | 641 | ttm_tt_destroy(bo->ttm); |
643 | bo->ttm = NULL; | 642 | bo->ttm = NULL; |
644 | } | 643 | } |
644 | ttm_bo_free_old_node(bo); | ||
645 | } else { | 645 | } else { |
646 | /** | 646 | /** |
647 | * This should help pipeline ordinary buffer moves. | 647 | * This should help pipeline ordinary buffer moves. |
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 306b15f39c9c..1130a8987125 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
@@ -589,6 +589,7 @@ config HID_WACOM_POWER_SUPPLY | |||
589 | config HID_WIIMOTE | 589 | config HID_WIIMOTE |
590 | tristate "Nintendo Wii Remote support" | 590 | tristate "Nintendo Wii Remote support" |
591 | depends on BT_HIDP | 591 | depends on BT_HIDP |
592 | depends on LEDS_CLASS | ||
592 | ---help--- | 593 | ---help--- |
593 | Support for the Nintendo Wii Remote bluetooth device. | 594 | Support for the Nintendo Wii Remote bluetooth device. |
594 | 595 | ||
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index b85744fe8464..18b3bc646bf3 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c | |||
@@ -444,6 +444,12 @@ static const struct hid_device_id apple_devices[] = { | |||
444 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS), | 444 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS), |
445 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | | 445 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | |
446 | APPLE_RDESC_JIS }, | 446 | APPLE_RDESC_JIS }, |
447 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), | ||
448 | .driver_data = APPLE_HAS_FN }, | ||
449 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO), | ||
450 | .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, | ||
451 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS), | ||
452 | .driver_data = APPLE_HAS_FN }, | ||
447 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), | 453 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), |
448 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, | 454 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, |
449 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO), | 455 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO), |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 1a5cf0c9cfca..242353df3dc4 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -1340,6 +1340,9 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1340 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, | 1340 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, |
1341 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, | 1341 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, |
1342 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, | 1342 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, |
1343 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) }, | ||
1344 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) }, | ||
1345 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) }, | ||
1343 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, | 1346 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, |
1344 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, | 1347 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, |
1345 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, | 1348 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index db63ccf21cc8..7484e1b67249 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -109,6 +109,9 @@ | |||
109 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 | 109 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 |
110 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 | 110 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 |
111 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 | 111 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 |
112 | #define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f | ||
113 | #define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250 | ||
114 | #define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251 | ||
112 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 | 115 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 |
113 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a | 116 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a |
114 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b | 117 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b |
@@ -274,6 +277,7 @@ | |||
274 | #define USB_DEVICE_ID_PENPOWER 0x00f4 | 277 | #define USB_DEVICE_ID_PENPOWER 0x00f4 |
275 | 278 | ||
276 | #define USB_VENDOR_ID_GREENASIA 0x0e8f | 279 | #define USB_VENDOR_ID_GREENASIA 0x0e8f |
280 | #define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013 | ||
277 | 281 | ||
278 | #define USB_VENDOR_ID_GRETAGMACBETH 0x0971 | 282 | #define USB_VENDOR_ID_GRETAGMACBETH 0x0971 |
279 | #define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005 | 283 | #define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005 |
@@ -576,6 +580,9 @@ | |||
576 | #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 | 580 | #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 |
577 | #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 | 581 | #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 |
578 | 582 | ||
583 | #define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f | ||
584 | #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002 | ||
585 | |||
579 | #define USB_VENDOR_ID_SKYCABLE 0x1223 | 586 | #define USB_VENDOR_ID_SKYCABLE 0x1223 |
580 | #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 | 587 | #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 |
581 | 588 | ||
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 0ec91c18a421..f0fbd7bd239e 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c | |||
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie | |||
81 | #define NO_TOUCHES -1 | 81 | #define NO_TOUCHES -1 |
82 | #define SINGLE_TOUCH_UP -2 | 82 | #define SINGLE_TOUCH_UP -2 |
83 | 83 | ||
84 | /* Touch surface information. Dimension is in hundredths of a mm, min and max | ||
85 | * are in raw device units. */ | ||
86 | #define MOUSE_DIMENSION_X (float)9056 | ||
87 | #define MOUSE_MIN_X -1100 | ||
88 | #define MOUSE_MAX_X 1258 | ||
89 | #define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100)) | ||
90 | #define MOUSE_DIMENSION_Y (float)5152 | ||
91 | #define MOUSE_MIN_Y -1589 | ||
92 | #define MOUSE_MAX_Y 2047 | ||
93 | #define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100)) | ||
94 | |||
95 | #define TRACKPAD_DIMENSION_X (float)13000 | ||
96 | #define TRACKPAD_MIN_X -2909 | ||
97 | #define TRACKPAD_MAX_X 3167 | ||
98 | #define TRACKPAD_RES_X \ | ||
99 | ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100)) | ||
100 | #define TRACKPAD_DIMENSION_Y (float)11000 | ||
101 | #define TRACKPAD_MIN_Y -2456 | ||
102 | #define TRACKPAD_MAX_Y 2565 | ||
103 | #define TRACKPAD_RES_Y \ | ||
104 | ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100)) | ||
105 | |||
84 | /** | 106 | /** |
85 | * struct magicmouse_sc - Tracks Magic Mouse-specific data. | 107 | * struct magicmouse_sc - Tracks Magic Mouse-specific data. |
86 | * @input: Input device through which we report events. | 108 | * @input: Input device through which we report events. |
@@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h | |||
406 | * inverse of the reported Y. | 428 | * inverse of the reported Y. |
407 | */ | 429 | */ |
408 | if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { | 430 | if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { |
409 | input_set_abs_params(input, ABS_MT_POSITION_X, -1100, | 431 | input_set_abs_params(input, ABS_MT_POSITION_X, |
410 | 1358, 4, 0); | 432 | MOUSE_MIN_X, MOUSE_MAX_X, 4, 0); |
411 | input_set_abs_params(input, ABS_MT_POSITION_Y, -1589, | 433 | input_set_abs_params(input, ABS_MT_POSITION_Y, |
412 | 2047, 4, 0); | 434 | MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0); |
435 | |||
436 | input_abs_set_res(input, ABS_MT_POSITION_X, | ||
437 | MOUSE_RES_X); | ||
438 | input_abs_set_res(input, ABS_MT_POSITION_Y, | ||
439 | MOUSE_RES_Y); | ||
413 | } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ | 440 | } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ |
414 | input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0); | 441 | input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X, |
415 | input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0); | 442 | TRACKPAD_MAX_X, 4, 0); |
416 | input_set_abs_params(input, ABS_MT_POSITION_X, -2909, | 443 | input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y, |
417 | 3167, 4, 0); | 444 | TRACKPAD_MAX_Y, 4, 0); |
418 | input_set_abs_params(input, ABS_MT_POSITION_Y, -2456, | 445 | input_set_abs_params(input, ABS_MT_POSITION_X, |
419 | 2565, 4, 0); | 446 | TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0); |
447 | input_set_abs_params(input, ABS_MT_POSITION_Y, | ||
448 | TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0); | ||
449 | |||
450 | input_abs_set_res(input, ABS_X, TRACKPAD_RES_X); | ||
451 | input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y); | ||
452 | input_abs_set_res(input, ABS_MT_POSITION_X, | ||
453 | TRACKPAD_RES_X); | ||
454 | input_abs_set_res(input, ABS_MT_POSITION_Y, | ||
455 | TRACKPAD_RES_Y); | ||
420 | } | 456 | } |
421 | 457 | ||
422 | input_set_events_per_packet(input, 60); | 458 | input_set_events_per_packet(input, 60); |
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev, | |||
501 | } | 537 | } |
502 | report->size = 6; | 538 | report->size = 6; |
503 | 539 | ||
540 | /* | ||
541 | * Some devices respond with 'invalid report id' when the feature | ||
542 | * report that switches them into multi-touch mode is sent. | ||
543 | * | ||
544 | * This results in -EIO from the _raw low-level transport callback, | ||
545 | * but there seems to be no other way of switching the mode. | ||
546 | * Thus the super-ugly hacky success check below. | ||
547 | */ | ||
504 | ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature), | 548 | ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature), |
505 | HID_FEATURE_REPORT); | 549 | HID_FEATURE_REPORT); |
506 | if (ret != sizeof(feature)) { | 550 | if (ret != -EIO && ret != sizeof(feature)) { |
507 | hid_err(hdev, "unable to request touch data (%d)\n", ret); | 551 | hid_err(hdev, "unable to request touch data (%d)\n", ret); |
508 | goto err_stop_hw; | 552 | goto err_stop_hw; |
509 | } | 553 | } |
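The new *_RES_* macros in the hid-magicmouse hunk derive input resolution in units per millimetre from the touch-surface span. A quick worked check of that arithmetic, using the Magic Mouse X-axis values from the macros (the rounding here is only illustrative):

#include <stdio.h>

int main(void)
{
	/* Magic Mouse X axis, values taken from the macros above */
	float dim_x = 9056.0f;            /* surface width in hundredths of a mm */
	int min_x = -1100, max_x = 1258;  /* raw device units */

	float res_x = (max_x - min_x) / (dim_x / 100.0f);
	printf("X resolution: %.2f units/mm\n", res_x);  /* roughly 26 units/mm */
	return 0;
}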
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c index 06888323828c..72ca689b6474 100644 --- a/drivers/hid/hid-wacom.c +++ b/drivers/hid/hid-wacom.c | |||
@@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev, | |||
353 | if (ret) { | 353 | if (ret) { |
354 | hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n", | 354 | hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n", |
355 | ret); | 355 | ret); |
356 | /* | 356 | goto err_battery; |
357 | * battery attribute is not critical for the tablet, but if it | ||
358 | * failed then there is no need to create ac attribute | ||
359 | */ | ||
360 | goto move_on; | ||
361 | } | 357 | } |
362 | 358 | ||
363 | wdata->ac.properties = wacom_ac_props; | 359 | wdata->ac.properties = wacom_ac_props; |
@@ -371,18 +367,14 @@ static int wacom_probe(struct hid_device *hdev, | |||
371 | if (ret) { | 367 | if (ret) { |
372 | hid_warn(hdev, | 368 | hid_warn(hdev, |
373 | "can't create ac battery attribute, err: %d\n", ret); | 369 | "can't create ac battery attribute, err: %d\n", ret); |
374 | /* | 370 | goto err_ac; |
375 | * ac attribute is not critical for the tablet, but if it | ||
376 | * failed then we don't want to battery attribute to exist | ||
377 | */ | ||
378 | power_supply_unregister(&wdata->battery); | ||
379 | } | 371 | } |
380 | |||
381 | move_on: | ||
382 | #endif | 372 | #endif |
383 | hidinput = list_entry(hdev->inputs.next, struct hid_input, list); | 373 | hidinput = list_entry(hdev->inputs.next, struct hid_input, list); |
384 | input = hidinput->input; | 374 | input = hidinput->input; |
385 | 375 | ||
376 | __set_bit(INPUT_PROP_POINTER, input->propbit); | ||
377 | |||
386 | /* Basics */ | 378 | /* Basics */ |
387 | input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); | 379 | input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); |
388 | 380 | ||
@@ -416,6 +408,13 @@ move_on: | |||
416 | 408 | ||
417 | return 0; | 409 | return 0; |
418 | 410 | ||
411 | #ifdef CONFIG_HID_WACOM_POWER_SUPPLY | ||
412 | err_ac: | ||
413 | power_supply_unregister(&wdata->battery); | ||
414 | err_battery: | ||
415 | device_remove_file(&hdev->dev, &dev_attr_speed); | ||
416 | hid_hw_stop(hdev); | ||
417 | #endif | ||
419 | err_free: | 418 | err_free: |
420 | kfree(wdata); | 419 | kfree(wdata); |
421 | return ret; | 420 | return ret; |
@@ -426,6 +425,7 @@ static void wacom_remove(struct hid_device *hdev) | |||
426 | #ifdef CONFIG_HID_WACOM_POWER_SUPPLY | 425 | #ifdef CONFIG_HID_WACOM_POWER_SUPPLY |
427 | struct wacom_data *wdata = hid_get_drvdata(hdev); | 426 | struct wacom_data *wdata = hid_get_drvdata(hdev); |
428 | #endif | 427 | #endif |
428 | device_remove_file(&hdev->dev, &dev_attr_speed); | ||
429 | hid_hw_stop(hdev); | 429 | hid_hw_stop(hdev); |
430 | 430 | ||
431 | #ifdef CONFIG_HID_WACOM_POWER_SUPPLY | 431 | #ifdef CONFIG_HID_WACOM_POWER_SUPPLY |
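The hid-wacom change replaces the "carry on anyway" handling with a conventional error-unwind ladder: each label releases exactly the resources acquired before the failure, in reverse order. Below is a generic, self-contained sketch of that pattern using ordinary user-space resources as stand-ins, not the driver's power_supply or sysfs objects.

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	int ret = -1;
	char *a = NULL, *b = NULL;
	FILE *f = NULL;

	a = malloc(64);
	if (!a)
		goto err_a;
	b = malloc(64);
	if (!b)
		goto err_b;
	f = fopen("/tmp/example", "w");
	if (!f)
		goto err_file;

	/* a real probe path would keep these resources on success;
	 * here we just release them again and report success */
	fclose(f);
	free(b);
	free(a);
	return 0;

err_file:            /* undo only what was set up before fopen() failed */
	free(b);
err_b:
	free(a);
err_a:
	return ret;
}

int main(void)
{
	return setup() ? 1 : 0;
}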
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c index a594383ce03d..85a02e5f9fe8 100644 --- a/drivers/hid/hid-wiimote.c +++ b/drivers/hid/hid-wiimote.c | |||
@@ -10,10 +10,10 @@ | |||
10 | * any later version. | 10 | * any later version. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/atomic.h> | ||
14 | #include <linux/device.h> | 13 | #include <linux/device.h> |
15 | #include <linux/hid.h> | 14 | #include <linux/hid.h> |
16 | #include <linux/input.h> | 15 | #include <linux/input.h> |
16 | #include <linux/leds.h> | ||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
19 | #include "hid-ids.h" | 19 | #include "hid-ids.h" |
@@ -33,9 +33,9 @@ struct wiimote_state { | |||
33 | }; | 33 | }; |
34 | 34 | ||
35 | struct wiimote_data { | 35 | struct wiimote_data { |
36 | atomic_t ready; | ||
37 | struct hid_device *hdev; | 36 | struct hid_device *hdev; |
38 | struct input_dev *input; | 37 | struct input_dev *input; |
38 | struct led_classdev *leds[4]; | ||
39 | 39 | ||
40 | spinlock_t qlock; | 40 | spinlock_t qlock; |
41 | __u8 head; | 41 | __u8 head; |
@@ -53,8 +53,15 @@ struct wiimote_data { | |||
53 | #define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \ | 53 | #define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \ |
54 | WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4) | 54 | WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4) |
55 | 55 | ||
56 | /* return the protocol flag for LED number 'num' */ | ||
57 | #define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1)) | ||
58 | |||
56 | enum wiiproto_reqs { | 59 | enum wiiproto_reqs { |
60 | WIIPROTO_REQ_NULL = 0x0, | ||
57 | WIIPROTO_REQ_LED = 0x11, | 61 | WIIPROTO_REQ_LED = 0x11, |
62 | WIIPROTO_REQ_DRM = 0x12, | ||
63 | WIIPROTO_REQ_STATUS = 0x20, | ||
64 | WIIPROTO_REQ_RETURN = 0x22, | ||
58 | WIIPROTO_REQ_DRM_K = 0x30, | 65 | WIIPROTO_REQ_DRM_K = 0x30, |
59 | }; | 66 | }; |
60 | 67 | ||
@@ -87,9 +94,6 @@ static __u16 wiiproto_keymap[] = { | |||
87 | BTN_MODE, /* WIIPROTO_KEY_HOME */ | 94 | BTN_MODE, /* WIIPROTO_KEY_HOME */ |
88 | }; | 95 | }; |
89 | 96 | ||
90 | #define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \ | ||
91 | dev)) | ||
92 | |||
93 | static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer, | 97 | static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer, |
94 | size_t count) | 98 | size_t count) |
95 | { | 99 | { |
@@ -192,66 +196,96 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds) | |||
192 | wiimote_queue(wdata, cmd, sizeof(cmd)); | 196 | wiimote_queue(wdata, cmd, sizeof(cmd)); |
193 | } | 197 | } |
194 | 198 | ||
195 | #define wiifs_led_show_set(num) \ | 199 | /* |
196 | static ssize_t wiifs_led_show_##num(struct device *dev, \ | 200 | * Check what peripherals of the wiimote are currently |
197 | struct device_attribute *attr, char *buf) \ | 201 | * active and select a proper DRM that supports all of |
198 | { \ | 202 | * the requested data inputs. |
199 | struct wiimote_data *wdata = dev_to_wii(dev); \ | 203 | */ |
200 | unsigned long flags; \ | 204 | static __u8 select_drm(struct wiimote_data *wdata) |
201 | int state; \ | 205 | { |
202 | \ | 206 | return WIIPROTO_REQ_DRM_K; |
203 | if (!atomic_read(&wdata->ready)) \ | 207 | } |
204 | return -EBUSY; \ | 208 | |
205 | \ | 209 | static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm) |
206 | spin_lock_irqsave(&wdata->state.lock, flags); \ | 210 | { |
207 | state = !!(wdata->state.flags & WIIPROTO_FLAG_LED##num); \ | 211 | __u8 cmd[3]; |
208 | spin_unlock_irqrestore(&wdata->state.lock, flags); \ | 212 | |
209 | \ | 213 | if (drm == WIIPROTO_REQ_NULL) |
210 | return sprintf(buf, "%d\n", state); \ | 214 | drm = select_drm(wdata); |
211 | } \ | 215 | |
212 | static ssize_t wiifs_led_set_##num(struct device *dev, \ | 216 | cmd[0] = WIIPROTO_REQ_DRM; |
213 | struct device_attribute *attr, const char *buf, size_t count) \ | 217 | cmd[1] = 0; |
214 | { \ | 218 | cmd[2] = drm; |
215 | struct wiimote_data *wdata = dev_to_wii(dev); \ | 219 | |
216 | int tmp = simple_strtoul(buf, NULL, 10); \ | 220 | wiimote_queue(wdata, cmd, sizeof(cmd)); |
217 | unsigned long flags; \ | 221 | } |
218 | __u8 state; \ | 222 | |
219 | \ | 223 | static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev) |
220 | if (!atomic_read(&wdata->ready)) \ | 224 | { |
221 | return -EBUSY; \ | 225 | struct wiimote_data *wdata; |
222 | \ | 226 | struct device *dev = led_dev->dev->parent; |
223 | spin_lock_irqsave(&wdata->state.lock, flags); \ | 227 | int i; |
224 | \ | 228 | unsigned long flags; |
225 | state = wdata->state.flags; \ | 229 | bool value = false; |
226 | \ | 230 | |
227 | if (tmp) \ | 231 | wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev)); |
228 | wiiproto_req_leds(wdata, state | WIIPROTO_FLAG_LED##num);\ | 232 | |
229 | else \ | 233 | for (i = 0; i < 4; ++i) { |
230 | wiiproto_req_leds(wdata, state & ~WIIPROTO_FLAG_LED##num);\ | 234 | if (wdata->leds[i] == led_dev) { |
231 | \ | 235 | spin_lock_irqsave(&wdata->state.lock, flags); |
232 | spin_unlock_irqrestore(&wdata->state.lock, flags); \ | 236 | value = wdata->state.flags & WIIPROTO_FLAG_LED(i + 1); |
233 | \ | 237 | spin_unlock_irqrestore(&wdata->state.lock, flags); |
234 | return count; \ | 238 | break; |
235 | } \ | 239 | } |
236 | static DEVICE_ATTR(led##num, S_IRUGO | S_IWUSR, wiifs_led_show_##num, \ | 240 | } |
237 | wiifs_led_set_##num) | 241 | |
238 | 242 | return value ? LED_FULL : LED_OFF; | |
239 | wiifs_led_show_set(1); | 243 | } |
240 | wiifs_led_show_set(2); | 244 | |
241 | wiifs_led_show_set(3); | 245 | static void wiimote_leds_set(struct led_classdev *led_dev, |
242 | wiifs_led_show_set(4); | 246 | enum led_brightness value) |
247 | { | ||
248 | struct wiimote_data *wdata; | ||
249 | struct device *dev = led_dev->dev->parent; | ||
250 | int i; | ||
251 | unsigned long flags; | ||
252 | __u8 state, flag; | ||
253 | |||
254 | wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev)); | ||
255 | |||
256 | for (i = 0; i < 4; ++i) { | ||
257 | if (wdata->leds[i] == led_dev) { | ||
258 | flag = WIIPROTO_FLAG_LED(i + 1); | ||
259 | spin_lock_irqsave(&wdata->state.lock, flags); | ||
260 | state = wdata->state.flags; | ||
261 | if (value == LED_OFF) | ||
262 | wiiproto_req_leds(wdata, state & ~flag); | ||
263 | else | ||
264 | wiiproto_req_leds(wdata, state | flag); | ||
265 | spin_unlock_irqrestore(&wdata->state.lock, flags); | ||
266 | break; | ||
267 | } | ||
268 | } | ||
269 | } | ||
243 | 270 | ||
244 | static int wiimote_input_event(struct input_dev *dev, unsigned int type, | 271 | static int wiimote_input_event(struct input_dev *dev, unsigned int type, |
245 | unsigned int code, int value) | 272 | unsigned int code, int value) |
246 | { | 273 | { |
274 | return 0; | ||
275 | } | ||
276 | |||
277 | static int wiimote_input_open(struct input_dev *dev) | ||
278 | { | ||
247 | struct wiimote_data *wdata = input_get_drvdata(dev); | 279 | struct wiimote_data *wdata = input_get_drvdata(dev); |
248 | 280 | ||
249 | if (!atomic_read(&wdata->ready)) | 281 | return hid_hw_open(wdata->hdev); |
250 | return -EBUSY; | 282 | } |
251 | /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */ | ||
252 | smp_rmb(); | ||
253 | 283 | ||
254 | return 0; | 284 | static void wiimote_input_close(struct input_dev *dev) |
285 | { | ||
286 | struct wiimote_data *wdata = input_get_drvdata(dev); | ||
287 | |||
288 | hid_hw_close(wdata->hdev); | ||
255 | } | 289 | } |
256 | 290 | ||
257 | static void handler_keys(struct wiimote_data *wdata, const __u8 *payload) | 291 | static void handler_keys(struct wiimote_data *wdata, const __u8 *payload) |
@@ -281,6 +315,26 @@ static void handler_keys(struct wiimote_data *wdata, const __u8 *payload) | |||
281 | input_sync(wdata->input); | 315 | input_sync(wdata->input); |
282 | } | 316 | } |
283 | 317 | ||
318 | static void handler_status(struct wiimote_data *wdata, const __u8 *payload) | ||
319 | { | ||
320 | handler_keys(wdata, payload); | ||
321 | |||
322 | /* on status reports the DRM is reset, so we need to resend it */ | ||
323 | wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL); | ||
324 | } | ||
325 | |||
326 | static void handler_return(struct wiimote_data *wdata, const __u8 *payload) | ||
327 | { | ||
328 | __u8 err = payload[3]; | ||
329 | __u8 cmd = payload[2]; | ||
330 | |||
331 | handler_keys(wdata, payload); | ||
332 | |||
333 | if (err) | ||
334 | hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err, | ||
335 | cmd); | ||
336 | } | ||
337 | |||
284 | struct wiiproto_handler { | 338 | struct wiiproto_handler { |
285 | __u8 id; | 339 | __u8 id; |
286 | size_t size; | 340 | size_t size; |
@@ -288,6 +342,8 @@ struct wiiproto_handler { | |||
288 | }; | 342 | }; |
289 | 343 | ||
290 | static struct wiiproto_handler handlers[] = { | 344 | static struct wiiproto_handler handlers[] = { |
345 | { .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status }, | ||
346 | { .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return }, | ||
291 | { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys }, | 347 | { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys }, |
292 | { .id = 0 } | 348 | { .id = 0 } |
293 | }; | 349 | }; |
@@ -300,11 +356,6 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report, | |||
300 | int i; | 356 | int i; |
301 | unsigned long flags; | 357 | unsigned long flags; |
302 | 358 | ||
303 | if (!atomic_read(&wdata->ready)) | ||
304 | return -EBUSY; | ||
305 | /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */ | ||
306 | smp_rmb(); | ||
307 | |||
308 | if (size < 1) | 359 | if (size < 1) |
309 | return -EINVAL; | 360 | return -EINVAL; |
310 | 361 | ||
@@ -321,6 +372,58 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report, | |||
321 | return 0; | 372 | return 0; |
322 | } | 373 | } |
323 | 374 | ||
375 | static void wiimote_leds_destroy(struct wiimote_data *wdata) | ||
376 | { | ||
377 | int i; | ||
378 | struct led_classdev *led; | ||
379 | |||
380 | for (i = 0; i < 4; ++i) { | ||
381 | if (wdata->leds[i]) { | ||
382 | led = wdata->leds[i]; | ||
383 | wdata->leds[i] = NULL; | ||
384 | led_classdev_unregister(led); | ||
385 | kfree(led); | ||
386 | } | ||
387 | } | ||
388 | } | ||
389 | |||
390 | static int wiimote_leds_create(struct wiimote_data *wdata) | ||
391 | { | ||
392 | int i, ret; | ||
393 | struct device *dev = &wdata->hdev->dev; | ||
394 | size_t namesz = strlen(dev_name(dev)) + 9; | ||
395 | struct led_classdev *led; | ||
396 | char *name; | ||
397 | |||
398 | for (i = 0; i < 4; ++i) { | ||
399 | led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL); | ||
400 | if (!led) { | ||
401 | ret = -ENOMEM; | ||
402 | goto err; | ||
403 | } | ||
404 | name = (void*)&led[1]; | ||
405 | snprintf(name, namesz, "%s:blue:p%d", dev_name(dev), i); | ||
406 | led->name = name; | ||
407 | led->brightness = 0; | ||
408 | led->max_brightness = 1; | ||
409 | led->brightness_get = wiimote_leds_get; | ||
410 | led->brightness_set = wiimote_leds_set; | ||
411 | |||
412 | ret = led_classdev_register(dev, led); | ||
413 | if (ret) { | ||
414 | kfree(led); | ||
415 | goto err; | ||
416 | } | ||
417 | wdata->leds[i] = led; | ||
418 | } | ||
419 | |||
420 | return 0; | ||
421 | |||
422 | err: | ||
423 | wiimote_leds_destroy(wdata); | ||
424 | return ret; | ||
425 | } | ||
426 | |||
324 | static struct wiimote_data *wiimote_create(struct hid_device *hdev) | 427 | static struct wiimote_data *wiimote_create(struct hid_device *hdev) |
325 | { | 428 | { |
326 | struct wiimote_data *wdata; | 429 | struct wiimote_data *wdata; |
@@ -341,6 +444,8 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev) | |||
341 | 444 | ||
342 | input_set_drvdata(wdata->input, wdata); | 445 | input_set_drvdata(wdata->input, wdata); |
343 | wdata->input->event = wiimote_input_event; | 446 | wdata->input->event = wiimote_input_event; |
447 | wdata->input->open = wiimote_input_open; | ||
448 | wdata->input->close = wiimote_input_close; | ||
344 | wdata->input->dev.parent = &wdata->hdev->dev; | 449 | wdata->input->dev.parent = &wdata->hdev->dev; |
345 | wdata->input->id.bustype = wdata->hdev->bus; | 450 | wdata->input->id.bustype = wdata->hdev->bus; |
346 | wdata->input->id.vendor = wdata->hdev->vendor; | 451 | wdata->input->id.vendor = wdata->hdev->vendor; |
@@ -362,6 +467,12 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev) | |||
362 | 467 | ||
363 | static void wiimote_destroy(struct wiimote_data *wdata) | 468 | static void wiimote_destroy(struct wiimote_data *wdata) |
364 | { | 469 | { |
470 | wiimote_leds_destroy(wdata); | ||
471 | |||
472 | input_unregister_device(wdata->input); | ||
473 | cancel_work_sync(&wdata->worker); | ||
474 | hid_hw_stop(wdata->hdev); | ||
475 | |||
365 | kfree(wdata); | 476 | kfree(wdata); |
366 | } | 477 | } |
367 | 478 | ||
@@ -377,19 +488,6 @@ static int wiimote_hid_probe(struct hid_device *hdev, | |||
377 | return -ENOMEM; | 488 | return -ENOMEM; |
378 | } | 489 | } |
379 | 490 | ||
380 | ret = device_create_file(&hdev->dev, &dev_attr_led1); | ||
381 | if (ret) | ||
382 | goto err; | ||
383 | ret = device_create_file(&hdev->dev, &dev_attr_led2); | ||
384 | if (ret) | ||
385 | goto err; | ||
386 | ret = device_create_file(&hdev->dev, &dev_attr_led3); | ||
387 | if (ret) | ||
388 | goto err; | ||
389 | ret = device_create_file(&hdev->dev, &dev_attr_led4); | ||
390 | if (ret) | ||
391 | goto err; | ||
392 | |||
393 | ret = hid_parse(hdev); | 491 | ret = hid_parse(hdev); |
394 | if (ret) { | 492 | if (ret) { |
395 | hid_err(hdev, "HID parse failed\n"); | 493 | hid_err(hdev, "HID parse failed\n"); |
@@ -408,9 +506,10 @@ static int wiimote_hid_probe(struct hid_device *hdev, | |||
408 | goto err_stop; | 506 | goto err_stop; |
409 | } | 507 | } |
410 | 508 | ||
411 | /* smp_wmb: Write wdata->xy first before wdata->ready is set to 1 */ | 509 | ret = wiimote_leds_create(wdata); |
412 | smp_wmb(); | 510 | if (ret) |
413 | atomic_set(&wdata->ready, 1); | 511 | goto err_free; |
512 | |||
414 | hid_info(hdev, "New device registered\n"); | 513 | hid_info(hdev, "New device registered\n"); |
415 | 514 | ||
416 | /* by default set led1 after device initialization */ | 515 | /* by default set led1 after device initialization */ |
@@ -420,15 +519,15 @@ static int wiimote_hid_probe(struct hid_device *hdev, | |||
420 | 519 | ||
421 | return 0; | 520 | return 0; |
422 | 521 | ||
522 | err_free: | ||
523 | wiimote_destroy(wdata); | ||
524 | return ret; | ||
525 | |||
423 | err_stop: | 526 | err_stop: |
424 | hid_hw_stop(hdev); | 527 | hid_hw_stop(hdev); |
425 | err: | 528 | err: |
426 | input_free_device(wdata->input); | 529 | input_free_device(wdata->input); |
427 | device_remove_file(&hdev->dev, &dev_attr_led1); | 530 | kfree(wdata); |
428 | device_remove_file(&hdev->dev, &dev_attr_led2); | ||
429 | device_remove_file(&hdev->dev, &dev_attr_led3); | ||
430 | device_remove_file(&hdev->dev, &dev_attr_led4); | ||
431 | wiimote_destroy(wdata); | ||
432 | return ret; | 531 | return ret; |
433 | } | 532 | } |
434 | 533 | ||
@@ -437,16 +536,6 @@ static void wiimote_hid_remove(struct hid_device *hdev) | |||
437 | struct wiimote_data *wdata = hid_get_drvdata(hdev); | 536 | struct wiimote_data *wdata = hid_get_drvdata(hdev); |
438 | 537 | ||
439 | hid_info(hdev, "Device removed\n"); | 538 | hid_info(hdev, "Device removed\n"); |
440 | |||
441 | device_remove_file(&hdev->dev, &dev_attr_led1); | ||
442 | device_remove_file(&hdev->dev, &dev_attr_led2); | ||
443 | device_remove_file(&hdev->dev, &dev_attr_led3); | ||
444 | device_remove_file(&hdev->dev, &dev_attr_led4); | ||
445 | |||
446 | hid_hw_stop(hdev); | ||
447 | input_unregister_device(wdata->input); | ||
448 | |||
449 | cancel_work_sync(&wdata->worker); | ||
450 | wiimote_destroy(wdata); | 539 | wiimote_destroy(wdata); |
451 | } | 540 | } |
452 | 541 | ||
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 621959d5cc42..3146fdcda272 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -47,6 +47,7 @@ static const struct hid_blacklist { | |||
47 | { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, | 47 | { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, |
48 | 48 | ||
49 | { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, | 49 | { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, |
50 | { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT }, | ||
50 | { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, | 51 | { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, |
51 | { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, | 52 | { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, |
52 | { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, | 53 | { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, |
@@ -89,6 +90,7 @@ static const struct hid_blacklist { | |||
89 | 90 | ||
90 | { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT }, | 91 | { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT }, |
91 | { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, | 92 | { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, |
93 | { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS }, | ||
92 | { 0, 0 } | 94 | { 0, 0 } |
93 | }; | 95 | }; |
94 | 96 | ||
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 59d83e83da7f..411257676133 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
@@ -601,7 +601,12 @@ static int create_core_data(struct platform_data *pdata, | |||
601 | err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); | 601 | err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); |
602 | if (!err) { | 602 | if (!err) { |
603 | tdata->attr_size += MAX_THRESH_ATTRS; | 603 | tdata->attr_size += MAX_THRESH_ATTRS; |
604 | tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000; | 604 | tdata->tmin = tdata->tjmax - |
605 | ((eax & THERM_MASK_THRESHOLD0) >> | ||
606 | THERM_SHIFT_THRESHOLD0) * 1000; | ||
607 | tdata->ttarget = tdata->tjmax - | ||
608 | ((eax & THERM_MASK_THRESHOLD1) >> | ||
609 | THERM_SHIFT_THRESHOLD1) * 1000; | ||
605 | } | 610 | } |
606 | 611 | ||
607 | pdata->core_data[attr_no] = tdata; | 612 | pdata->core_data[attr_no] = tdata; |
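The coretemp change reads both threshold fields out of the thermal interrupt MSR with mask/shift pairs instead of a single hard-coded shift, then converts each to millidegrees below TjMax. A sketch of that extraction, assuming the usual 7-bit fields at bit positions 8 and 16; the real constants come from the kernel's MSR headers (THERM_MASK_THRESHOLD0/1, THERM_SHIFT_THRESHOLD0/1).

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout: 7-bit threshold values at bits 8..14 and 16..22. */
#define MASK_T0  (0x7fu << 8)
#define SHIFT_T0 8
#define MASK_T1  (0x7fu << 16)
#define SHIFT_T1 16

int main(void)
{
	/* sample raw MSR value: offsets of 20 and 10 degrees below TjMax */
	uint32_t eax = (10u << 16) | (20u << 8);
	int tjmax = 100000; /* 100 C in millidegrees */

	int tmin    = tjmax - (int)((eax & MASK_T0) >> SHIFT_T0) * 1000;
	int ttarget = tjmax - (int)((eax & MASK_T1) >> SHIFT_T1) * 1000;

	printf("tmin=%d mC, ttarget=%d mC\n", tmin, ttarget);
	return 0;
}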
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c index c4c40be0edbf..d22f241b6a67 100644 --- a/drivers/hwmon/i5k_amb.c +++ b/drivers/hwmon/i5k_amb.c | |||
@@ -114,7 +114,6 @@ struct i5k_amb_data { | |||
114 | void __iomem *amb_mmio; | 114 | void __iomem *amb_mmio; |
115 | struct i5k_device_attribute *attrs; | 115 | struct i5k_device_attribute *attrs; |
116 | unsigned int num_attrs; | 116 | unsigned int num_attrs; |
117 | unsigned long chipset_id; | ||
118 | }; | 117 | }; |
119 | 118 | ||
120 | static ssize_t show_name(struct device *dev, struct device_attribute *devattr, | 119 | static ssize_t show_name(struct device *dev, struct device_attribute *devattr, |
@@ -444,8 +443,6 @@ static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data, | |||
444 | goto out; | 443 | goto out; |
445 | } | 444 | } |
446 | 445 | ||
447 | data->chipset_id = devid; | ||
448 | |||
449 | res = 0; | 446 | res = 0; |
450 | out: | 447 | out: |
451 | pci_dev_put(pcidev); | 448 | pci_dev_put(pcidev); |
@@ -478,23 +475,13 @@ out: | |||
478 | return res; | 475 | return res; |
479 | } | 476 | } |
480 | 477 | ||
481 | static unsigned long i5k_channel_pci_id(struct i5k_amb_data *data, | 478 | static struct { |
482 | unsigned long channel) | 479 | unsigned long err; |
483 | { | 480 | unsigned long fbd0; |
484 | switch (data->chipset_id) { | 481 | } chipset_ids[] __devinitdata = { |
485 | case PCI_DEVICE_ID_INTEL_5000_ERR: | 482 | { PCI_DEVICE_ID_INTEL_5000_ERR, PCI_DEVICE_ID_INTEL_5000_FBD0 }, |
486 | return PCI_DEVICE_ID_INTEL_5000_FBD0 + channel; | 483 | { PCI_DEVICE_ID_INTEL_5400_ERR, PCI_DEVICE_ID_INTEL_5400_FBD0 }, |
487 | case PCI_DEVICE_ID_INTEL_5400_ERR: | 484 | { 0, 0 } |
488 | return PCI_DEVICE_ID_INTEL_5400_FBD0 + channel; | ||
489 | default: | ||
490 | BUG(); | ||
491 | } | ||
492 | } | ||
493 | |||
494 | static unsigned long chipset_ids[] = { | ||
495 | PCI_DEVICE_ID_INTEL_5000_ERR, | ||
496 | PCI_DEVICE_ID_INTEL_5400_ERR, | ||
497 | 0 | ||
498 | }; | 485 | }; |
499 | 486 | ||
500 | #ifdef MODULE | 487 | #ifdef MODULE |
@@ -510,8 +497,7 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev) | |||
510 | { | 497 | { |
511 | struct i5k_amb_data *data; | 498 | struct i5k_amb_data *data; |
512 | struct resource *reso; | 499 | struct resource *reso; |
513 | int i; | 500 | int i, res; |
514 | int res = -ENODEV; | ||
515 | 501 | ||
516 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 502 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
517 | if (!data) | 503 | if (!data) |
@@ -520,22 +506,22 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev) | |||
520 | /* Figure out where the AMB registers live */ | 506 | /* Figure out where the AMB registers live */ |
521 | i = 0; | 507 | i = 0; |
522 | do { | 508 | do { |
523 | res = i5k_find_amb_registers(data, chipset_ids[i]); | 509 | res = i5k_find_amb_registers(data, chipset_ids[i].err); |
510 | if (res == 0) | ||
511 | break; | ||
524 | i++; | 512 | i++; |
525 | } while (res && chipset_ids[i]); | 513 | } while (chipset_ids[i].err); |
526 | 514 | ||
527 | if (res) | 515 | if (res) |
528 | goto err; | 516 | goto err; |
529 | 517 | ||
530 | /* Copy the DIMM presence map for the first two channels */ | 518 | /* Copy the DIMM presence map for the first two channels */ |
531 | res = i5k_channel_probe(&data->amb_present[0], | 519 | res = i5k_channel_probe(&data->amb_present[0], chipset_ids[i].fbd0); |
532 | i5k_channel_pci_id(data, 0)); | ||
533 | if (res) | 520 | if (res) |
534 | goto err; | 521 | goto err; |
535 | 522 | ||
536 | /* Copy the DIMM presence map for the optional second two channels */ | 523 | /* Copy the DIMM presence map for the optional second two channels */ |
537 | i5k_channel_probe(&data->amb_present[2], | 524 | i5k_channel_probe(&data->amb_present[2], chipset_ids[i].fbd0 + 1); |
538 | i5k_channel_pci_id(data, 1)); | ||
539 | 525 | ||
540 | /* Set up resource regions */ | 526 | /* Set up resource regions */ |
541 | reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME); | 527 | reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME); |
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index 1a409c5bc9bc..c316294c48b4 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c | |||
@@ -432,13 +432,15 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg, | |||
432 | aem_send_message(ipmi); | 432 | aem_send_message(ipmi); |
433 | 433 | ||
434 | res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT); | 434 | res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT); |
435 | if (!res) | 435 | if (!res) { |
436 | return -ETIMEDOUT; | 436 | res = -ETIMEDOUT; |
437 | goto out; | ||
438 | } | ||
437 | 439 | ||
438 | if (ipmi->rx_result || ipmi->rx_msg_len != rs_size || | 440 | if (ipmi->rx_result || ipmi->rx_msg_len != rs_size || |
439 | memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) { | 441 | memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) { |
440 | kfree(rs_resp); | 442 | res = -ENOENT; |
441 | return -ENOENT; | 443 | goto out; |
442 | } | 444 | } |
443 | 445 | ||
444 | switch (size) { | 446 | switch (size) { |
@@ -463,8 +465,11 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg, | |||
463 | break; | 465 | break; |
464 | } | 466 | } |
465 | } | 467 | } |
468 | res = 0; | ||
466 | 469 | ||
467 | return 0; | 470 | out: |
471 | kfree(rs_resp); | ||
472 | return res; | ||
468 | } | 473 | } |
469 | 474 | ||
470 | /* Update AEM energy registers */ | 475 | /* Update AEM energy registers */ |
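The ibmaem change funnels every failure in aem_read_sensor() through a single out: label so the rs_resp buffer is freed on all paths. A small sketch of that single-exit cleanup shape, with purely illustrative names:

    #include <stdlib.h>
    #include <errno.h>

    static int read_sensor(int simulate_timeout)
    {
        char *resp = malloc(64);   /* plays the role of rs_resp */
        int res;

        if (!resp)
            return -ENOMEM;

        if (simulate_timeout) {
            res = -ETIMEDOUT;      /* every failure jumps to the one exit */
            goto out;
        }

        /* ... parse the response here ... */
        res = 0;
    out:
        free(resp);                /* released on success and on every error path */
        return res;
    }

    int main(void)
    {
        return read_sensor(0) ? 1 : 0;
    }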
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c index d94a24fdf4ba..dd2d7b9620c2 100644 --- a/drivers/hwmon/max16065.c +++ b/drivers/hwmon/max16065.c | |||
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range) | |||
124 | 124 | ||
125 | static inline int ADC_TO_CURR(int adc, int gain) | 125 | static inline int ADC_TO_CURR(int adc, int gain) |
126 | { | 126 | { |
127 | return adc * 1400000 / gain * 255; | 127 | return adc * 1400000 / (gain * 255); |
128 | } | 128 | } |
129 | 129 | ||
130 | /* | 130 | /* |
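The max16065 fix is operator precedence: * and / associate left to right, so adc * 1400000 / gain * 255 evaluated as ((adc * 1400000) / gain) * 255, multiplying by 255 where the corrected form divides by gain * 255. A tiny worked example with arbitrary inputs:

    #include <stdio.h>

    int main(void)
    {
        int adc = 10, gain = 4;

        int before = adc * 1400000 / gain * 255;    /* ((adc * 1400000) / gain) * 255 */
        int after  = adc * 1400000 / (gain * 255);  /* adc * 1400000 / (gain * 255)   */

        printf("before=%d after=%d\n", before, after);   /* 892500000 vs 13725 */
        return 0;
    }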
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index d7926f4336b5..eab11615dced 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c | |||
@@ -211,8 +211,7 @@ static int lookup_comp(struct ntc_data *data, | |||
211 | if (data->comp[mid].ohm <= ohm) { | 211 | if (data->comp[mid].ohm <= ohm) { |
212 | *i_low = mid; | 212 | *i_low = mid; |
213 | *i_high = mid - 1; | 213 | *i_high = mid - 1; |
214 | } | 214 | } else { |
215 | if (data->comp[mid].ohm > ohm) { | ||
216 | *i_low = mid + 1; | 215 | *i_low = mid + 1; |
217 | *i_high = mid; | 216 | *i_high = mid; |
218 | } | 217 | } |
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c index d4bc114572de..ac254fba551b 100644 --- a/drivers/hwmon/pmbus/lm25066.c +++ b/drivers/hwmon/pmbus/lm25066.c | |||
@@ -161,6 +161,17 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg, | |||
161 | return ret; | 161 | return ret; |
162 | } | 162 | } |
163 | 163 | ||
164 | static int lm25066_write_byte(struct i2c_client *client, int page, u8 value) | ||
165 | { | ||
166 | if (page > 1) | ||
167 | return -EINVAL; | ||
168 | |||
169 | if (page == 0) | ||
170 | return pmbus_write_byte(client, 0, value); | ||
171 | |||
172 | return 0; | ||
173 | } | ||
174 | |||
164 | static int lm25066_probe(struct i2c_client *client, | 175 | static int lm25066_probe(struct i2c_client *client, |
165 | const struct i2c_device_id *id) | 176 | const struct i2c_device_id *id) |
166 | { | 177 | { |
@@ -204,6 +215,7 @@ static int lm25066_probe(struct i2c_client *client, | |||
204 | 215 | ||
205 | info->read_word_data = lm25066_read_word_data; | 216 | info->read_word_data = lm25066_read_word_data; |
206 | info->write_word_data = lm25066_write_word_data; | 217 | info->write_word_data = lm25066_write_word_data; |
218 | info->write_byte = lm25066_write_byte; | ||
207 | 219 | ||
208 | switch (id->driver_data) { | 220 | switch (id->driver_data) { |
209 | case lm25066: | 221 | case lm25066: |
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index 0808d986d75b..a6ae20ffef6b 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h | |||
@@ -325,6 +325,7 @@ struct pmbus_driver_info { | |||
325 | int (*read_word_data)(struct i2c_client *client, int page, int reg); | 325 | int (*read_word_data)(struct i2c_client *client, int page, int reg); |
326 | int (*write_word_data)(struct i2c_client *client, int page, int reg, | 326 | int (*write_word_data)(struct i2c_client *client, int page, int reg, |
327 | u16 word); | 327 | u16 word); |
328 | int (*write_byte)(struct i2c_client *client, int page, u8 value); | ||
328 | /* | 329 | /* |
329 | * The identify function determines supported PMBus functionality. | 330 | * The identify function determines supported PMBus functionality. |
330 | * This function is only necessary if a chip driver supports multiple | 331 | * This function is only necessary if a chip driver supports multiple |
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 5c1b6cf31701..397fc59b5682 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c | |||
@@ -182,6 +182,24 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value) | |||
182 | } | 182 | } |
183 | EXPORT_SYMBOL_GPL(pmbus_write_byte); | 183 | EXPORT_SYMBOL_GPL(pmbus_write_byte); |
184 | 184 | ||
185 | /* | ||
186 | * _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if | ||
187 | * a device specific mapping function exists and calls it if necessary. | ||
188 | */ | ||
189 | static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value) | ||
190 | { | ||
191 | struct pmbus_data *data = i2c_get_clientdata(client); | ||
192 | const struct pmbus_driver_info *info = data->info; | ||
193 | int status; | ||
194 | |||
195 | if (info->write_byte) { | ||
196 | status = info->write_byte(client, page, value); | ||
197 | if (status != -ENODATA) | ||
198 | return status; | ||
199 | } | ||
200 | return pmbus_write_byte(client, page, value); | ||
201 | } | ||
202 | |||
185 | int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) | 203 | int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) |
186 | { | 204 | { |
187 | int rv; | 205 | int rv; |
@@ -281,7 +299,7 @@ static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg) | |||
281 | 299 | ||
282 | static void pmbus_clear_fault_page(struct i2c_client *client, int page) | 300 | static void pmbus_clear_fault_page(struct i2c_client *client, int page) |
283 | { | 301 | { |
284 | pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); | 302 | _pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); |
285 | } | 303 | } |
286 | 304 | ||
287 | void pmbus_clear_faults(struct i2c_client *client) | 305 | void pmbus_clear_faults(struct i2c_client *client) |
@@ -960,6 +978,8 @@ static void pmbus_find_max_attr(struct i2c_client *client, | |||
960 | struct pmbus_limit_attr { | 978 | struct pmbus_limit_attr { |
961 | u16 reg; /* Limit register */ | 979 | u16 reg; /* Limit register */ |
962 | bool update; /* True if register needs updates */ | 980 | bool update; /* True if register needs updates */ |
981 | bool low; /* True if low limit; for limits with compare | ||
982 | functions only */ | ||
963 | const char *attr; /* Attribute name */ | 983 | const char *attr; /* Attribute name */ |
964 | const char *alarm; /* Alarm attribute name */ | 984 | const char *alarm; /* Alarm attribute name */ |
965 | u32 sbit; /* Alarm attribute status bit */ | 985 | u32 sbit; /* Alarm attribute status bit */ |
@@ -1011,7 +1031,8 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client, | |||
1011 | if (attr->compare) { | 1031 | if (attr->compare) { |
1012 | pmbus_add_boolean_cmp(data, name, | 1032 | pmbus_add_boolean_cmp(data, name, |
1013 | l->alarm, index, | 1033 | l->alarm, index, |
1014 | cbase, cindex, | 1034 | l->low ? cindex : cbase, |
1035 | l->low ? cbase : cindex, | ||
1015 | attr->sbase + page, l->sbit); | 1036 | attr->sbase + page, l->sbit); |
1016 | } else { | 1037 | } else { |
1017 | pmbus_add_boolean_reg(data, name, | 1038 | pmbus_add_boolean_reg(data, name, |
@@ -1348,11 +1369,13 @@ static const struct pmbus_sensor_attr power_attributes[] = { | |||
1348 | static const struct pmbus_limit_attr temp_limit_attrs[] = { | 1369 | static const struct pmbus_limit_attr temp_limit_attrs[] = { |
1349 | { | 1370 | { |
1350 | .reg = PMBUS_UT_WARN_LIMIT, | 1371 | .reg = PMBUS_UT_WARN_LIMIT, |
1372 | .low = true, | ||
1351 | .attr = "min", | 1373 | .attr = "min", |
1352 | .alarm = "min_alarm", | 1374 | .alarm = "min_alarm", |
1353 | .sbit = PB_TEMP_UT_WARNING, | 1375 | .sbit = PB_TEMP_UT_WARNING, |
1354 | }, { | 1376 | }, { |
1355 | .reg = PMBUS_UT_FAULT_LIMIT, | 1377 | .reg = PMBUS_UT_FAULT_LIMIT, |
1378 | .low = true, | ||
1356 | .attr = "lcrit", | 1379 | .attr = "lcrit", |
1357 | .alarm = "lcrit_alarm", | 1380 | .alarm = "lcrit_alarm", |
1358 | .sbit = PB_TEMP_UT_FAULT, | 1381 | .sbit = PB_TEMP_UT_FAULT, |
@@ -1381,11 +1404,13 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = { | |||
1381 | static const struct pmbus_limit_attr temp_limit_attrs23[] = { | 1404 | static const struct pmbus_limit_attr temp_limit_attrs23[] = { |
1382 | { | 1405 | { |
1383 | .reg = PMBUS_UT_WARN_LIMIT, | 1406 | .reg = PMBUS_UT_WARN_LIMIT, |
1407 | .low = true, | ||
1384 | .attr = "min", | 1408 | .attr = "min", |
1385 | .alarm = "min_alarm", | 1409 | .alarm = "min_alarm", |
1386 | .sbit = PB_TEMP_UT_WARNING, | 1410 | .sbit = PB_TEMP_UT_WARNING, |
1387 | }, { | 1411 | }, { |
1388 | .reg = PMBUS_UT_FAULT_LIMIT, | 1412 | .reg = PMBUS_UT_FAULT_LIMIT, |
1413 | .low = true, | ||
1389 | .attr = "lcrit", | 1414 | .attr = "lcrit", |
1390 | .alarm = "lcrit_alarm", | 1415 | .alarm = "lcrit_alarm", |
1391 | .sbit = PB_TEMP_UT_FAULT, | 1416 | .sbit = PB_TEMP_UT_FAULT, |
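The pmbus_core hunks add an optional per-chip write_byte hook that _pmbus_write_byte() tries first, falling back to the generic pmbus_write_byte() when the hook returns -ENODATA; the lm25066 hunk earlier shows a chip using it to absorb page-1 byte writes. The sketch below mirrors only that delegation convention; the names and the printf-backed "bus" are placeholders.

    #include <errno.h>
    #include <stdio.h>

    static int generic_write_byte(int page, unsigned char value)
    {
        printf("generic write: page %d value 0x%02x\n", page, value);
        return 0;
    }

    /* A chip hook may consume the write, fail it, or return -ENODATA to
     * mean "nothing special here, use the generic path". */
    static int chip_write_byte(int page, unsigned char value)
    {
        (void)value;
        if (page > 1)
            return -EINVAL;     /* this imaginary chip only has pages 0 and 1 */
        if (page == 1)
            return 0;           /* silently absorb page-1 writes */
        return -ENODATA;        /* page 0: defer to the generic code */
    }

    static int dispatch_write_byte(int page, unsigned char value)
    {
        int status = chip_write_byte(page, value);

        if (status != -ENODATA)
            return status;
        return generic_write_byte(page, value);
    }

    int main(void)
    {
        dispatch_write_byte(0, 0x03);   /* reaches the generic path */
        dispatch_write_byte(1, 0x03);   /* swallowed by the chip hook */
        return 0;
    }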
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index ace1c7319734..d0ddb60155c9 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c | |||
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client, | |||
141 | block_buffer[ret] = '\0'; | 141 | block_buffer[ret] = '\0'; |
142 | dev_info(&client->dev, "Device ID %s\n", block_buffer); | 142 | dev_info(&client->dev, "Device ID %s\n", block_buffer); |
143 | 143 | ||
144 | mid = NULL; | 144 | for (mid = ucd9000_id; mid->name[0]; mid++) { |
145 | for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) { | ||
146 | mid = &ucd9000_id[i]; | ||
147 | if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) | 145 | if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) |
148 | break; | 146 | break; |
149 | } | 147 | } |
150 | if (!mid || !strlen(mid->name)) { | 148 | if (!mid->name[0]) { |
151 | dev_err(&client->dev, "Unsupported device\n"); | 149 | dev_err(&client->dev, "Unsupported device\n"); |
152 | return -ENODEV; | 150 | return -ENODEV; |
153 | } | 151 | } |
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c index ffcc1cf3609d..c65e9da707cc 100644 --- a/drivers/hwmon/pmbus/ucd9200.c +++ b/drivers/hwmon/pmbus/ucd9200.c | |||
@@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client, | |||
68 | block_buffer[ret] = '\0'; | 68 | block_buffer[ret] = '\0'; |
69 | dev_info(&client->dev, "Device ID %s\n", block_buffer); | 69 | dev_info(&client->dev, "Device ID %s\n", block_buffer); |
70 | 70 | ||
71 | mid = NULL; | 71 | for (mid = ucd9200_id; mid->name[0]; mid++) { |
72 | for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) { | ||
73 | mid = &ucd9200_id[i]; | ||
74 | if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) | 72 | if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) |
75 | break; | 73 | break; |
76 | } | 74 | } |
77 | if (!mid || !strlen(mid->name)) { | 75 | if (!mid->name[0]) { |
78 | dev_err(&client->dev, "Unsupported device\n"); | 76 | dev_err(&client->dev, "Unsupported device\n"); |
79 | return -ENODEV; | 77 | return -ENODEV; |
80 | } | 78 | } |
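Both ucd9000 and ucd9200 now walk their id tables with a pointer until the empty-name sentinel instead of indexing up to the array size, which also removes the mid == NULL special case. A compact sketch of such a sentinel-terminated scan (table entries and the probed string are illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    struct dev_id { char name[16]; };

    static const struct dev_id ids[] = {
        { "ucd90120" },
        { "ucd90124" },
        { "" }              /* empty name terminates the table */
    };

    int main(void)
    {
        const char *reported = "UCD90124|2.3.4";   /* made-up device ID string */
        const struct dev_id *mid;

        for (mid = ids; mid->name[0]; mid++)
            if (!strncasecmp(mid->name, reported, strlen(mid->name)))
                break;

        if (!mid->name[0])
            printf("unsupported device\n");
        else
            printf("matched %s\n", mid->name);
        return 0;
    }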
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 0c731ca69f15..b228e09c5d05 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c | |||
@@ -146,6 +146,7 @@ struct i2c_nmk_client { | |||
146 | * @stop: stop condition | 146 | * @stop: stop condition |
147 | * @xfer_complete: acknowledge completion for an I2C message | 147 | * @xfer_complete: acknowledge completion for an I2C message |
148 | * @result: controller propagated result | 148 | * @result: controller propagated result |
149 | * @regulator: pointer to i2c regulator | ||
149 | * @busy: Busy doing transfer | 150 | * @busy: Busy doing transfer |
150 | */ | 151 | */ |
151 | struct nmk_i2c_dev { | 152 | struct nmk_i2c_dev { |
@@ -417,12 +418,12 @@ static int read_i2c(struct nmk_i2c_dev *dev) | |||
417 | writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, | 418 | writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, |
418 | dev->virtbase + I2C_IMSCR); | 419 | dev->virtbase + I2C_IMSCR); |
419 | 420 | ||
420 | timeout = wait_for_completion_interruptible_timeout( | 421 | timeout = wait_for_completion_timeout( |
421 | &dev->xfer_complete, dev->adap.timeout); | 422 | &dev->xfer_complete, dev->adap.timeout); |
422 | 423 | ||
423 | if (timeout < 0) { | 424 | if (timeout < 0) { |
424 | dev_err(&dev->pdev->dev, | 425 | dev_err(&dev->pdev->dev, |
425 | "wait_for_completion_interruptible_timeout" | 426 | "wait_for_completion_timeout" |
426 | "returned %d waiting for event\n", timeout); | 427 | "returned %d waiting for event\n", timeout); |
427 | status = timeout; | 428 | status = timeout; |
428 | } | 429 | } |
@@ -504,12 +505,12 @@ static int write_i2c(struct nmk_i2c_dev *dev) | |||
504 | writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, | 505 | writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, |
505 | dev->virtbase + I2C_IMSCR); | 506 | dev->virtbase + I2C_IMSCR); |
506 | 507 | ||
507 | timeout = wait_for_completion_interruptible_timeout( | 508 | timeout = wait_for_completion_timeout( |
508 | &dev->xfer_complete, dev->adap.timeout); | 509 | &dev->xfer_complete, dev->adap.timeout); |
509 | 510 | ||
510 | if (timeout < 0) { | 511 | if (timeout < 0) { |
511 | dev_err(&dev->pdev->dev, | 512 | dev_err(&dev->pdev->dev, |
512 | "wait_for_completion_interruptible_timeout" | 513 | "wait_for_completion_timeout " |
513 | "returned %d waiting for event\n", timeout); | 514 | "returned %d waiting for event\n", timeout); |
514 | status = timeout; | 515 | status = timeout; |
515 | } | 516 | } |
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 1a766cf74f6b..2dfb63176856 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -1139,41 +1139,12 @@ omap_i2c_remove(struct platform_device *pdev) | |||
1139 | return 0; | 1139 | return 0; |
1140 | } | 1140 | } |
1141 | 1141 | ||
1142 | #ifdef CONFIG_SUSPEND | ||
1143 | static int omap_i2c_suspend(struct device *dev) | ||
1144 | { | ||
1145 | if (!pm_runtime_suspended(dev)) | ||
1146 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) | ||
1147 | dev->bus->pm->runtime_suspend(dev); | ||
1148 | |||
1149 | return 0; | ||
1150 | } | ||
1151 | |||
1152 | static int omap_i2c_resume(struct device *dev) | ||
1153 | { | ||
1154 | if (!pm_runtime_suspended(dev)) | ||
1155 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) | ||
1156 | dev->bus->pm->runtime_resume(dev); | ||
1157 | |||
1158 | return 0; | ||
1159 | } | ||
1160 | |||
1161 | static struct dev_pm_ops omap_i2c_pm_ops = { | ||
1162 | .suspend = omap_i2c_suspend, | ||
1163 | .resume = omap_i2c_resume, | ||
1164 | }; | ||
1165 | #define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) | ||
1166 | #else | ||
1167 | #define OMAP_I2C_PM_OPS NULL | ||
1168 | #endif | ||
1169 | |||
1170 | static struct platform_driver omap_i2c_driver = { | 1142 | static struct platform_driver omap_i2c_driver = { |
1171 | .probe = omap_i2c_probe, | 1143 | .probe = omap_i2c_probe, |
1172 | .remove = omap_i2c_remove, | 1144 | .remove = omap_i2c_remove, |
1173 | .driver = { | 1145 | .driver = { |
1174 | .name = "omap_i2c", | 1146 | .name = "omap_i2c", |
1175 | .owner = THIS_MODULE, | 1147 | .owner = THIS_MODULE, |
1176 | .pm = OMAP_I2C_PM_OPS, | ||
1177 | }, | 1148 | }, |
1178 | }; | 1149 | }; |
1179 | 1150 | ||
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c index 6659d269b841..b73da6cd6f91 100644 --- a/drivers/i2c/busses/i2c-pxa-pci.c +++ b/drivers/i2c/busses/i2c-pxa-pci.c | |||
@@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev, | |||
109 | return -EINVAL; | 109 | return -EINVAL; |
110 | } | 110 | } |
111 | sds = kzalloc(sizeof(*sds), GFP_KERNEL); | 111 | sds = kzalloc(sizeof(*sds), GFP_KERNEL); |
112 | if (!sds) | 112 | if (!sds) { |
113 | ret = -ENOMEM; | ||
113 | goto err_mem; | 114 | goto err_mem; |
115 | } | ||
114 | 116 | ||
115 | for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) { | 117 | for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) { |
116 | sds->pdev[i] = add_i2c_device(dev, i); | 118 | sds->pdev[i] = add_i2c_device(dev, i); |
117 | if (IS_ERR(sds->pdev[i])) { | 119 | if (IS_ERR(sds->pdev[i])) { |
120 | ret = PTR_ERR(sds->pdev[i]); | ||
118 | while (--i >= 0) | 121 | while (--i >= 0) |
119 | platform_device_unregister(sds->pdev[i]); | 122 | platform_device_unregister(sds->pdev[i]); |
120 | goto err_dev_add; | 123 | goto err_dev_add; |
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 2440b7411978..3c94c4a81a55 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c | |||
@@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev) | |||
270 | 270 | ||
271 | /* Rounds down to not include partial word at the end of buf */ | 271 | /* Rounds down to not include partial word at the end of buf */ |
272 | words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; | 272 | words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; |
273 | if (words_to_transfer > tx_fifo_avail) | ||
274 | words_to_transfer = tx_fifo_avail; | ||
275 | 273 | ||
276 | i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); | 274 | /* It's very common to have < 4 bytes, so optimize that case. */ |
277 | 275 | if (words_to_transfer) { | |
278 | buf += words_to_transfer * BYTES_PER_FIFO_WORD; | 276 | if (words_to_transfer > tx_fifo_avail) |
279 | buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; | 277 | words_to_transfer = tx_fifo_avail; |
280 | tx_fifo_avail -= words_to_transfer; | 278 | |
279 | /* | ||
279 | * Update state before writing to FIFO. If this causes us | ||
281 | * to finish writing all bytes (AKA buf_remaining goes to 0) we | ||
282 | * have a potential for an interrupt (PACKET_XFER_COMPLETE is | ||
283 | * not maskable). We need to make sure that the isr sees | ||
284 | * buf_remaining as 0 and doesn't call us back re-entrantly. | ||
285 | */ | ||
286 | buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; | ||
287 | tx_fifo_avail -= words_to_transfer; | ||
288 | i2c_dev->msg_buf_remaining = buf_remaining; | ||
289 | i2c_dev->msg_buf = buf + | ||
290 | words_to_transfer * BYTES_PER_FIFO_WORD; | ||
291 | barrier(); | ||
292 | |||
293 | i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); | ||
294 | |||
295 | buf += words_to_transfer * BYTES_PER_FIFO_WORD; | ||
296 | } | ||
281 | 297 | ||
282 | /* | 298 | /* |
283 | * If there is a partial word at the end of buf, handle it manually to | 299 | * If there is a partial word at the end of buf, handle it manually to |
@@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev) | |||
287 | if (tx_fifo_avail > 0 && buf_remaining > 0) { | 303 | if (tx_fifo_avail > 0 && buf_remaining > 0) { |
288 | BUG_ON(buf_remaining > 3); | 304 | BUG_ON(buf_remaining > 3); |
289 | memcpy(&val, buf, buf_remaining); | 305 | memcpy(&val, buf, buf_remaining); |
306 | |||
307 | /* Again update before writing to FIFO to make sure isr sees. */ | ||
308 | i2c_dev->msg_buf_remaining = 0; | ||
309 | i2c_dev->msg_buf = NULL; | ||
310 | barrier(); | ||
311 | |||
290 | i2c_writel(i2c_dev, val, I2C_TX_FIFO); | 312 | i2c_writel(i2c_dev, val, I2C_TX_FIFO); |
291 | buf_remaining = 0; | ||
292 | tx_fifo_avail--; | ||
293 | } | 313 | } |
294 | 314 | ||
295 | BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0); | ||
296 | i2c_dev->msg_buf_remaining = buf_remaining; | ||
297 | i2c_dev->msg_buf = buf; | ||
298 | return 0; | 315 | return 0; |
299 | } | 316 | } |
300 | 317 | ||
@@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id) | |||
411 | tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ); | 428 | tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ); |
412 | } | 429 | } |
413 | 430 | ||
414 | if ((status & I2C_INT_PACKET_XFER_COMPLETE) && | 431 | if (status & I2C_INT_PACKET_XFER_COMPLETE) { |
415 | !i2c_dev->msg_buf_remaining) | 432 | BUG_ON(i2c_dev->msg_buf_remaining); |
416 | complete(&i2c_dev->msg_complete); | 433 | complete(&i2c_dev->msg_complete); |
434 | } | ||
417 | 435 | ||
418 | i2c_writel(i2c_dev, status, I2C_INT_STATUS); | 436 | i2c_writel(i2c_dev, status, I2C_INT_STATUS); |
419 | if (i2c_dev->is_dvc) | 437 | if (i2c_dev->is_dvc) |
@@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], | |||
531 | 549 | ||
532 | static u32 tegra_i2c_func(struct i2c_adapter *adap) | 550 | static u32 tegra_i2c_func(struct i2c_adapter *adap) |
533 | { | 551 | { |
534 | return I2C_FUNC_I2C; | 552 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; |
535 | } | 553 | } |
536 | 554 | ||
537 | static const struct i2c_algorithm tegra_i2c_algo = { | 555 | static const struct i2c_algorithm tegra_i2c_algo = { |
@@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev) | |||
719 | } | 737 | } |
720 | #endif | 738 | #endif |
721 | 739 | ||
740 | #if defined(CONFIG_OF) | ||
741 | /* Match table for of_platform binding */ | ||
742 | static const struct of_device_id tegra_i2c_of_match[] __devinitconst = { | ||
743 | { .compatible = "nvidia,tegra20-i2c", }, | ||
744 | {}, | ||
745 | }; | ||
746 | MODULE_DEVICE_TABLE(of, tegra_i2c_of_match); | ||
747 | #else | ||
748 | #define tegra_i2c_of_match NULL | ||
749 | #endif | ||
750 | |||
722 | static struct platform_driver tegra_i2c_driver = { | 751 | static struct platform_driver tegra_i2c_driver = { |
723 | .probe = tegra_i2c_probe, | 752 | .probe = tegra_i2c_probe, |
724 | .remove = tegra_i2c_remove, | 753 | .remove = tegra_i2c_remove, |
@@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = { | |||
729 | .driver = { | 758 | .driver = { |
730 | .name = "tegra-i2c", | 759 | .name = "tegra-i2c", |
731 | .owner = THIS_MODULE, | 760 | .owner = THIS_MODULE, |
761 | .of_match_table = tegra_i2c_of_match, | ||
732 | }, | 762 | }, |
733 | }; | 763 | }; |
734 | 764 | ||
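The heart of the tegra-i2c FIFO change is ordering: msg_buf and msg_buf_remaining are updated, and a compiler barrier issued, before the FIFO write that can complete the transfer, so the non-maskable PACKET_XFER_COMPLETE interrupt never sees stale bookkeeping. A user-space sketch of that publish-state-then-poke-hardware ordering; barrier() here is a plain compiler barrier and fifo_write() a stub, both stand-ins for the kernel primitives.

    #include <stddef.h>

    #define barrier() __asm__ __volatile__("" ::: "memory")   /* compiler barrier, as in the kernel */

    struct xfer {
        const unsigned char *buf;
        size_t remaining;
    };

    /* stub for the actual FIFO register write */
    static void fifo_write(const unsigned char *buf, size_t words)
    {
        (void)buf;
        (void)words;
    }

    static void fill_fifo(struct xfer *x, size_t words)
    {
        const unsigned char *buf = x->buf;

        /* 1. publish the state the interrupt handler will inspect ... */
        x->remaining -= words * 4;
        x->buf = buf + words * 4;
        barrier();          /* ... and keep the compiler from reordering it */

        /* 2. only then trigger the hardware, which may raise the IRQ */
        fifo_write(buf, words);
    }

    int main(void)
    {
        unsigned char data[16] = { 0 };
        struct xfer x = { data, sizeof(data) };

        fill_fifo(&x, 4);   /* 4 words of 4 bytes */
        return (int)x.remaining;
    }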
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index aa30915c71ea..7567b6000230 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -717,11 +717,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
717 | { | 717 | { |
718 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 718 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
719 | struct ipoib_neigh *neigh; | 719 | struct ipoib_neigh *neigh; |
720 | struct neighbour *n; | 720 | struct neighbour *n = NULL; |
721 | unsigned long flags; | 721 | unsigned long flags; |
722 | 722 | ||
723 | n = dst_get_neighbour(skb_dst(skb)); | 723 | if (likely(skb_dst(skb))) |
724 | if (likely(skb_dst(skb) && n)) { | 724 | n = dst_get_neighbour(skb_dst(skb)); |
725 | |||
726 | if (likely(n)) { | ||
725 | if (unlikely(!*to_ipoib_neigh(n))) { | 727 | if (unlikely(!*to_ipoib_neigh(n))) { |
726 | ipoib_path_lookup(skb, dev); | 728 | ipoib_path_lookup(skb, dev); |
727 | return NETDEV_TX_OK; | 729 | return NETDEV_TX_OK; |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 8db008de5392..9c61b9c2c597 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -101,13 +101,17 @@ iscsi_iser_recv(struct iscsi_conn *conn, | |||
101 | 101 | ||
102 | /* verify PDU length */ | 102 | /* verify PDU length */ |
103 | datalen = ntoh24(hdr->dlength); | 103 | datalen = ntoh24(hdr->dlength); |
104 | if (datalen != rx_data_len) { | 104 | if (datalen > rx_data_len || (datalen + 4) < rx_data_len) { |
105 | printk(KERN_ERR "iscsi_iser: datalen %d (hdr) != %d (IB) \n", | 105 | iser_err("wrong datalen %d (hdr), %d (IB)\n", |
106 | datalen, rx_data_len); | 106 | datalen, rx_data_len); |
107 | rc = ISCSI_ERR_DATALEN; | 107 | rc = ISCSI_ERR_DATALEN; |
108 | goto error; | 108 | goto error; |
109 | } | 109 | } |
110 | 110 | ||
111 | if (datalen != rx_data_len) | ||
112 | iser_dbg("aligned datalen (%d) hdr, %d (IB)\n", | ||
113 | datalen, rx_data_len); | ||
114 | |||
111 | /* read AHS */ | 115 | /* read AHS */ |
112 | ahslen = hdr->hlength * 4; | 116 | ahslen = hdr->hlength * 4; |
113 | 117 | ||
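The relaxed iser length check accepts a receive that is up to 4 bytes longer than the dlength carried in the header (alignment padding) while still rejecting short or oversized PDUs, where the old test demanded exact equality. A worked check with arbitrary lengths:

    #include <stdio.h>

    static int datalen_ok(int datalen, int rx_data_len)
    {
        return !(datalen > rx_data_len || (datalen + 4) < rx_data_len);
    }

    int main(void)
    {
        printf("%d\n", datalen_ok(13, 16));   /* 1: 3 bytes of padding allowed */
        printf("%d\n", datalen_ok(13, 12));   /* 0: fewer bytes than the header says */
        printf("%d\n", datalen_ok(13, 20));   /* 0: more than 4 bytes of extra data */
        return 0;
    }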
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 342cbc1bdaae..db6f3ce9f3bf 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -89,7 +89,7 @@ | |||
89 | } while (0) | 89 | } while (0) |
90 | 90 | ||
91 | #define SHIFT_4K 12 | 91 | #define SHIFT_4K 12 |
92 | #define SIZE_4K (1UL << SHIFT_4K) | 92 | #define SIZE_4K (1ULL << SHIFT_4K) |
93 | #define MASK_4K (~(SIZE_4K-1)) | 93 | #define MASK_4K (~(SIZE_4K-1)) |
94 | 94 | ||
95 | /* support up to 512KB in one RDMA */ | 95 | /* support up to 512KB in one RDMA */ |
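Defining SIZE_4K with 1ULL matters on 32-bit builds: with 1UL the constant, and therefore MASK_4K, is only 32 bits wide, so masking a 64-bit DMA address would also clear its upper half. A small demonstration that forces the 32-bit behaviour with a cast (the address value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x0000000123456789ULL;    /* arbitrary 64-bit DMA address */

        /* emulate MASK_4K built from a 32-bit 1UL vs. a 64-bit 1ULL */
        uint64_t mask32 = ~(uint32_t)((1UL << 12) - 1);
        uint64_t mask64 = ~(uint64_t)((1ULL << 12) - 1);

        printf("32-bit mask: 0x%llx\n", (unsigned long long)(addr & mask32)); /* upper half lost */
        printf("64-bit mask: 0x%llx\n", (unsigned long long)(addr & mask64));
        return 0;
    }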
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 5745b7fe158c..f299de6b419b 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c | |||
@@ -412,7 +412,7 @@ int iser_send_control(struct iscsi_conn *conn, | |||
412 | memcpy(iser_conn->ib_conn->login_buf, task->data, | 412 | memcpy(iser_conn->ib_conn->login_buf, task->data, |
413 | task->data_count); | 413 | task->data_count); |
414 | tx_dsg->addr = iser_conn->ib_conn->login_dma; | 414 | tx_dsg->addr = iser_conn->ib_conn->login_dma; |
415 | tx_dsg->length = data_seg_len; | 415 | tx_dsg->length = task->data_count; |
416 | tx_dsg->lkey = device->mr->lkey; | 416 | tx_dsg->lkey = device->mr->lkey; |
417 | mdesc->num_sge = 2; | 417 | mdesc->num_sge = 2; |
418 | } | 418 | } |
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 9882971827e6..358cd7ee905b 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c | |||
@@ -139,7 +139,7 @@ struct analog_port { | |||
139 | #include <linux/i8253.h> | 139 | #include <linux/i8253.h> |
140 | 140 | ||
141 | #define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0) | 141 | #define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0) |
142 | #define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? CLOCK_TICK_RATE / HZ : 0))) | 142 | #define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0))) |
143 | #define TIME_NAME (cpu_has_tsc?"TSC":"PIT") | 143 | #define TIME_NAME (cpu_has_tsc?"TSC":"PIT") |
144 | static unsigned int get_time_pit(void) | 144 | static unsigned int get_time_pit(void) |
145 | { | 145 | { |
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index 7b404e5443ed..e34eeb8ae371 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c | |||
@@ -668,4 +668,3 @@ module_exit(adp5588_exit); | |||
668 | MODULE_LICENSE("GPL"); | 668 | MODULE_LICENSE("GPL"); |
669 | MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); | 669 | MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); |
670 | MODULE_DESCRIPTION("ADP5588/87 Keypad driver"); | 670 | MODULE_DESCRIPTION("ADP5588/87 Keypad driver"); |
671 | MODULE_ALIAS("platform:adp5588-keys"); | ||
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c index c8242dd190d0..aa17e024d803 100644 --- a/drivers/input/keyboard/ep93xx_keypad.c +++ b/drivers/input/keyboard/ep93xx_keypad.c | |||
@@ -20,6 +20,7 @@ | |||
20 | * flag. | 20 | * flag. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/module.h> | ||
23 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
24 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
25 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index f270447ba951..a5a77915c650 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c | |||
@@ -702,7 +702,7 @@ err_iounmap: | |||
702 | err_free_mem_region: | 702 | err_free_mem_region: |
703 | release_mem_region(res->start, resource_size(res)); | 703 | release_mem_region(res->start, resource_size(res)); |
704 | err_free_mem: | 704 | err_free_mem: |
705 | input_free_device(kbc->idev); | 705 | input_free_device(input_dev); |
706 | kfree(kbc); | 706 | kfree(kbc); |
707 | 707 | ||
708 | return err; | 708 | return err; |
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c index e21deb1baa8a..025417d74ca2 100644 --- a/drivers/input/misc/ad714x-i2c.c +++ b/drivers/input/misc/ad714x-i2c.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * AD714X CapTouch Programmable Controller driver (I2C bus) | 2 | * AD714X CapTouch Programmable Controller driver (I2C bus) |
3 | * | 3 | * |
4 | * Copyright 2009 Analog Devices Inc. | 4 | * Copyright 2009-2011 Analog Devices Inc. |
5 | * | 5 | * |
6 | * Licensed under the GPL-2 or later. | 6 | * Licensed under the GPL-2 or later. |
7 | */ | 7 | */ |
@@ -27,54 +27,49 @@ static int ad714x_i2c_resume(struct device *dev) | |||
27 | 27 | ||
28 | static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume); | 28 | static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume); |
29 | 29 | ||
30 | static int ad714x_i2c_write(struct device *dev, unsigned short reg, | 30 | static int ad714x_i2c_write(struct ad714x_chip *chip, |
31 | unsigned short data) | 31 | unsigned short reg, unsigned short data) |
32 | { | 32 | { |
33 | struct i2c_client *client = to_i2c_client(dev); | 33 | struct i2c_client *client = to_i2c_client(chip->dev); |
34 | int ret = 0; | 34 | int error; |
35 | u8 *_reg = (u8 *)&reg; | 35 | |
36 | u8 *_data = (u8 *)&data; | 36 | chip->xfer_buf[0] = cpu_to_be16(reg); |
37 | 37 | chip->xfer_buf[1] = cpu_to_be16(data); | |
38 | u8 tx[4] = { | 38 | |
39 | _reg[1], | 39 | error = i2c_master_send(client, (u8 *)chip->xfer_buf, |
40 | _reg[0], | 40 | 2 * sizeof(*chip->xfer_buf)); |
41 | _data[1], | 41 | if (unlikely(error < 0)) { |
42 | _data[0] | 42 | dev_err(&client->dev, "I2C write error: %d\n", error); |
43 | }; | 43 | return error; |
44 | 44 | } | |
45 | ret = i2c_master_send(client, tx, 4); | 45 | |
46 | if (ret < 0) | 46 | return 0; |
47 | dev_err(&client->dev, "I2C write error\n"); | ||
48 | |||
49 | return ret; | ||
50 | } | 47 | } |
51 | 48 | ||
52 | static int ad714x_i2c_read(struct device *dev, unsigned short reg, | 49 | static int ad714x_i2c_read(struct ad714x_chip *chip, |
53 | unsigned short *data) | 50 | unsigned short reg, unsigned short *data, size_t len) |
54 | { | 51 | { |
55 | struct i2c_client *client = to_i2c_client(dev); | 52 | struct i2c_client *client = to_i2c_client(chip->dev); |
56 | int ret = 0; | 53 | int i; |
57 | u8 *_reg = (u8 *)&reg; | 54 | int error; |
58 | u8 *_data = (u8 *)data; | 55 | |
59 | 56 | chip->xfer_buf[0] = cpu_to_be16(reg); | |
60 | u8 tx[2] = { | 57 | |
61 | _reg[1], | 58 | error = i2c_master_send(client, (u8 *)chip->xfer_buf, |
62 | _reg[0] | 59 | sizeof(*chip->xfer_buf)); |
63 | }; | 60 | if (error >= 0) |
64 | u8 rx[2]; | 61 | error = i2c_master_recv(client, (u8 *)chip->xfer_buf, |
65 | 62 | len * sizeof(*chip->xfer_buf)); | |
66 | ret = i2c_master_send(client, tx, 2); | 63 | |
67 | if (ret >= 0) | 64 | if (unlikely(error < 0)) { |
68 | ret = i2c_master_recv(client, rx, 2); | 65 | dev_err(&client->dev, "I2C read error: %d\n", error); |
69 | 66 | return error; | |
70 | if (unlikely(ret < 0)) { | ||
71 | dev_err(&client->dev, "I2C read error\n"); | ||
72 | } else { | ||
73 | _data[0] = rx[1]; | ||
74 | _data[1] = rx[0]; | ||
75 | } | 67 | } |
76 | 68 | ||
77 | return ret; | 69 | for (i = 0; i < len; i++) |
70 | data[i] = be16_to_cpu(chip->xfer_buf[i]); | ||
71 | |||
72 | return 0; | ||
78 | } | 73 | } |
79 | 74 | ||
80 | static int __devinit ad714x_i2c_probe(struct i2c_client *client, | 75 | static int __devinit ad714x_i2c_probe(struct i2c_client *client, |
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c index 4120dd549305..875b50811361 100644 --- a/drivers/input/misc/ad714x-spi.c +++ b/drivers/input/misc/ad714x-spi.c | |||
@@ -1,12 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * AD714X CapTouch Programmable Controller driver (SPI bus) | 2 | * AD714X CapTouch Programmable Controller driver (SPI bus) |
3 | * | 3 | * |
4 | * Copyright 2009 Analog Devices Inc. | 4 | * Copyright 2009-2011 Analog Devices Inc. |
5 | * | 5 | * |
6 | * Licensed under the GPL-2 or later. | 6 | * Licensed under the GPL-2 or later. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/input.h> /* BUS_I2C */ | 9 | #include <linux/input.h> /* BUS_SPI */ |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/spi/spi.h> | 11 | #include <linux/spi/spi.h> |
12 | #include <linux/pm.h> | 12 | #include <linux/pm.h> |
@@ -30,30 +30,68 @@ static int ad714x_spi_resume(struct device *dev) | |||
30 | 30 | ||
31 | static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume); | 31 | static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume); |
32 | 32 | ||
33 | static int ad714x_spi_read(struct device *dev, unsigned short reg, | 33 | static int ad714x_spi_read(struct ad714x_chip *chip, |
34 | unsigned short *data) | 34 | unsigned short reg, unsigned short *data, size_t len) |
35 | { | 35 | { |
36 | struct spi_device *spi = to_spi_device(dev); | 36 | struct spi_device *spi = to_spi_device(chip->dev); |
37 | unsigned short tx = AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg; | 37 | struct spi_message message; |
38 | struct spi_transfer xfer[2]; | ||
39 | int i; | ||
40 | int error; | ||
41 | |||
42 | spi_message_init(&message); | ||
43 | memset(xfer, 0, sizeof(xfer)); | ||
44 | |||
45 | chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | | ||
46 | AD714x_SPI_READ | reg); | ||
47 | xfer[0].tx_buf = &chip->xfer_buf[0]; | ||
48 | xfer[0].len = sizeof(chip->xfer_buf[0]); | ||
49 | spi_message_add_tail(&xfer[0], &message); | ||
50 | |||
51 | xfer[1].rx_buf = &chip->xfer_buf[1]; | ||
52 | xfer[1].len = sizeof(chip->xfer_buf[1]) * len; | ||
53 | spi_message_add_tail(&xfer[1], &message); | ||
54 | |||
55 | error = spi_sync(spi, &message); | ||
56 | if (unlikely(error)) { | ||
57 | dev_err(chip->dev, "SPI read error: %d\n", error); | ||
58 | return error; | ||
59 | } | ||
60 | |||
61 | for (i = 0; i < len; i++) | ||
62 | data[i] = be16_to_cpu(chip->xfer_buf[i + 1]); | ||
38 | 63 | ||
39 | return spi_write_then_read(spi, (u8 *)&tx, 2, (u8 *)data, 2); | 64 | return 0; |
40 | } | 65 | } |
41 | 66 | ||
42 | static int ad714x_spi_write(struct device *dev, unsigned short reg, | 67 | static int ad714x_spi_write(struct ad714x_chip *chip, |
43 | unsigned short data) | 68 | unsigned short reg, unsigned short data) |
44 | { | 69 | { |
45 | struct spi_device *spi = to_spi_device(dev); | 70 | struct spi_device *spi = to_spi_device(chip->dev); |
46 | unsigned short tx[2] = { | 71 | int error; |
47 | AD714x_SPI_CMD_PREFIX | reg, | 72 | |
48 | data | 73 | chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg); |
49 | }; | 74 | chip->xfer_buf[1] = cpu_to_be16(data); |
75 | |||
76 | error = spi_write(spi, (u8 *)chip->xfer_buf, | ||
77 | 2 * sizeof(*chip->xfer_buf)); | ||
78 | if (unlikely(error)) { | ||
79 | dev_err(chip->dev, "SPI write error: %d\n", error); | ||
80 | return error; | ||
81 | } | ||
50 | 82 | ||
51 | return spi_write(spi, (u8 *)tx, 4); | 83 | return 0; |
52 | } | 84 | } |
53 | 85 | ||
54 | static int __devinit ad714x_spi_probe(struct spi_device *spi) | 86 | static int __devinit ad714x_spi_probe(struct spi_device *spi) |
55 | { | 87 | { |
56 | struct ad714x_chip *chip; | 88 | struct ad714x_chip *chip; |
89 | int err; | ||
90 | |||
91 | spi->bits_per_word = 8; | ||
92 | err = spi_setup(spi); | ||
93 | if (err < 0) | ||
94 | return err; | ||
57 | 95 | ||
58 | chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq, | 96 | chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq, |
59 | ad714x_spi_read, ad714x_spi_write); | 97 | ad714x_spi_read, ad714x_spi_write); |
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c index c3a62c42cd28..ca42c7d2a3c7 100644 --- a/drivers/input/misc/ad714x.c +++ b/drivers/input/misc/ad714x.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A | 2 | * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A |
3 | * | 3 | * |
4 | * Copyright 2009 Analog Devices Inc. | 4 | * Copyright 2009-2011 Analog Devices Inc. |
5 | * | 5 | * |
6 | * Licensed under the GPL-2 or later. | 6 | * Licensed under the GPL-2 or later. |
7 | */ | 7 | */ |
@@ -59,7 +59,6 @@ | |||
59 | #define STAGE11_AMBIENT 0x27D | 59 | #define STAGE11_AMBIENT 0x27D |
60 | 60 | ||
61 | #define PER_STAGE_REG_NUM 36 | 61 | #define PER_STAGE_REG_NUM 36 |
62 | #define STAGE_NUM 12 | ||
63 | #define STAGE_CFGREG_NUM 8 | 62 | #define STAGE_CFGREG_NUM 8 |
64 | #define SYS_CFGREG_NUM 8 | 63 | #define SYS_CFGREG_NUM 8 |
65 | 64 | ||
@@ -124,27 +123,6 @@ struct ad714x_driver_data { | |||
124 | * information to integrate all things which will be private data | 123 | * information to integrate all things which will be private data |
125 | * of spi/i2c device | 124 | * of spi/i2c device |
126 | */ | 125 | */ |
127 | struct ad714x_chip { | ||
128 | unsigned short h_state; | ||
129 | unsigned short l_state; | ||
130 | unsigned short c_state; | ||
131 | unsigned short adc_reg[STAGE_NUM]; | ||
132 | unsigned short amb_reg[STAGE_NUM]; | ||
133 | unsigned short sensor_val[STAGE_NUM]; | ||
134 | |||
135 | struct ad714x_platform_data *hw; | ||
136 | struct ad714x_driver_data *sw; | ||
137 | |||
138 | int irq; | ||
139 | struct device *dev; | ||
140 | ad714x_read_t read; | ||
141 | ad714x_write_t write; | ||
142 | |||
143 | struct mutex mutex; | ||
144 | |||
145 | unsigned product; | ||
146 | unsigned version; | ||
147 | }; | ||
148 | 126 | ||
149 | static void ad714x_use_com_int(struct ad714x_chip *ad714x, | 127 | static void ad714x_use_com_int(struct ad714x_chip *ad714x, |
150 | int start_stage, int end_stage) | 128 | int start_stage, int end_stage) |
@@ -154,13 +132,13 @@ static void ad714x_use_com_int(struct ad714x_chip *ad714x, | |||
154 | 132 | ||
155 | mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); | 133 | mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); |
156 | 134 | ||
157 | ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); | 135 | ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1); |
158 | data |= 1 << end_stage; | 136 | data |= 1 << end_stage; |
159 | ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); | 137 | ad714x->write(ad714x, STG_COM_INT_EN_REG, data); |
160 | 138 | ||
161 | ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); | 139 | ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1); |
162 | data &= ~mask; | 140 | data &= ~mask; |
163 | ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); | 141 | ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data); |
164 | } | 142 | } |
165 | 143 | ||
166 | static void ad714x_use_thr_int(struct ad714x_chip *ad714x, | 144 | static void ad714x_use_thr_int(struct ad714x_chip *ad714x, |
@@ -171,13 +149,13 @@ static void ad714x_use_thr_int(struct ad714x_chip *ad714x, | |||
171 | 149 | ||
172 | mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); | 150 | mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); |
173 | 151 | ||
174 | ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); | 152 | ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1); |
175 | data &= ~(1 << end_stage); | 153 | data &= ~(1 << end_stage); |
176 | ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); | 154 | ad714x->write(ad714x, STG_COM_INT_EN_REG, data); |
177 | 155 | ||
178 | ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); | 156 | ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1); |
179 | data |= mask; | 157 | data |= mask; |
180 | ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); | 158 | ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data); |
181 | } | 159 | } |
182 | 160 | ||
183 | static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x, | 161 | static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x, |
@@ -273,15 +251,16 @@ static void ad714x_slider_cal_sensor_val(struct ad714x_chip *ad714x, int idx) | |||
273 | struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx]; | 251 | struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx]; |
274 | int i; | 252 | int i; |
275 | 253 | ||
254 | ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage, | ||
255 | &ad714x->adc_reg[hw->start_stage], | ||
256 | hw->end_stage - hw->start_stage + 1); | ||
257 | |||
276 | for (i = hw->start_stage; i <= hw->end_stage; i++) { | 258 | for (i = hw->start_stage; i <= hw->end_stage; i++) { |
277 | ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, | 259 | ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, |
278 | &ad714x->adc_reg[i]); | 260 | &ad714x->amb_reg[i], 1); |
279 | ad714x->read(ad714x->dev, | 261 | |
280 | STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, | 262 | ad714x->sensor_val[i] = |
281 | &ad714x->amb_reg[i]); | 263 | abs(ad714x->adc_reg[i] - ad714x->amb_reg[i]); |
282 | |||
283 | ad714x->sensor_val[i] = abs(ad714x->adc_reg[i] - | ||
284 | ad714x->amb_reg[i]); | ||
285 | } | 264 | } |
286 | } | 265 | } |
287 | 266 | ||
@@ -444,15 +423,16 @@ static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx) | |||
444 | struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx]; | 423 | struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx]; |
445 | int i; | 424 | int i; |
446 | 425 | ||
426 | ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage, | ||
427 | &ad714x->adc_reg[hw->start_stage], | ||
428 | hw->end_stage - hw->start_stage + 1); | ||
429 | |||
447 | for (i = hw->start_stage; i <= hw->end_stage; i++) { | 430 | for (i = hw->start_stage; i <= hw->end_stage; i++) { |
448 | ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, | 431 | ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, |
449 | &ad714x->adc_reg[i]); | 432 | &ad714x->amb_reg[i], 1); |
450 | ad714x->read(ad714x->dev, | ||
451 | STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, | ||
452 | &ad714x->amb_reg[i]); | ||
453 | if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) | 433 | if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) |
454 | ad714x->sensor_val[i] = ad714x->adc_reg[i] - | 434 | ad714x->sensor_val[i] = |
455 | ad714x->amb_reg[i]; | 435 | ad714x->adc_reg[i] - ad714x->amb_reg[i]; |
456 | else | 436 | else |
457 | ad714x->sensor_val[i] = 0; | 437 | ad714x->sensor_val[i] = 0; |
458 | } | 438 | } |
@@ -597,15 +577,16 @@ static void touchpad_cal_sensor_val(struct ad714x_chip *ad714x, int idx) | |||
597 | struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx]; | 577 | struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx]; |
598 | int i; | 578 | int i; |
599 | 579 | ||
580 | ad714x->read(ad714x, CDC_RESULT_S0 + hw->x_start_stage, | ||
581 | &ad714x->adc_reg[hw->x_start_stage], | ||
582 | hw->x_end_stage - hw->x_start_stage + 1); | ||
583 | |||
600 | for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) { | 584 | for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) { |
601 | ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, | 585 | ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, |
602 | &ad714x->adc_reg[i]); | 586 | &ad714x->amb_reg[i], 1); |
603 | ad714x->read(ad714x->dev, | ||
604 | STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, | ||
605 | &ad714x->amb_reg[i]); | ||
606 | if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) | 587 | if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) |
607 | ad714x->sensor_val[i] = ad714x->adc_reg[i] - | 588 | ad714x->sensor_val[i] = |
608 | ad714x->amb_reg[i]; | 589 | ad714x->adc_reg[i] - ad714x->amb_reg[i]; |
609 | else | 590 | else |
610 | ad714x->sensor_val[i] = 0; | 591 | ad714x->sensor_val[i] = 0; |
611 | } | 592 | } |
@@ -891,7 +872,7 @@ static int ad714x_hw_detect(struct ad714x_chip *ad714x) | |||
891 | { | 872 | { |
892 | unsigned short data; | 873 | unsigned short data; |
893 | 874 | ||
894 | ad714x->read(ad714x->dev, AD714X_PARTID_REG, &data); | 875 | ad714x->read(ad714x, AD714X_PARTID_REG, &data, 1); |
895 | switch (data & 0xFFF0) { | 876 | switch (data & 0xFFF0) { |
896 | case AD7142_PARTID: | 877 | case AD7142_PARTID: |
897 | ad714x->product = 0x7142; | 878 | ad714x->product = 0x7142; |
@@ -940,23 +921,20 @@ static void ad714x_hw_init(struct ad714x_chip *ad714x) | |||
940 | for (i = 0; i < STAGE_NUM; i++) { | 921 | for (i = 0; i < STAGE_NUM; i++) { |
941 | reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM; | 922 | reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM; |
942 | for (j = 0; j < STAGE_CFGREG_NUM; j++) | 923 | for (j = 0; j < STAGE_CFGREG_NUM; j++) |
943 | ad714x->write(ad714x->dev, reg_base + j, | 924 | ad714x->write(ad714x, reg_base + j, |
944 | ad714x->hw->stage_cfg_reg[i][j]); | 925 | ad714x->hw->stage_cfg_reg[i][j]); |
945 | } | 926 | } |
946 | 927 | ||
947 | for (i = 0; i < SYS_CFGREG_NUM; i++) | 928 | for (i = 0; i < SYS_CFGREG_NUM; i++) |
948 | ad714x->write(ad714x->dev, AD714X_SYSCFG_REG + i, | 929 | ad714x->write(ad714x, AD714X_SYSCFG_REG + i, |
949 | ad714x->hw->sys_cfg_reg[i]); | 930 | ad714x->hw->sys_cfg_reg[i]); |
950 | for (i = 0; i < SYS_CFGREG_NUM; i++) | 931 | for (i = 0; i < SYS_CFGREG_NUM; i++) |
951 | ad714x->read(ad714x->dev, AD714X_SYSCFG_REG + i, | 932 | ad714x->read(ad714x, AD714X_SYSCFG_REG + i, &data, 1); |
952 | &data); | ||
953 | 933 | ||
954 | ad714x->write(ad714x->dev, AD714X_STG_CAL_EN_REG, 0xFFF); | 934 | ad714x->write(ad714x, AD714X_STG_CAL_EN_REG, 0xFFF); |
955 | 935 | ||
956 | /* clear all interrupts */ | 936 | /* clear all interrupts */ |
957 | ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); | 937 | ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3); |
958 | ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data); | ||
959 | ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data); | ||
960 | } | 938 | } |
961 | 939 | ||
962 | static irqreturn_t ad714x_interrupt_thread(int irq, void *data) | 940 | static irqreturn_t ad714x_interrupt_thread(int irq, void *data) |
@@ -966,9 +944,7 @@ static irqreturn_t ad714x_interrupt_thread(int irq, void *data) | |||
966 | 944 | ||
967 | mutex_lock(&ad714x->mutex); | 945 | mutex_lock(&ad714x->mutex); |
968 | 946 | ||
969 | ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &ad714x->l_state); | 947 | ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3); |
970 | ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &ad714x->h_state); | ||
971 | ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &ad714x->c_state); | ||
972 | 948 | ||
973 | for (i = 0; i < ad714x->hw->button_num; i++) | 949 | for (i = 0; i < ad714x->hw->button_num; i++) |
974 | ad714x_button_state_machine(ad714x, i); | 950 | ad714x_button_state_machine(ad714x, i); |
@@ -1245,7 +1221,7 @@ int ad714x_disable(struct ad714x_chip *ad714x) | |||
1245 | mutex_lock(&ad714x->mutex); | 1221 | mutex_lock(&ad714x->mutex); |
1246 | 1222 | ||
1247 | data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3; | 1223 | data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3; |
1248 | ad714x->write(ad714x->dev, AD714X_PWR_CTRL, data); | 1224 | ad714x->write(ad714x, AD714X_PWR_CTRL, data); |
1249 | 1225 | ||
1250 | mutex_unlock(&ad714x->mutex); | 1226 | mutex_unlock(&ad714x->mutex); |
1251 | 1227 | ||
@@ -1255,24 +1231,20 @@ EXPORT_SYMBOL(ad714x_disable); | |||
1255 | 1231 | ||
1256 | int ad714x_enable(struct ad714x_chip *ad714x) | 1232 | int ad714x_enable(struct ad714x_chip *ad714x) |
1257 | { | 1233 | { |
1258 | unsigned short data; | ||
1259 | |||
1260 | dev_dbg(ad714x->dev, "%s enter\n", __func__); | 1234 | dev_dbg(ad714x->dev, "%s enter\n", __func__); |
1261 | 1235 | ||
1262 | mutex_lock(&ad714x->mutex); | 1236 | mutex_lock(&ad714x->mutex); |
1263 | 1237 | ||
1264 | /* resume to non-shutdown mode */ | 1238 | /* resume to non-shutdown mode */ |
1265 | 1239 | ||
1266 | ad714x->write(ad714x->dev, AD714X_PWR_CTRL, | 1240 | ad714x->write(ad714x, AD714X_PWR_CTRL, |
1267 | ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]); | 1241 | ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]); |
1268 | 1242 | ||
1269 | /* make sure the interrupt output line is not low level after resume, | 1243 | /* make sure the interrupt output line is not low level after resume, |
1270 | * otherwise we will get no chance to enter falling-edge irq again | 1244 | * otherwise we will get no chance to enter falling-edge irq again |
1271 | */ | 1245 | */ |
1272 | 1246 | ||
1273 | ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); | 1247 | ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3); |
1274 | ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data); | ||
1275 | ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data); | ||
1276 | 1248 | ||
1277 | mutex_unlock(&ad714x->mutex); | 1249 | mutex_unlock(&ad714x->mutex); |
1278 | 1250 | ||
diff --git a/drivers/input/misc/ad714x.h b/drivers/input/misc/ad714x.h index 45c54fb13f07..3c85455aa66d 100644 --- a/drivers/input/misc/ad714x.h +++ b/drivers/input/misc/ad714x.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * AD714X CapTouch Programmable Controller driver (bus interfaces) | 2 | * AD714X CapTouch Programmable Controller driver (bus interfaces) |
3 | * | 3 | * |
4 | * Copyright 2009 Analog Devices Inc. | 4 | * Copyright 2009-2011 Analog Devices Inc. |
5 | * | 5 | * |
6 | * Licensed under the GPL-2 or later. | 6 | * Licensed under the GPL-2 or later. |
7 | */ | 7 | */ |
@@ -11,11 +11,40 @@ | |||
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | 13 | ||
14 | #define STAGE_NUM 12 | ||
15 | |||
14 | struct device; | 16 | struct device; |
17 | struct ad714x_platform_data; | ||
18 | struct ad714x_driver_data; | ||
15 | struct ad714x_chip; | 19 | struct ad714x_chip; |
16 | 20 | ||
17 | typedef int (*ad714x_read_t)(struct device *, unsigned short, unsigned short *); | 21 | typedef int (*ad714x_read_t)(struct ad714x_chip *, unsigned short, unsigned short *, size_t); |
18 | typedef int (*ad714x_write_t)(struct device *, unsigned short, unsigned short); | 22 | typedef int (*ad714x_write_t)(struct ad714x_chip *, unsigned short, unsigned short); |
23 | |||
24 | struct ad714x_chip { | ||
25 | unsigned short l_state; | ||
26 | unsigned short h_state; | ||
27 | unsigned short c_state; | ||
28 | unsigned short adc_reg[STAGE_NUM]; | ||
29 | unsigned short amb_reg[STAGE_NUM]; | ||
30 | unsigned short sensor_val[STAGE_NUM]; | ||
31 | |||
32 | struct ad714x_platform_data *hw; | ||
33 | struct ad714x_driver_data *sw; | ||
34 | |||
35 | int irq; | ||
36 | struct device *dev; | ||
37 | ad714x_read_t read; | ||
38 | ad714x_write_t write; | ||
39 | |||
40 | struct mutex mutex; | ||
41 | |||
42 | unsigned product; | ||
43 | unsigned version; | ||
44 | |||
45 | __be16 xfer_buf[16] ____cacheline_aligned; | ||
46 | |||
47 | }; | ||
19 | 48 | ||
20 | int ad714x_disable(struct ad714x_chip *ad714x); | 49 | int ad714x_disable(struct ad714x_chip *ad714x); |
21 | int ad714x_enable(struct ad714x_chip *ad714x); | 50 | int ad714x_enable(struct ad714x_chip *ad714x); |
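With the reworked read callback taking a length, the ad714x interrupt path fetches the three status registers in one burst read into &l_state; that only works because l_state, h_state and c_state are declared back to back in struct ad714x_chip and the registers are consecutive. A user-space sketch of that layout-dependent burst fill, with memcpy() standing in for the bus transfer:

    #include <stdio.h>
    #include <string.h>

    struct chip_state {
        unsigned short l_state;   /* the three status words must stay adjacent */
        unsigned short h_state;   /* and in register order for the burst read  */
        unsigned short c_state;   /* below to land in the right fields         */
    };

    /* stand-in for a 16-bit burst read from the bus */
    static void read_regs(unsigned short *dst, const unsigned short *regs, size_t len)
    {
        memcpy(dst, regs, len * sizeof(*dst));
    }

    int main(void)
    {
        const unsigned short status_regs[3] = { 0x0001, 0x0800, 0x0000 };
        struct chip_state st;

        /* one transfer fills three consecutive fields (assumes no padding
         * between the unsigned short members, as the driver also does) */
        read_regs(&st.l_state, status_regs, 3);

        printf("l=%#x h=%#x c=%#x\n", st.l_state, st.h_state, st.c_state);
        return 0;
    }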
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index b09c7d127219..ab860511f016 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c | |||
@@ -475,7 +475,7 @@ static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on) | |||
475 | le16_to_cpu(dev->ctl_req->wIndex), | 475 | le16_to_cpu(dev->ctl_req->wIndex), |
476 | dev->ctl_data, | 476 | dev->ctl_data, |
477 | USB_PKT_LEN, USB_CTRL_SET_TIMEOUT); | 477 | USB_PKT_LEN, USB_CTRL_SET_TIMEOUT); |
478 | if (error && error != EINTR) | 478 | if (error < 0 && error != -EINTR) |
479 | err("%s: usb_control_msg() failed %d", __func__, error); | 479 | err("%s: usb_control_msg() failed %d", __func__, error); |
480 | } | 480 | } |
481 | 481 | ||
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index 6c76cf792991..0794778295fc 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c | |||
@@ -234,7 +234,7 @@ static const struct of_device_id mma8450_dt_ids[] = { | |||
234 | { .compatible = "fsl,mma8450", }, | 234 | { .compatible = "fsl,mma8450", }, |
235 | { /* sentinel */ } | 235 | { /* sentinel */ } |
236 | }; | 236 | }; |
237 | MODULE_DEVICE_TABLE(i2c, mma8450_dt_ids); | 237 | MODULE_DEVICE_TABLE(of, mma8450_dt_ids); |
238 | 238 | ||
239 | static struct i2c_driver mma8450_driver = { | 239 | static struct i2c_driver mma8450_driver = { |
240 | .driver = { | 240 | .driver = { |
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c index b95fac15b2ea..f71dc728da58 100644 --- a/drivers/input/misc/mpu3050.c +++ b/drivers/input/misc/mpu3050.c | |||
@@ -282,7 +282,7 @@ err_free_irq: | |||
282 | err_pm_set_suspended: | 282 | err_pm_set_suspended: |
283 | pm_runtime_set_suspended(&client->dev); | 283 | pm_runtime_set_suspended(&client->dev); |
284 | err_free_mem: | 284 | err_free_mem: |
285 | input_unregister_device(idev); | 285 | input_free_device(idev); |
286 | kfree(sensor); | 286 | kfree(sensor); |
287 | return error; | 287 | return error; |
288 | } | 288 | } |
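The mpu3050 (like the earlier tegra-kbc) error path now calls input_free_device() on a device that never completed registration; input_unregister_device() is only correct once registration has succeeded. A user-space analogy of that rule, with every name invented for illustration:

    #include <stdlib.h>

    struct idev { int dummy; };

    static struct idev *idev_alloc(void)        { return calloc(1, sizeof(struct idev)); }
    static void idev_free(struct idev *d)       { free(d); }
    static int idev_register(struct idev *d)    { (void)d; return -1; /* pretend it fails */ }
    static void idev_unregister(struct idev *d) { idev_free(d); /* teardown for registered devices */ }

    int main(void)
    {
        struct idev *d = idev_alloc();

        if (!d)
            return 1;

        if (idev_register(d) < 0) {
            idev_free(d);       /* never registered: plain free is the right teardown */
            return 1;
        }

        /* ... device in use ... */
        idev_unregister(d);     /* only a successfully registered device takes this path */
        return 0;
    }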
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 3126983c004a..5ec617e28f7e 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c | |||
@@ -67,6 +67,18 @@ | |||
67 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 | 67 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 |
68 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 | 68 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 |
69 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 | 69 | #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 |
70 | /* MacbookAir4,1 (unibody, July 2011) */ | ||
71 | #define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249 | ||
72 | #define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a | ||
73 | #define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b | ||
74 | /* MacbookAir4,2 (unibody, July 2011) */ | ||
75 | #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c | ||
76 | #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d | ||
77 | #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e | ||
78 | /* Macbook8,2 (unibody) */ | ||
79 | #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252 | ||
80 | #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253 | ||
81 | #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254 | ||
70 | 82 | ||
71 | #define BCM5974_DEVICE(prod) { \ | 83 | #define BCM5974_DEVICE(prod) { \ |
72 | .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ | 84 | .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ |
@@ -104,6 +116,18 @@ static const struct usb_device_id bcm5974_table[] = { | |||
104 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), | 116 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), |
105 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), | 117 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), |
106 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), | 118 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), |
119 | /* MacbookAir4,1 */ | ||
120 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI), | ||
121 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO), | ||
122 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS), | ||
123 | /* MacbookAir4,2 */ | ||
124 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), | ||
125 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), | ||
126 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_JIS), | ||
127 | /* MacbookPro8,2 */ | ||
128 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI), | ||
129 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO), | ||
130 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS), | ||
107 | /* Terminating entry */ | 131 | /* Terminating entry */ |
108 | {} | 132 | {} |
109 | }; | 133 | }; |
@@ -294,6 +318,42 @@ static const struct bcm5974_config bcm5974_config_table[] = { | |||
294 | { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, | 318 | { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, |
295 | { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } | 319 | { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } |
296 | }, | 320 | }, |
321 | { | ||
322 | USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI, | ||
323 | USB_DEVICE_ID_APPLE_WELLSPRING6_ISO, | ||
324 | USB_DEVICE_ID_APPLE_WELLSPRING6_JIS, | ||
325 | HAS_INTEGRATED_BUTTON, | ||
326 | 0x84, sizeof(struct bt_data), | ||
327 | 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, | ||
328 | { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, | ||
329 | { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, | ||
330 | { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, | ||
331 | { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } | ||
332 | }, | ||
333 | { | ||
334 | USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI, | ||
335 | USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO, | ||
336 | USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS, | ||
337 | HAS_INTEGRATED_BUTTON, | ||
338 | 0x84, sizeof(struct bt_data), | ||
339 | 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, | ||
340 | { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, | ||
341 | { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, | ||
342 | { DIM_X, DIM_X / SN_COORD, -4750, 5280 }, | ||
343 | { DIM_Y, DIM_Y / SN_COORD, -150, 6730 } | ||
344 | }, | ||
345 | { | ||
346 | USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI, | ||
347 | USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO, | ||
348 | USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, | ||
349 | HAS_INTEGRATED_BUTTON, | ||
350 | 0x84, sizeof(struct bt_data), | ||
351 | 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, | ||
352 | { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, | ||
353 | { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, | ||
354 | { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, | ||
355 | { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } | ||
356 | }, | ||
297 | {} | 357 | {} |
298 | }; | 358 | }; |
299 | 359 | ||
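The bcm5974 additions register three new Wellspring product-id triplets and give each a config entry with its own coordinate ranges; the driver then picks the entry whose ANSI/ISO/JIS id matches the probed product. A self-contained sketch of that sentinel-terminated lookup, with simplified stand-in types and two of the added ranges (the helper is illustrative, not the driver's own function):

/* Sentinel-terminated table lookup, simplified stand-in for the driver's
 * config search. Compile with any C compiler and run. */
#include <stdio.h>

struct cfg {
	unsigned short ansi, iso, jis;	/* all zero terminates the table */
	int x_min, x_max;
};

static const struct cfg table[] = {
	{ 0x024c, 0x024d, 0x024e, -4620, 5140 },	/* WELLSPRING6 */
	{ 0x0252, 0x0253, 0x0254, -4750, 5280 },	/* WELLSPRING5A */
	{ 0 }						/* terminating entry */
};

static const struct cfg *lookup(unsigned short product)
{
	const struct cfg *c;

	for (c = table; c->ansi; c++)
		if (c->ansi == product || c->iso == product || c->jis == product)
			return c;
	return NULL;
}

int main(void)
{
	const struct cfg *c = lookup(0x0253);

	if (c)
		printf("x range: %d..%d\n", c->x_min, c->x_max);
	return 0;
}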
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 449c0a46dbac..958b4eb6369d 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c | |||
@@ -49,6 +49,7 @@ struct hid_descriptor { | |||
49 | #define USB_REQ_GET_REPORT 0x01 | 49 | #define USB_REQ_GET_REPORT 0x01 |
50 | #define USB_REQ_SET_REPORT 0x09 | 50 | #define USB_REQ_SET_REPORT 0x09 |
51 | #define WAC_HID_FEATURE_REPORT 0x03 | 51 | #define WAC_HID_FEATURE_REPORT 0x03 |
52 | #define WAC_MSG_RETRIES 5 | ||
52 | 53 | ||
53 | static int usb_get_report(struct usb_interface *intf, unsigned char type, | 54 | static int usb_get_report(struct usb_interface *intf, unsigned char type, |
54 | unsigned char id, void *buf, int size) | 55 | unsigned char id, void *buf, int size) |
@@ -165,7 +166,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi | |||
165 | report, | 166 | report, |
166 | hid_desc->wDescriptorLength, | 167 | hid_desc->wDescriptorLength, |
167 | 5000); /* 5 secs */ | 168 | 5000); /* 5 secs */ |
168 | } while (result < 0 && limit++ < 5); | 169 | } while (result < 0 && limit++ < WAC_MSG_RETRIES); |
169 | 170 | ||
170 | /* No need to parse the Descriptor. It isn't an error though */ | 171 | /* No need to parse the Descriptor. It isn't an error though */ |
171 | if (result < 0) | 172 | if (result < 0) |
@@ -228,13 +229,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi | |||
228 | get_unaligned_le16(&report[i + 3]); | 229 | get_unaligned_le16(&report[i + 3]); |
229 | i += 4; | 230 | i += 4; |
230 | } | 231 | } |
231 | } else if (usage == WCM_DIGITIZER) { | ||
232 | /* max pressure isn't reported | ||
233 | features->pressure_max = (unsigned short) | ||
234 | (report[i+4] << 8 | report[i + 3]); | ||
235 | */ | ||
236 | features->pressure_max = 255; | ||
237 | i += 4; | ||
238 | } | 232 | } |
239 | break; | 233 | break; |
240 | 234 | ||
@@ -290,13 +284,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi | |||
290 | pen = 1; | 284 | pen = 1; |
291 | i++; | 285 | i++; |
292 | break; | 286 | break; |
293 | |||
294 | case HID_USAGE_UNDEFINED: | ||
295 | if (usage == WCM_DESKTOP && finger) /* capacity */ | ||
296 | features->pressure_max = | ||
297 | get_unaligned_le16(&report[i + 3]); | ||
298 | i += 4; | ||
299 | break; | ||
300 | } | 287 | } |
301 | break; | 288 | break; |
302 | 289 | ||
@@ -319,24 +306,26 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat | |||
319 | int limit = 0, report_id = 2; | 306 | int limit = 0, report_id = 2; |
320 | int error = -ENOMEM; | 307 | int error = -ENOMEM; |
321 | 308 | ||
322 | rep_data = kmalloc(2, GFP_KERNEL); | 309 | rep_data = kmalloc(4, GFP_KERNEL); |
323 | if (!rep_data) | 310 | if (!rep_data) |
324 | return error; | 311 | return error; |
325 | 312 | ||
326 | /* ask to report tablet data if it is 2FGT Tablet PC or | 313 | /* ask to report tablet data if it is MT Tablet PC or |
327 | * not a Tablet PC */ | 314 | * not a Tablet PC */ |
328 | if (features->type == TABLETPC2FG) { | 315 | if (features->type == TABLETPC2FG) { |
329 | do { | 316 | do { |
330 | rep_data[0] = 3; | 317 | rep_data[0] = 3; |
331 | rep_data[1] = 4; | 318 | rep_data[1] = 4; |
319 | rep_data[2] = 0; | ||
320 | rep_data[3] = 0; | ||
332 | report_id = 3; | 321 | report_id = 3; |
333 | error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, | 322 | error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, |
334 | report_id, rep_data, 2); | 323 | report_id, rep_data, 4); |
335 | if (error >= 0) | 324 | if (error >= 0) |
336 | error = usb_get_report(intf, | 325 | error = usb_get_report(intf, |
337 | WAC_HID_FEATURE_REPORT, report_id, | 326 | WAC_HID_FEATURE_REPORT, report_id, |
338 | rep_data, 3); | 327 | rep_data, 4); |
339 | } while ((error < 0 || rep_data[1] != 4) && limit++ < 5); | 328 | } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES); |
340 | } else if (features->type != TABLETPC) { | 329 | } else if (features->type != TABLETPC) { |
341 | do { | 330 | do { |
342 | rep_data[0] = 2; | 331 | rep_data[0] = 2; |
@@ -347,7 +336,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat | |||
347 | error = usb_get_report(intf, | 336 | error = usb_get_report(intf, |
348 | WAC_HID_FEATURE_REPORT, report_id, | 337 | WAC_HID_FEATURE_REPORT, report_id, |
349 | rep_data, 2); | 338 | rep_data, 2); |
350 | } while ((error < 0 || rep_data[1] != 2) && limit++ < 5); | 339 | } while ((error < 0 || rep_data[1] != 2) && limit++ < WAC_MSG_RETRIES); |
351 | } | 340 | } |
352 | 341 | ||
353 | kfree(rep_data); | 342 | kfree(rep_data); |
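The wacom_sys change replaces the bare retry bound "5" with the named WAC_MSG_RETRIES and grows the feature-report buffer to four bytes. The retry shape itself is a plain do/while with a limit; a self-contained illustration follows, where try_once() is a stand-in for the usb_set_report()/usb_get_report() pair:

/* Stand-alone model of the bounded retry loop; try_once() fakes two
 * failures before succeeding. */
#include <stdio.h>

#define MSG_RETRIES 5

static int attempts;

static int try_once(void)
{
	/* Pretend the first two attempts fail. */
	return ++attempts < 3 ? -1 : 0;
}

int main(void)
{
	int limit = 0, error;

	do {
		error = try_once();
	} while (error < 0 && limit++ < MSG_RETRIES);

	printf("done after %d attempts, error=%d\n", attempts, error);
	return 0;
}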
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 03ebcc8b24b5..0dc97ec15c28 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -800,25 +800,26 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) | |||
800 | int i; | 800 | int i; |
801 | 801 | ||
802 | for (i = 0; i < 2; i++) { | 802 | for (i = 0; i < 2; i++) { |
803 | int p = data[9 * i + 2]; | 803 | int offset = (data[1] & 0x80) ? (8 * i) : (9 * i); |
804 | bool touch = p && !wacom->shared->stylus_in_proximity; | 804 | bool touch = data[offset + 3] & 0x80; |
805 | 805 | ||
806 | input_mt_slot(input, i); | ||
807 | input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); | ||
808 | /* | 806 | /* |
809 | * Touch events need to be disabled while stylus is | 807 | * Touch events need to be disabled while stylus is |
810 | * in proximity because user's hand is resting on touchpad | 808 | * in proximity because user's hand is resting on touchpad |
811 | * and sending unwanted events. User expects tablet buttons | 809 | * and sending unwanted events. User expects tablet buttons |
812 | * to continue working though. | 810 | * to continue working though. |
813 | */ | 811 | */ |
812 | touch = touch && !wacom->shared->stylus_in_proximity; | ||
813 | |||
814 | input_mt_slot(input, i); | ||
815 | input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); | ||
814 | if (touch) { | 816 | if (touch) { |
815 | int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff; | 817 | int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff; |
816 | int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff; | 818 | int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff; |
817 | if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) { | 819 | if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) { |
818 | x <<= 5; | 820 | x <<= 5; |
819 | y <<= 5; | 821 | y <<= 5; |
820 | } | 822 | } |
821 | input_report_abs(input, ABS_MT_PRESSURE, p); | ||
822 | input_report_abs(input, ABS_MT_POSITION_X, x); | 823 | input_report_abs(input, ABS_MT_POSITION_X, x); |
823 | input_report_abs(input, ABS_MT_POSITION_Y, y); | 824 | input_report_abs(input, ABS_MT_POSITION_Y, y); |
824 | } | 825 | } |
@@ -1056,10 +1057,11 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
1056 | features->x_fuzz, 0); | 1057 | features->x_fuzz, 0); |
1057 | input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, | 1058 | input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, |
1058 | features->y_fuzz, 0); | 1059 | features->y_fuzz, 0); |
1059 | input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, | ||
1060 | features->pressure_fuzz, 0); | ||
1061 | 1060 | ||
1062 | if (features->device_type == BTN_TOOL_PEN) { | 1061 | if (features->device_type == BTN_TOOL_PEN) { |
1062 | input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, | ||
1063 | features->pressure_fuzz, 0); | ||
1064 | |||
1063 | /* penabled devices have fixed resolution for each model */ | 1065 | /* penabled devices have fixed resolution for each model */ |
1064 | input_abs_set_res(input_dev, ABS_X, features->x_resolution); | 1066 | input_abs_set_res(input_dev, ABS_X, features->x_resolution); |
1065 | input_abs_set_res(input_dev, ABS_Y, features->y_resolution); | 1067 | input_abs_set_res(input_dev, ABS_Y, features->y_resolution); |
@@ -1098,6 +1100,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
1098 | __set_bit(BTN_TOOL_MOUSE, input_dev->keybit); | 1100 | __set_bit(BTN_TOOL_MOUSE, input_dev->keybit); |
1099 | __set_bit(BTN_STYLUS, input_dev->keybit); | 1101 | __set_bit(BTN_STYLUS, input_dev->keybit); |
1100 | __set_bit(BTN_STYLUS2, input_dev->keybit); | 1102 | __set_bit(BTN_STYLUS2, input_dev->keybit); |
1103 | |||
1104 | __set_bit(INPUT_PROP_POINTER, input_dev->propbit); | ||
1101 | break; | 1105 | break; |
1102 | 1106 | ||
1103 | case WACOM_21UX2: | 1107 | case WACOM_21UX2: |
@@ -1126,6 +1130,9 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
1126 | } | 1130 | } |
1127 | 1131 | ||
1128 | input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); | 1132 | input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); |
1133 | |||
1134 | __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); | ||
1135 | |||
1129 | wacom_setup_cintiq(wacom_wac); | 1136 | wacom_setup_cintiq(wacom_wac); |
1130 | break; | 1137 | break; |
1131 | 1138 | ||
@@ -1150,6 +1157,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
1150 | /* fall through */ | 1157 | /* fall through */ |
1151 | 1158 | ||
1152 | case INTUOS: | 1159 | case INTUOS: |
1160 | __set_bit(INPUT_PROP_POINTER, input_dev->propbit); | ||
1161 | |||
1153 | wacom_setup_intuos(wacom_wac); | 1162 | wacom_setup_intuos(wacom_wac); |
1154 | break; | 1163 | break; |
1155 | 1164 | ||
@@ -1165,6 +1174,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
1165 | 1174 | ||
1166 | input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); | 1175 | input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); |
1167 | wacom_setup_intuos(wacom_wac); | 1176 | wacom_setup_intuos(wacom_wac); |
1177 | |||
1178 | __set_bit(INPUT_PROP_POINTER, input_dev->propbit); | ||
1168 | break; | 1179 | break; |
1169 | 1180 | ||
1170 | case TABLETPC2FG: | 1181 | case TABLETPC2FG: |
@@ -1183,26 +1194,40 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
1183 | case TABLETPC: | 1194 | case TABLETPC: |
1184 | __clear_bit(ABS_MISC, input_dev->absbit); | 1195 | __clear_bit(ABS_MISC, input_dev->absbit); |
1185 | 1196 | ||
1197 | __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); | ||
1198 | |||
1186 | if (features->device_type != BTN_TOOL_PEN) | 1199 | if (features->device_type != BTN_TOOL_PEN) |
1187 | break; /* no need to process stylus stuff */ | 1200 | break; /* no need to process stylus stuff */ |
1188 | 1201 | ||
1189 | /* fall through */ | 1202 | /* fall through */ |
1190 | 1203 | ||
1191 | case PL: | 1204 | case PL: |
1192 | case PTU: | ||
1193 | case DTU: | 1205 | case DTU: |
1194 | __set_bit(BTN_TOOL_PEN, input_dev->keybit); | 1206 | __set_bit(BTN_TOOL_PEN, input_dev->keybit); |
1207 | __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); | ||
1195 | __set_bit(BTN_STYLUS, input_dev->keybit); | 1208 | __set_bit(BTN_STYLUS, input_dev->keybit); |
1196 | __set_bit(BTN_STYLUS2, input_dev->keybit); | 1209 | __set_bit(BTN_STYLUS2, input_dev->keybit); |
1210 | |||
1211 | __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); | ||
1212 | break; | ||
1213 | |||
1214 | case PTU: | ||
1215 | __set_bit(BTN_STYLUS2, input_dev->keybit); | ||
1197 | /* fall through */ | 1216 | /* fall through */ |
1198 | 1217 | ||
1199 | case PENPARTNER: | 1218 | case PENPARTNER: |
1219 | __set_bit(BTN_TOOL_PEN, input_dev->keybit); | ||
1200 | __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); | 1220 | __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); |
1221 | __set_bit(BTN_STYLUS, input_dev->keybit); | ||
1222 | |||
1223 | __set_bit(INPUT_PROP_POINTER, input_dev->propbit); | ||
1201 | break; | 1224 | break; |
1202 | 1225 | ||
1203 | case BAMBOO_PT: | 1226 | case BAMBOO_PT: |
1204 | __clear_bit(ABS_MISC, input_dev->absbit); | 1227 | __clear_bit(ABS_MISC, input_dev->absbit); |
1205 | 1228 | ||
1229 | __set_bit(INPUT_PROP_POINTER, input_dev->propbit); | ||
1230 | |||
1206 | if (features->device_type == BTN_TOOL_DOUBLETAP) { | 1231 | if (features->device_type == BTN_TOOL_DOUBLETAP) { |
1207 | __set_bit(BTN_LEFT, input_dev->keybit); | 1232 | __set_bit(BTN_LEFT, input_dev->keybit); |
1208 | __set_bit(BTN_FORWARD, input_dev->keybit); | 1233 | __set_bit(BTN_FORWARD, input_dev->keybit); |
@@ -1460,6 +1485,9 @@ static const struct wacom_features wacom_features_0xD3 = | |||
1460 | static const struct wacom_features wacom_features_0xD4 = | 1485 | static const struct wacom_features wacom_features_0xD4 = |
1461 | { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, | 1486 | { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, |
1462 | 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | 1487 | 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; |
1488 | static const struct wacom_features wacom_features_0xD5 = | ||
1489 | { "Wacom Bamboo Pen 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, | ||
1490 | 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | ||
1463 | static const struct wacom_features wacom_features_0xD6 = | 1491 | static const struct wacom_features wacom_features_0xD6 = |
1464 | { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, | 1492 | { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, |
1465 | 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | 1493 | 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; |
@@ -1564,6 +1592,7 @@ const struct usb_device_id wacom_ids[] = { | |||
1564 | { USB_DEVICE_WACOM(0xD2) }, | 1592 | { USB_DEVICE_WACOM(0xD2) }, |
1565 | { USB_DEVICE_WACOM(0xD3) }, | 1593 | { USB_DEVICE_WACOM(0xD3) }, |
1566 | { USB_DEVICE_WACOM(0xD4) }, | 1594 | { USB_DEVICE_WACOM(0xD4) }, |
1595 | { USB_DEVICE_WACOM(0xD5) }, | ||
1567 | { USB_DEVICE_WACOM(0xD6) }, | 1596 | { USB_DEVICE_WACOM(0xD6) }, |
1568 | { USB_DEVICE_WACOM(0xD7) }, | 1597 | { USB_DEVICE_WACOM(0xD7) }, |
1569 | { USB_DEVICE_WACOM(0xD8) }, | 1598 | { USB_DEVICE_WACOM(0xD8) }, |
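In the Bamboo touch hunk the per-finger offset is now chosen from a header flag, 8 bytes per finger for the newer packet format and 9 for the older one, and the touch state is read from a bit in the packet instead of being inferred from pressure. A small stand-alone illustration of that stride selection; the packet layout and values here are illustrative only:

/* Illustrative stride selection over a fake two-finger packet. */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned char data[32] = { 0, 0x80 };	/* header flag set, rest zero */
	int i;

	for (i = 0; i < 2; i++) {
		int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
		bool touch = data[offset + 3] & 0x80;

		printf("finger %d: offset %d, touch %d\n", i, offset, touch);
	}
	return 0;
}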
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index ae00604a6a81..f5d66859f232 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c | |||
@@ -244,6 +244,7 @@ struct mxt_finger { | |||
244 | int x; | 244 | int x; |
245 | int y; | 245 | int y; |
246 | int area; | 246 | int area; |
247 | int pressure; | ||
247 | }; | 248 | }; |
248 | 249 | ||
249 | /* Each client has this additional data */ | 250 | /* Each client has this additional data */ |
@@ -536,6 +537,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id) | |||
536 | finger[id].x); | 537 | finger[id].x); |
537 | input_report_abs(input_dev, ABS_MT_POSITION_Y, | 538 | input_report_abs(input_dev, ABS_MT_POSITION_Y, |
538 | finger[id].y); | 539 | finger[id].y); |
540 | input_report_abs(input_dev, ABS_MT_PRESSURE, | ||
541 | finger[id].pressure); | ||
539 | } else { | 542 | } else { |
540 | finger[id].status = 0; | 543 | finger[id].status = 0; |
541 | } | 544 | } |
@@ -546,6 +549,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id) | |||
546 | if (status != MXT_RELEASE) { | 549 | if (status != MXT_RELEASE) { |
547 | input_report_abs(input_dev, ABS_X, finger[single_id].x); | 550 | input_report_abs(input_dev, ABS_X, finger[single_id].x); |
548 | input_report_abs(input_dev, ABS_Y, finger[single_id].y); | 551 | input_report_abs(input_dev, ABS_Y, finger[single_id].y); |
552 | input_report_abs(input_dev, | ||
553 | ABS_PRESSURE, finger[single_id].pressure); | ||
549 | } | 554 | } |
550 | 555 | ||
551 | input_sync(input_dev); | 556 | input_sync(input_dev); |
@@ -560,6 +565,7 @@ static void mxt_input_touchevent(struct mxt_data *data, | |||
560 | int x; | 565 | int x; |
561 | int y; | 566 | int y; |
562 | int area; | 567 | int area; |
568 | int pressure; | ||
563 | 569 | ||
564 | /* Check the touch is present on the screen */ | 570 | /* Check the touch is present on the screen */ |
565 | if (!(status & MXT_DETECT)) { | 571 | if (!(status & MXT_DETECT)) { |
@@ -584,6 +590,7 @@ static void mxt_input_touchevent(struct mxt_data *data, | |||
584 | y = y >> 2; | 590 | y = y >> 2; |
585 | 591 | ||
586 | area = message->message[4]; | 592 | area = message->message[4]; |
593 | pressure = message->message[5]; | ||
587 | 594 | ||
588 | dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id, | 595 | dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id, |
589 | status & MXT_MOVE ? "moved" : "pressed", | 596 | status & MXT_MOVE ? "moved" : "pressed", |
@@ -594,6 +601,7 @@ static void mxt_input_touchevent(struct mxt_data *data, | |||
594 | finger[id].x = x; | 601 | finger[id].x = x; |
595 | finger[id].y = y; | 602 | finger[id].y = y; |
596 | finger[id].area = area; | 603 | finger[id].area = area; |
604 | finger[id].pressure = pressure; | ||
597 | 605 | ||
598 | mxt_input_report(data, id); | 606 | mxt_input_report(data, id); |
599 | } | 607 | } |
@@ -1116,6 +1124,8 @@ static int __devinit mxt_probe(struct i2c_client *client, | |||
1116 | 0, data->max_x, 0, 0); | 1124 | 0, data->max_x, 0, 0); |
1117 | input_set_abs_params(input_dev, ABS_Y, | 1125 | input_set_abs_params(input_dev, ABS_Y, |
1118 | 0, data->max_y, 0, 0); | 1126 | 0, data->max_y, 0, 0); |
1127 | input_set_abs_params(input_dev, ABS_PRESSURE, | ||
1128 | 0, 255, 0, 0); | ||
1119 | 1129 | ||
1120 | /* For multi touch */ | 1130 | /* For multi touch */ |
1121 | input_mt_init_slots(input_dev, MXT_MAX_FINGER); | 1131 | input_mt_init_slots(input_dev, MXT_MAX_FINGER); |
@@ -1125,6 +1135,8 @@ static int __devinit mxt_probe(struct i2c_client *client, | |||
1125 | 0, data->max_x, 0, 0); | 1135 | 0, data->max_x, 0, 0); |
1126 | input_set_abs_params(input_dev, ABS_MT_POSITION_Y, | 1136 | input_set_abs_params(input_dev, ABS_MT_POSITION_Y, |
1127 | 0, data->max_y, 0, 0); | 1137 | 0, data->max_y, 0, 0); |
1138 | input_set_abs_params(input_dev, ABS_MT_PRESSURE, | ||
1139 | 0, 255, 0, 0); | ||
1128 | 1140 | ||
1129 | input_set_drvdata(input_dev, data); | 1141 | input_set_drvdata(input_dev, data); |
1130 | i2c_set_clientdata(client, data); | 1142 | i2c_set_clientdata(client, data); |
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c index 4f2713d92791..4627fe55b401 100644 --- a/drivers/input/touchscreen/max11801_ts.c +++ b/drivers/input/touchscreen/max11801_ts.c | |||
@@ -9,7 +9,8 @@ | |||
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 11 | * it under the terms of the GNU General Public License as published by |
12 | * the Free Software Foundation; either version 2 of the License. | 12 | * the Free Software Foundation; either version 2 of the License, or |
13 | * (at your option) any later version. | ||
13 | */ | 14 | */ |
14 | 15 | ||
15 | /* | 16 | /* |
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c index 089b0a0f3d8c..0e8f63e5b36f 100644 --- a/drivers/input/touchscreen/tnetv107x-ts.c +++ b/drivers/input/touchscreen/tnetv107x-ts.c | |||
@@ -13,6 +13,7 @@ | |||
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
17 | #include <linux/err.h> | 18 | #include <linux/err.h> |
18 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c index c14412ef4648..9941d39df43d 100644 --- a/drivers/input/touchscreen/wacom_w8001.c +++ b/drivers/input/touchscreen/wacom_w8001.c | |||
@@ -383,6 +383,8 @@ static int w8001_setup(struct w8001 *w8001) | |||
383 | dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); | 383 | dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); |
384 | strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name)); | 384 | strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name)); |
385 | 385 | ||
386 | __set_bit(INPUT_PROP_DIRECT, dev->propbit); | ||
387 | |||
386 | /* penabled? */ | 388 | /* penabled? */ |
387 | error = w8001_command(w8001, W8001_CMD_QUERY, true); | 389 | error = w8001_command(w8001, W8001_CMD_QUERY, true); |
388 | if (!error) { | 390 | if (!error) { |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index a14f8dc23462..0e4227f457af 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd) | |||
605 | * Writes the command to the IOMMUs command buffer and informs the | 605 | * Writes the command to the IOMMUs command buffer and informs the |
606 | * hardware about the new command. | 606 | * hardware about the new command. |
607 | */ | 607 | */ |
608 | static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | 608 | static int iommu_queue_command_sync(struct amd_iommu *iommu, |
609 | struct iommu_cmd *cmd, | ||
610 | bool sync) | ||
609 | { | 611 | { |
610 | u32 left, tail, head, next_tail; | 612 | u32 left, tail, head, next_tail; |
611 | unsigned long flags; | 613 | unsigned long flags; |
@@ -639,13 +641,18 @@ again: | |||
639 | copy_cmd_to_buffer(iommu, cmd, tail); | 641 | copy_cmd_to_buffer(iommu, cmd, tail); |
640 | 642 | ||
641 | /* We need to sync now to make sure all commands are processed */ | 643 | /* We need to sync now to make sure all commands are processed */ |
642 | iommu->need_sync = true; | 644 | iommu->need_sync = sync; |
643 | 645 | ||
644 | spin_unlock_irqrestore(&iommu->lock, flags); | 646 | spin_unlock_irqrestore(&iommu->lock, flags); |
645 | 647 | ||
646 | return 0; | 648 | return 0; |
647 | } | 649 | } |
648 | 650 | ||
651 | static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | ||
652 | { | ||
653 | return iommu_queue_command_sync(iommu, cmd, true); | ||
654 | } | ||
655 | |||
649 | /* | 656 | /* |
650 | * This function queues a completion wait command into the command | 657 | * This function queues a completion wait command into the command |
651 | * buffer of an IOMMU | 658 | * buffer of an IOMMU |
@@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu) | |||
661 | 668 | ||
662 | build_completion_wait(&cmd, (u64)&sem); | 669 | build_completion_wait(&cmd, (u64)&sem); |
663 | 670 | ||
664 | ret = iommu_queue_command(iommu, &cmd); | 671 | ret = iommu_queue_command_sync(iommu, &cmd, false); |
665 | if (ret) | 672 | if (ret) |
666 | return ret; | 673 | return ret; |
667 | 674 | ||
@@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain) | |||
840 | static void domain_flush_devices(struct protection_domain *domain) | 847 | static void domain_flush_devices(struct protection_domain *domain) |
841 | { | 848 | { |
842 | struct iommu_dev_data *dev_data; | 849 | struct iommu_dev_data *dev_data; |
843 | unsigned long flags; | ||
844 | |||
845 | spin_lock_irqsave(&domain->lock, flags); | ||
846 | 850 | ||
847 | list_for_each_entry(dev_data, &domain->dev_list, list) | 851 | list_for_each_entry(dev_data, &domain->dev_list, list) |
848 | device_flush_dte(dev_data); | 852 | device_flush_dte(dev_data); |
849 | |||
850 | spin_unlock_irqrestore(&domain->lock, flags); | ||
851 | } | 853 | } |
852 | 854 | ||
853 | /**************************************************************************** | 855 | /**************************************************************************** |
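The amd_iommu refactor lets callers say whether a queued command needs a following sync: iommu_queue_command() becomes a thin wrapper that passes sync=true, while the completion-wait path queues its own command with sync=false. A generic, self-contained model of that wrapper pattern; the bodies are printf stand-ins, not IOMMU code:

/* Stand-alone model of the sync-flag wrapper refactor. */
#include <stdio.h>
#include <stdbool.h>

static bool need_sync;

static int queue_command_sync(const char *cmd, bool sync)
{
	/* ... copy the command into the ring buffer here ... */
	need_sync = sync;
	printf("queued %s, need_sync=%d\n", cmd, (int)need_sync);
	return 0;
}

static int queue_command(const char *cmd)
{
	return queue_command_sync(cmd, true);
}

int main(void)
{
	queue_command("INVALIDATE");			/* ordinary command */
	queue_command_sync("COMPLETION_WAIT", false);	/* the wait itself */
	return 0;
}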
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 3dc9befa5aec..6dcc7e2d54de 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
@@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu) | |||
1388 | return ret; | 1388 | return ret; |
1389 | } | 1389 | } |
1390 | 1390 | ||
1391 | ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu); | 1391 | ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); |
1392 | if (ret) | 1392 | if (ret) |
1393 | printk(KERN_ERR "IOMMU: can't request irq\n"); | 1393 | printk(KERN_ERR "IOMMU: can't request irq\n"); |
1394 | return ret; | 1394 | return ret; |
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c index b9826032450b..8c00937bf7e7 100644 --- a/drivers/leds/leds-ams-delta.c +++ b/drivers/leds/leds-ams-delta.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/module.h> | ||
11 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
12 | #include <linux/init.h> | 13 | #include <linux/init.h> |
13 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c index 3ebe3824662d..ea2185531f82 100644 --- a/drivers/leds/leds-bd2802.c +++ b/drivers/leds/leds-bd2802.c | |||
@@ -662,6 +662,11 @@ failed_unregister_led1_R: | |||
662 | static void bd2802_unregister_led_classdev(struct bd2802_led *led) | 662 | static void bd2802_unregister_led_classdev(struct bd2802_led *led) |
663 | { | 663 | { |
664 | cancel_work_sync(&led->work); | 664 | cancel_work_sync(&led->work); |
665 | led_classdev_unregister(&led->cdev_led2b); | ||
666 | led_classdev_unregister(&led->cdev_led2g); | ||
667 | led_classdev_unregister(&led->cdev_led2r); | ||
668 | led_classdev_unregister(&led->cdev_led1b); | ||
669 | led_classdev_unregister(&led->cdev_led1g); | ||
665 | led_classdev_unregister(&led->cdev_led1r); | 670 | led_classdev_unregister(&led->cdev_led1r); |
666 | } | 671 | } |
667 | 672 | ||
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c index e4ce1fd46338..bcfbd3a60eab 100644 --- a/drivers/leds/leds-hp6xx.c +++ b/drivers/leds/leds-hp6xx.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | ||
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c index d87c9d02f786..328c64c0841c 100644 --- a/drivers/leds/ledtrig-timer.c +++ b/drivers/leds/ledtrig-timer.c | |||
@@ -41,6 +41,7 @@ static ssize_t led_delay_on_store(struct device *dev, | |||
41 | 41 | ||
42 | if (count == size) { | 42 | if (count == size) { |
43 | led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off); | 43 | led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off); |
44 | led_cdev->blink_delay_on = state; | ||
44 | ret = count; | 45 | ret = count; |
45 | } | 46 | } |
46 | 47 | ||
@@ -69,6 +70,7 @@ static ssize_t led_delay_off_store(struct device *dev, | |||
69 | 70 | ||
70 | if (count == size) { | 71 | if (count == size) { |
71 | led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state); | 72 | led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state); |
73 | led_cdev->blink_delay_off = state; | ||
72 | ret = count; | 74 | ret = count; |
73 | } | 75 | } |
74 | 76 | ||
diff --git a/drivers/md/linear.h b/drivers/md/linear.h index 0ce29b61605a..2f2da05b2ce9 100644 --- a/drivers/md/linear.h +++ b/drivers/md/linear.h | |||
@@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t; | |||
10 | 10 | ||
11 | struct linear_private_data | 11 | struct linear_private_data |
12 | { | 12 | { |
13 | struct rcu_head rcu; | ||
13 | sector_t array_sectors; | 14 | sector_t array_sectors; |
14 | dev_info_t disks[0]; | 15 | dev_info_t disks[0]; |
15 | struct rcu_head rcu; | ||
16 | }; | 16 | }; |
17 | 17 | ||
18 | 18 | ||
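The linear.h change moves the rcu_head in front of disks[0]: a zero-length (flexible) array has to be the last member, because the dynamically sized tail would otherwise overlap whatever follows it. A self-contained demonstration with simplified stand-in types:

/* Demonstration of why the flexible array must come last; the stub types
 * only mimic the shapes involved. */
#include <stdio.h>
#include <stddef.h>

struct rcu_head_stub { void *next; void *func; };
struct dev_info_stub { void *rdev; unsigned long long end_sector; };

struct linear_fixed {
	struct rcu_head_stub rcu;	/* now placed before the array */
	unsigned long long array_sectors;
	struct dev_info_stub disks[];	/* must be the last member */
};

int main(void)
{
	printf("disks[] starts at offset %zu, struct size %zu\n",
	       offsetof(struct linear_fixed, disks),
	       sizeof(struct linear_fixed));
	return 0;
}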
diff --git a/drivers/md/md.c b/drivers/md/md.c index 8e221a20f5d9..5404b2295820 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -848,7 +848,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, | |||
848 | bio->bi_end_io = super_written; | 848 | bio->bi_end_io = super_written; |
849 | 849 | ||
850 | atomic_inc(&mddev->pending_writes); | 850 | atomic_inc(&mddev->pending_writes); |
851 | submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio); | 851 | submit_bio(WRITE_FLUSH_FUA, bio); |
852 | } | 852 | } |
853 | 853 | ||
854 | void md_super_wait(mddev_t *mddev) | 854 | void md_super_wait(mddev_t *mddev) |
@@ -1138,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version | |||
1138 | ret = 0; | 1138 | ret = 0; |
1139 | } | 1139 | } |
1140 | rdev->sectors = rdev->sb_start; | 1140 | rdev->sectors = rdev->sb_start; |
1141 | /* Limit to 4TB as metadata cannot record more than that */ | ||
1142 | if (rdev->sectors >= (2ULL << 32)) | ||
1143 | rdev->sectors = (2ULL << 32) - 2; | ||
1141 | 1144 | ||
1142 | if (rdev->sectors < sb->size * 2 && sb->level > 1) | 1145 | if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) |
1143 | /* "this cannot possibly happen" ... */ | 1146 | /* "this cannot possibly happen" ... */ |
1144 | ret = -EINVAL; | 1147 | ret = -EINVAL; |
1145 | 1148 | ||
@@ -1173,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1173 | mddev->clevel[0] = 0; | 1176 | mddev->clevel[0] = 0; |
1174 | mddev->layout = sb->layout; | 1177 | mddev->layout = sb->layout; |
1175 | mddev->raid_disks = sb->raid_disks; | 1178 | mddev->raid_disks = sb->raid_disks; |
1176 | mddev->dev_sectors = sb->size * 2; | 1179 | mddev->dev_sectors = ((sector_t)sb->size) * 2; |
1177 | mddev->events = ev1; | 1180 | mddev->events = ev1; |
1178 | mddev->bitmap_info.offset = 0; | 1181 | mddev->bitmap_info.offset = 0; |
1179 | mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; | 1182 | mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; |
@@ -1415,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) | |||
1415 | rdev->sb_start = calc_dev_sboffset(rdev); | 1418 | rdev->sb_start = calc_dev_sboffset(rdev); |
1416 | if (!num_sectors || num_sectors > rdev->sb_start) | 1419 | if (!num_sectors || num_sectors > rdev->sb_start) |
1417 | num_sectors = rdev->sb_start; | 1420 | num_sectors = rdev->sb_start; |
1421 | /* Limit to 4TB as metadata cannot record more than that. | ||
1422 | * 4TB == 2^32 KB, or 2*2^32 sectors. | ||
1423 | */ | ||
1424 | if (num_sectors >= (2ULL << 32)) | ||
1425 | num_sectors = (2ULL << 32) - 2; | ||
1418 | md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, | 1426 | md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, |
1419 | rdev->sb_page); | 1427 | rdev->sb_page); |
1420 | md_super_wait(rdev->mddev); | 1428 | md_super_wait(rdev->mddev); |
@@ -1738,6 +1746,11 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1738 | sb->level = cpu_to_le32(mddev->level); | 1746 | sb->level = cpu_to_le32(mddev->level); |
1739 | sb->layout = cpu_to_le32(mddev->layout); | 1747 | sb->layout = cpu_to_le32(mddev->layout); |
1740 | 1748 | ||
1749 | if (test_bit(WriteMostly, &rdev->flags)) | ||
1750 | sb->devflags |= WriteMostly1; | ||
1751 | else | ||
1752 | sb->devflags &= ~WriteMostly1; | ||
1753 | |||
1741 | if (mddev->bitmap && mddev->bitmap_info.file == NULL) { | 1754 | if (mddev->bitmap && mddev->bitmap_info.file == NULL) { |
1742 | sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); | 1755 | sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); |
1743 | sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); | 1756 | sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); |
@@ -2561,7 +2574,10 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2561 | int err = -EINVAL; | 2574 | int err = -EINVAL; |
2562 | if (cmd_match(buf, "faulty") && rdev->mddev->pers) { | 2575 | if (cmd_match(buf, "faulty") && rdev->mddev->pers) { |
2563 | md_error(rdev->mddev, rdev); | 2576 | md_error(rdev->mddev, rdev); |
2564 | err = 0; | 2577 | if (test_bit(Faulty, &rdev->flags)) |
2578 | err = 0; | ||
2579 | else | ||
2580 | err = -EBUSY; | ||
2565 | } else if (cmd_match(buf, "remove")) { | 2581 | } else if (cmd_match(buf, "remove")) { |
2566 | if (rdev->raid_disk >= 0) | 2582 | if (rdev->raid_disk >= 0) |
2567 | err = -EBUSY; | 2583 | err = -EBUSY; |
@@ -2584,7 +2600,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2584 | err = 0; | 2600 | err = 0; |
2585 | } else if (cmd_match(buf, "-blocked")) { | 2601 | } else if (cmd_match(buf, "-blocked")) { |
2586 | if (!test_bit(Faulty, &rdev->flags) && | 2602 | if (!test_bit(Faulty, &rdev->flags) && |
2587 | test_bit(BlockedBadBlocks, &rdev->flags)) { | 2603 | rdev->badblocks.unacked_exist) { |
2588 | /* metadata handler doesn't understand badblocks, | 2604 | /* metadata handler doesn't understand badblocks, |
2589 | * so we need to fail the device | 2605 | * so we need to fail the device |
2590 | */ | 2606 | */ |
@@ -5983,6 +5999,8 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev) | |||
5983 | return -ENODEV; | 5999 | return -ENODEV; |
5984 | 6000 | ||
5985 | md_error(mddev, rdev); | 6001 | md_error(mddev, rdev); |
6002 | if (!test_bit(Faulty, &rdev->flags)) | ||
6003 | return -EBUSY; | ||
5986 | return 0; | 6004 | return 0; |
5987 | } | 6005 | } |
5988 | 6006 | ||
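Two arithmetic details in the md 0.90-superblock hunks: sb->size is a 32-bit count of kilobytes, so it must be widened to sector_t before the multiply by two, and the metadata cannot describe more than 4TB, hence the (2ULL << 32) - 2 sector cap. A self-contained demonstration of both points:

/* Shows the 32-bit wraparound the cast avoids, and the 4TB sector cap. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t size_kb = UINT32_C(3000000000);	/* ~2.8 TB expressed in KB */
	uint64_t wrong  = size_kb * 2;			/* 32-bit multiply wraps */
	uint64_t right  = (uint64_t)size_kb * 2;	/* widen before multiplying */
	uint64_t limit  = (2ULL << 32) - 2;		/* 4TB in 512-byte sectors */

	printf("wrong=%llu right=%llu limit=%llu\n",
	       (unsigned long long)wrong,
	       (unsigned long long)right,
	       (unsigned long long)limit);
	return 0;
}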
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 32323f0afd89..f4622dd8fc59 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1099,12 +1099,11 @@ read_again: | |||
1099 | bio_list_add(&conf->pending_bio_list, mbio); | 1099 | bio_list_add(&conf->pending_bio_list, mbio); |
1100 | spin_unlock_irqrestore(&conf->device_lock, flags); | 1100 | spin_unlock_irqrestore(&conf->device_lock, flags); |
1101 | } | 1101 | } |
1102 | r1_bio_write_done(r1_bio); | 1102 | /* Mustn't call r1_bio_write_done before this next test, |
1103 | 1103 | * as it could result in the bio being freed. | |
1104 | /* In case raid1d snuck in to freeze_array */ | 1104 | */ |
1105 | wake_up(&conf->wait_barrier); | ||
1106 | |||
1107 | if (sectors_handled < (bio->bi_size >> 9)) { | 1105 | if (sectors_handled < (bio->bi_size >> 9)) { |
1106 | r1_bio_write_done(r1_bio); | ||
1108 | /* We need another r1_bio. It has already been counted | 1107 | /* We need another r1_bio. It has already been counted |
1109 | * in bio->bi_phys_segments | 1108 | * in bio->bi_phys_segments |
1110 | */ | 1109 | */ |
@@ -1117,6 +1116,11 @@ read_again: | |||
1117 | goto retry_write; | 1116 | goto retry_write; |
1118 | } | 1117 | } |
1119 | 1118 | ||
1119 | r1_bio_write_done(r1_bio); | ||
1120 | |||
1121 | /* In case raid1d snuck in to freeze_array */ | ||
1122 | wake_up(&conf->wait_barrier); | ||
1123 | |||
1120 | if (do_sync || !bitmap || !plugged) | 1124 | if (do_sync || !bitmap || !plugged) |
1121 | md_wakeup_thread(mddev->thread); | 1125 | md_wakeup_thread(mddev->thread); |
1122 | 1126 | ||
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8b29cd4f01c8..d7a8468ddeab 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio) | |||
337 | md_write_end(r10_bio->mddev); | 337 | md_write_end(r10_bio->mddev); |
338 | } | 338 | } |
339 | 339 | ||
340 | static void one_write_done(r10bio_t *r10_bio) | ||
341 | { | ||
342 | if (atomic_dec_and_test(&r10_bio->remaining)) { | ||
343 | if (test_bit(R10BIO_WriteError, &r10_bio->state)) | ||
344 | reschedule_retry(r10_bio); | ||
345 | else { | ||
346 | close_write(r10_bio); | ||
347 | if (test_bit(R10BIO_MadeGood, &r10_bio->state)) | ||
348 | reschedule_retry(r10_bio); | ||
349 | else | ||
350 | raid_end_bio_io(r10_bio); | ||
351 | } | ||
352 | } | ||
353 | } | ||
354 | |||
340 | static void raid10_end_write_request(struct bio *bio, int error) | 355 | static void raid10_end_write_request(struct bio *bio, int error) |
341 | { | 356 | { |
342 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 357 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error) | |||
387 | * Let's see if all mirrored write operations have finished | 402 | * Let's see if all mirrored write operations have finished |
388 | * already. | 403 | * already. |
389 | */ | 404 | */ |
390 | if (atomic_dec_and_test(&r10_bio->remaining)) { | 405 | one_write_done(r10_bio); |
391 | if (test_bit(R10BIO_WriteError, &r10_bio->state)) | ||
392 | reschedule_retry(r10_bio); | ||
393 | else { | ||
394 | close_write(r10_bio); | ||
395 | if (test_bit(R10BIO_MadeGood, &r10_bio->state)) | ||
396 | reschedule_retry(r10_bio); | ||
397 | else | ||
398 | raid_end_bio_io(r10_bio); | ||
399 | } | ||
400 | } | ||
401 | if (dec_rdev) | 406 | if (dec_rdev) |
402 | rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); | 407 | rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); |
403 | } | 408 | } |
@@ -1127,20 +1132,12 @@ retry_write: | |||
1127 | spin_unlock_irqrestore(&conf->device_lock, flags); | 1132 | spin_unlock_irqrestore(&conf->device_lock, flags); |
1128 | } | 1133 | } |
1129 | 1134 | ||
1130 | if (atomic_dec_and_test(&r10_bio->remaining)) { | 1135 | /* Don't remove the bias on 'remaining' (one_write_done) until |
1131 | /* This matches the end of raid10_end_write_request() */ | 1136 | * after checking if we need to go around again. |
1132 | bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, | 1137 | */ |
1133 | r10_bio->sectors, | ||
1134 | !test_bit(R10BIO_Degraded, &r10_bio->state), | ||
1135 | 0); | ||
1136 | md_write_end(mddev); | ||
1137 | raid_end_bio_io(r10_bio); | ||
1138 | } | ||
1139 | |||
1140 | /* In case raid10d snuck in to freeze_array */ | ||
1141 | wake_up(&conf->wait_barrier); | ||
1142 | 1138 | ||
1143 | if (sectors_handled < (bio->bi_size >> 9)) { | 1139 | if (sectors_handled < (bio->bi_size >> 9)) { |
1140 | one_write_done(r10_bio); | ||
1144 | /* We need another r10_bio. It has already been counted | 1141 | /* We need another r10_bio. It has already been counted |
1145 | * in bio->bi_phys_segments. | 1142 | * in bio->bi_phys_segments. |
1146 | */ | 1143 | */ |
@@ -1154,6 +1151,10 @@ retry_write: | |||
1154 | r10_bio->state = 0; | 1151 | r10_bio->state = 0; |
1155 | goto retry_write; | 1152 | goto retry_write; |
1156 | } | 1153 | } |
1154 | one_write_done(r10_bio); | ||
1155 | |||
1156 | /* In case raid10d snuck in to freeze_array */ | ||
1157 | wake_up(&conf->wait_barrier); | ||
1157 | 1158 | ||
1158 | if (do_sync || !mddev->bitmap || !plugged) | 1159 | if (do_sync || !mddev->bitmap || !plugged) |
1159 | md_wakeup_thread(mddev->thread); | 1160 | md_wakeup_thread(mddev->thread); |
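The raid10 hunks factor the completion logic into one_write_done(), which runs the end-of-bio work only when the last holder of 'remaining' drops it; the submission path keeps its own bias until it knows no further r10_bio is needed, so completion can no longer race with the retry_write loop. An illustrative userspace model of that biased-refcount pattern; the callback is a printf stand-in:

/* Biased refcount completion model, C11 atomics. */
#include <stdio.h>
#include <stdatomic.h>

static atomic_int remaining = 1;	/* submission path holds a bias */

static void one_write_done(void)
{
	if (atomic_fetch_sub(&remaining, 1) == 1)
		printf("last reference dropped: end the bio\n");
}

int main(void)
{
	int i;

	for (i = 0; i < 2; i++) {	/* two mirrored writes */
		atomic_fetch_add(&remaining, 1);
		one_write_done();	/* each write completion */
	}
	one_write_done();		/* submission path drops its bias last */
	return 0;
}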
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index dbae459fb02d..43709fa6b6df 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -3336,7 +3336,7 @@ static void handle_stripe(struct stripe_head *sh) | |||
3336 | 3336 | ||
3337 | finish: | 3337 | finish: |
3338 | /* wait for this device to become unblocked */ | 3338 | /* wait for this device to become unblocked */ |
3339 | if (unlikely(s.blocked_rdev)) | 3339 | if (conf->mddev->external && unlikely(s.blocked_rdev)) |
3340 | md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); | 3340 | md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); |
3341 | 3341 | ||
3342 | if (s.handle_bad_blocks) | 3342 | if (s.handle_bad_blocks) |
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c index 3db89e3cb0bb..536c16c943bd 100644 --- a/drivers/media/dvb/dvb-usb/vp7045.c +++ b/drivers/media/dvb/dvb-usb/vp7045.c | |||
@@ -224,26 +224,8 @@ static struct dvb_usb_device_properties vp7045_properties; | |||
224 | static int vp7045_usb_probe(struct usb_interface *intf, | 224 | static int vp7045_usb_probe(struct usb_interface *intf, |
225 | const struct usb_device_id *id) | 225 | const struct usb_device_id *id) |
226 | { | 226 | { |
227 | struct dvb_usb_device *d; | 227 | return dvb_usb_device_init(intf, &vp7045_properties, |
228 | int ret = dvb_usb_device_init(intf, &vp7045_properties, | 228 | THIS_MODULE, NULL, adapter_nr); |
229 | THIS_MODULE, &d, adapter_nr); | ||
230 | if (ret) | ||
231 | return ret; | ||
232 | |||
233 | d->priv = kmalloc(20, GFP_KERNEL); | ||
234 | if (!d->priv) { | ||
235 | dvb_usb_device_exit(intf); | ||
236 | return -ENOMEM; | ||
237 | } | ||
238 | |||
239 | return ret; | ||
240 | } | ||
241 | |||
242 | static void vp7045_usb_disconnect(struct usb_interface *intf) | ||
243 | { | ||
244 | struct dvb_usb_device *d = usb_get_intfdata(intf); | ||
245 | kfree(d->priv); | ||
246 | dvb_usb_device_exit(intf); | ||
247 | } | 229 | } |
248 | 230 | ||
249 | static struct usb_device_id vp7045_usb_table [] = { | 231 | static struct usb_device_id vp7045_usb_table [] = { |
@@ -258,7 +240,7 @@ MODULE_DEVICE_TABLE(usb, vp7045_usb_table); | |||
258 | static struct dvb_usb_device_properties vp7045_properties = { | 240 | static struct dvb_usb_device_properties vp7045_properties = { |
259 | .usb_ctrl = CYPRESS_FX2, | 241 | .usb_ctrl = CYPRESS_FX2, |
260 | .firmware = "dvb-usb-vp7045-01.fw", | 242 | .firmware = "dvb-usb-vp7045-01.fw", |
261 | .size_of_priv = sizeof(u8 *), | 243 | .size_of_priv = 20, |
262 | 244 | ||
263 | .num_adapters = 1, | 245 | .num_adapters = 1, |
264 | .adapter = { | 246 | .adapter = { |
@@ -305,7 +287,7 @@ static struct dvb_usb_device_properties vp7045_properties = { | |||
305 | static struct usb_driver vp7045_usb_driver = { | 287 | static struct usb_driver vp7045_usb_driver = { |
306 | .name = "dvb_usb_vp7045", | 288 | .name = "dvb_usb_vp7045", |
307 | .probe = vp7045_usb_probe, | 289 | .probe = vp7045_usb_probe, |
308 | .disconnect = vp7045_usb_disconnect, | 290 | .disconnect = dvb_usb_device_exit, |
309 | .id_table = vp7045_usb_table, | 291 | .id_table = vp7045_usb_table, |
310 | }; | 292 | }; |
311 | 293 | ||
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index eae05b500476..144f3f55d765 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c | |||
@@ -618,7 +618,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt) | |||
618 | static void nvt_process_rx_ir_data(struct nvt_dev *nvt) | 618 | static void nvt_process_rx_ir_data(struct nvt_dev *nvt) |
619 | { | 619 | { |
620 | DEFINE_IR_RAW_EVENT(rawir); | 620 | DEFINE_IR_RAW_EVENT(rawir); |
621 | unsigned int count; | ||
622 | u32 carrier; | 621 | u32 carrier; |
623 | u8 sample; | 622 | u8 sample; |
624 | int i; | 623 | int i; |
@@ -631,65 +630,38 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt) | |||
631 | if (nvt->carrier_detect_enabled) | 630 | if (nvt->carrier_detect_enabled) |
632 | carrier = nvt_rx_carrier_detect(nvt); | 631 | carrier = nvt_rx_carrier_detect(nvt); |
633 | 632 | ||
634 | count = nvt->pkts; | 633 | nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts); |
635 | nvt_dbg_verbose("Processing buffer of len %d", count); | ||
636 | 634 | ||
637 | init_ir_raw_event(&rawir); | 635 | init_ir_raw_event(&rawir); |
638 | 636 | ||
639 | for (i = 0; i < count; i++) { | 637 | for (i = 0; i < nvt->pkts; i++) { |
640 | nvt->pkts--; | ||
641 | sample = nvt->buf[i]; | 638 | sample = nvt->buf[i]; |
642 | 639 | ||
643 | rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); | 640 | rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); |
644 | rawir.duration = US_TO_NS((sample & BUF_LEN_MASK) | 641 | rawir.duration = US_TO_NS((sample & BUF_LEN_MASK) |
645 | * SAMPLE_PERIOD); | 642 | * SAMPLE_PERIOD); |
646 | 643 | ||
647 | if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) { | 644 | nvt_dbg("Storing %s with duration %d", |
648 | if (nvt->rawir.pulse == rawir.pulse) | 645 | rawir.pulse ? "pulse" : "space", rawir.duration); |
649 | nvt->rawir.duration += rawir.duration; | ||
650 | else { | ||
651 | nvt->rawir.duration = rawir.duration; | ||
652 | nvt->rawir.pulse = rawir.pulse; | ||
653 | } | ||
654 | continue; | ||
655 | } | ||
656 | |||
657 | rawir.duration += nvt->rawir.duration; | ||
658 | 646 | ||
659 | init_ir_raw_event(&nvt->rawir); | 647 | ir_raw_event_store_with_filter(nvt->rdev, &rawir); |
660 | nvt->rawir.duration = 0; | ||
661 | nvt->rawir.pulse = rawir.pulse; | ||
662 | |||
663 | if (sample == BUF_PULSE_BIT) | ||
664 | rawir.pulse = false; | ||
665 | |||
666 | if (rawir.duration) { | ||
667 | nvt_dbg("Storing %s with duration %d", | ||
668 | rawir.pulse ? "pulse" : "space", | ||
669 | rawir.duration); | ||
670 | |||
671 | ir_raw_event_store_with_filter(nvt->rdev, &rawir); | ||
672 | } | ||
673 | 648 | ||
674 | /* | 649 | /* |
675 | * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE | 650 | * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE |
676 | * indicates end of IR signal, but new data incoming. In both | 651 | * indicates end of IR signal, but new data incoming. In both |
677 | * cases, it means we're ready to call ir_raw_event_handle | 652 | * cases, it means we're ready to call ir_raw_event_handle |
678 | */ | 653 | */ |
679 | if ((sample == BUF_PULSE_BIT) && nvt->pkts) { | 654 | if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) { |
680 | nvt_dbg("Calling ir_raw_event_handle (signal end)\n"); | 655 | nvt_dbg("Calling ir_raw_event_handle (signal end)\n"); |
681 | ir_raw_event_handle(nvt->rdev); | 656 | ir_raw_event_handle(nvt->rdev); |
682 | } | 657 | } |
683 | } | 658 | } |
684 | 659 | ||
660 | nvt->pkts = 0; | ||
661 | |||
685 | nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n"); | 662 | nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n"); |
686 | ir_raw_event_handle(nvt->rdev); | 663 | ir_raw_event_handle(nvt->rdev); |
687 | 664 | ||
688 | if (nvt->pkts) { | ||
689 | nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts); | ||
690 | nvt->pkts = 0; | ||
691 | } | ||
692 | |||
693 | nvt_dbg_verbose("%s done", __func__); | 665 | nvt_dbg_verbose("%s done", __func__); |
694 | } | 666 | } |
695 | 667 | ||
@@ -1048,7 +1020,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) | |||
1048 | 1020 | ||
1049 | spin_lock_init(&nvt->nvt_lock); | 1021 | spin_lock_init(&nvt->nvt_lock); |
1050 | spin_lock_init(&nvt->tx.lock); | 1022 | spin_lock_init(&nvt->tx.lock); |
1051 | init_ir_raw_event(&nvt->rawir); | ||
1052 | 1023 | ||
1053 | ret = -EBUSY; | 1024 | ret = -EBUSY; |
1054 | /* now claim resources */ | 1025 | /* now claim resources */ |
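The nuvoton-cir rework stops decrementing nvt->pkts inside the loop that consumes it: the "more data follows?" test becomes i + 1 < nvt->pkts, and the count is reset once after the whole buffer has been handed to ir_raw_event_store_with_filter(). A small self-contained illustration of iterating without mutating the bound; the sample bytes are made up:

/* Iterate a buffer and detect the last element from the index, instead of
 * decrementing the count while walking it. */
#include <stdio.h>

int main(void)
{
	unsigned char buf[] = { 0x91, 0x12, 0x80, 0x23 };
	int pkts = (int)sizeof(buf);
	int i;

	for (i = 0; i < pkts; i++) {
		int last = (i + 1 == pkts);

		printf("sample 0x%02x%s\n", buf[i], last ? " (last)" : "");
	}
	pkts = 0;	/* buffer consumed in one go */
	return 0;
}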
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h index 1241fc89a36c..0d5e0872a2ea 100644 --- a/drivers/media/rc/nuvoton-cir.h +++ b/drivers/media/rc/nuvoton-cir.h | |||
@@ -67,7 +67,6 @@ static int debug; | |||
67 | struct nvt_dev { | 67 | struct nvt_dev { |
68 | struct pnp_dev *pdev; | 68 | struct pnp_dev *pdev; |
69 | struct rc_dev *rdev; | 69 | struct rc_dev *rdev; |
70 | struct ir_raw_event rawir; | ||
71 | 70 | ||
72 | spinlock_t nvt_lock; | 71 | spinlock_t nvt_lock; |
73 | 72 | ||
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c index 0800433b2092..18305c89083c 100644 --- a/drivers/media/video/gspca/ov519.c +++ b/drivers/media/video/gspca/ov519.c | |||
@@ -2858,7 +2858,6 @@ static void ov7xx0_configure(struct sd *sd) | |||
2858 | case 0x60: | 2858 | case 0x60: |
2859 | PDEBUG(D_PROBE, "Sensor is a OV7660"); | 2859 | PDEBUG(D_PROBE, "Sensor is a OV7660"); |
2860 | sd->sensor = SEN_OV7660; | 2860 | sd->sensor = SEN_OV7660; |
2861 | sd->invert_led = 0; | ||
2862 | break; | 2861 | break; |
2863 | default: | 2862 | default: |
2864 | PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low); | 2863 | PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low); |
@@ -3337,7 +3336,6 @@ static int sd_config(struct gspca_dev *gspca_dev, | |||
3337 | case BRIDGE_OV519: | 3336 | case BRIDGE_OV519: |
3338 | cam->cam_mode = ov519_vga_mode; | 3337 | cam->cam_mode = ov519_vga_mode; |
3339 | cam->nmodes = ARRAY_SIZE(ov519_vga_mode); | 3338 | cam->nmodes = ARRAY_SIZE(ov519_vga_mode); |
3340 | sd->invert_led = !sd->invert_led; | ||
3341 | break; | 3339 | break; |
3342 | case BRIDGE_OVFX2: | 3340 | case BRIDGE_OVFX2: |
3343 | cam->cam_mode = ov519_vga_mode; | 3341 | cam->cam_mode = ov519_vga_mode; |
@@ -5005,24 +5003,24 @@ static const struct sd_desc sd_desc = { | |||
5005 | /* -- module initialisation -- */ | 5003 | /* -- module initialisation -- */ |
5006 | static const struct usb_device_id device_table[] = { | 5004 | static const struct usb_device_id device_table[] = { |
5007 | {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF }, | 5005 | {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF }, |
5008 | {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 }, | 5006 | {USB_DEVICE(0x041e, 0x4052), |
5009 | {USB_DEVICE(0x041e, 0x405f), | ||
5010 | .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, | 5007 | .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, |
5008 | {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 }, | ||
5011 | {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 }, | 5009 | {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 }, |
5012 | {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, | 5010 | {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, |
5013 | {USB_DEVICE(0x041e, 0x4064), | 5011 | {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 }, |
5014 | .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, | ||
5015 | {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 }, | 5012 | {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 }, |
5016 | {USB_DEVICE(0x041e, 0x4068), | 5013 | {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 }, |
5014 | {USB_DEVICE(0x045e, 0x028c), | ||
5017 | .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, | 5015 | .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, |
5018 | {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 }, | ||
5019 | {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 }, | 5016 | {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 }, |
5020 | {USB_DEVICE(0x054c, 0x0155), | 5017 | {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 }, |
5021 | .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, | ||
5022 | {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 }, | 5018 | {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 }, |
5023 | {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 }, | 5019 | {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 }, |
5024 | {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 }, | 5020 | {USB_DEVICE(0x05a9, 0x0519), |
5025 | {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 }, | 5021 | .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, |
5022 | {USB_DEVICE(0x05a9, 0x0530), | ||
5023 | .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, | ||
5026 | {USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 }, | 5024 | {USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 }, |
5027 | {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 }, | 5025 | {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 }, |
5028 | {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 }, | 5026 | {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 }, |
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c index 81b8a600783b..c477ad11f103 100644 --- a/drivers/media/video/gspca/sonixj.c +++ b/drivers/media/video/gspca/sonixj.c | |||
@@ -2386,7 +2386,7 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
2386 | reg_w1(gspca_dev, 0x01, 0x22); | 2386 | reg_w1(gspca_dev, 0x01, 0x22); |
2387 | msleep(100); | 2387 | msleep(100); |
2388 | reg01 = SCL_SEL_OD | S_PDN_INV; | 2388 | reg01 = SCL_SEL_OD | S_PDN_INV; |
2389 | reg17 &= MCK_SIZE_MASK; | 2389 | reg17 &= ~MCK_SIZE_MASK; |
2390 | reg17 |= 0x04; /* clock / 4 */ | 2390 | reg17 |= 0x04; /* clock / 4 */ |
2391 | break; | 2391 | break; |
2392 | } | 2392 | } |
@@ -2532,6 +2532,10 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
2532 | if (!mode) { /* if 640x480 */ | 2532 | if (!mode) { /* if 640x480 */ |
2533 | reg17 &= ~MCK_SIZE_MASK; | 2533 | reg17 &= ~MCK_SIZE_MASK; |
2534 | reg17 |= 0x04; /* clock / 4 */ | 2534 | reg17 |= 0x04; /* clock / 4 */ |
2535 | } else { | ||
2536 | reg01 &= ~SYS_SEL_48M; /* clk 24Mz */ | ||
2537 | reg17 &= ~MCK_SIZE_MASK; | ||
2538 | reg17 |= 0x02; /* clock / 2 */ | ||
2535 | } | 2539 | } |
2536 | break; | 2540 | break; |
2537 | case SENSOR_OV7630: | 2541 | case SENSOR_OV7630: |
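The sonixj fix is a classic read-modify-write slip: to replace a bit-field you AND with the complement of the mask before OR-ing in the new value; "reg17 &= MCK_SIZE_MASK" would instead wipe every bit outside the field. A self-contained demonstration, with an assumed mask value for illustration:

/* Clearing a bit-field with ~mask versus mask; the mask width is assumed. */
#include <stdio.h>

#define MCK_SIZE_MASK 0x07	/* assumed field width, for illustration */

int main(void)
{
	unsigned char reg17 = 0xe3;				/* other bits set + old field */
	unsigned char wrong = reg17 & MCK_SIZE_MASK;		/* 0x03: outside bits lost */
	unsigned char right = (reg17 & ~MCK_SIZE_MASK) | 0x04;	/* 0xe4: field replaced */

	printf("wrong=0x%02x right=0x%02x\n", wrong, right);
	return 0;
}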
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c index e9a0e94b9995..8c70e64444e7 100644 --- a/drivers/media/video/pwc/pwc-v4l.c +++ b/drivers/media/video/pwc/pwc-v4l.c | |||
@@ -338,7 +338,7 @@ int pwc_init_controls(struct pwc_device *pdev) | |||
338 | if (pdev->restore_factory) | 338 | if (pdev->restore_factory) |
339 | pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE; | 339 | pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE; |
340 | 340 | ||
341 | if (!pdev->features & FEATURE_MOTOR_PANTILT) | 341 | if (!(pdev->features & FEATURE_MOTOR_PANTILT)) |
342 | return hdl->error; | 342 | return hdl->error; |
343 | 343 | ||
344 | /* Motor pan / tilt / reset */ | 344 | /* Motor pan / tilt / reset */ |
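The pwc fix is an operator-precedence bug: '!' binds tighter than '&', so !pdev->features & FEATURE_MOTOR_PANTILT evaluates (!features) & FLAG and is almost always zero; the intended test is !(features & FLAG). A self-contained demonstration, with an illustrative flag value:

/* Precedence of '!' versus '&'. */
#include <stdio.h>

#define FEATURE_MOTOR_PANTILT 0x01	/* illustrative value */

int main(void)
{
	unsigned int features = 0x04;	/* pan/tilt bit not set */

	printf("buggy:   %d\n", !features & FEATURE_MOTOR_PANTILT);	/* prints 0 */
	printf("correct: %d\n", !(features & FEATURE_MOTOR_PANTILT));	/* prints 1 */
	return 0;
}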
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c index 85d3048c1d67..bb7f17f2a33c 100644 --- a/drivers/media/video/via-camera.c +++ b/drivers/media/video/via-camera.c | |||
@@ -1332,6 +1332,8 @@ static __devinit bool viacam_serial_is_enabled(void) | |||
1332 | struct pci_bus *pbus = pci_find_bus(0, 0); | 1332 | struct pci_bus *pbus = pci_find_bus(0, 0); |
1333 | u8 cbyte; | 1333 | u8 cbyte; |
1334 | 1334 | ||
1335 | if (!pbus) | ||
1336 | return false; | ||
1335 | pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN, | 1337 | pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN, |
1336 | VIACAM_SERIAL_CREG, &cbyte); | 1338 | VIACAM_SERIAL_CREG, &cbyte); |
1337 | if ((cbyte & VIACAM_SERIAL_BIT) == 0) | 1339 | if ((cbyte & VIACAM_SERIAL_BIT) == 0) |
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c index 5d1fca0277ef..f83103b8970d 100644 --- a/drivers/mfd/max8997.c +++ b/drivers/mfd/max8997.c | |||
@@ -135,10 +135,13 @@ static int max8997_i2c_probe(struct i2c_client *i2c, | |||
135 | max8997->dev = &i2c->dev; | 135 | max8997->dev = &i2c->dev; |
136 | max8997->i2c = i2c; | 136 | max8997->i2c = i2c; |
137 | max8997->type = id->driver_data; | 137 | max8997->type = id->driver_data; |
138 | max8997->irq = i2c->irq; | ||
138 | 139 | ||
139 | if (!pdata) | 140 | if (!pdata) |
140 | goto err; | 141 | goto err; |
141 | 142 | ||
143 | max8997->irq_base = pdata->irq_base; | ||
144 | max8997->ono = pdata->ono; | ||
142 | max8997->wakeup = pdata->wakeup; | 145 | max8997->wakeup = pdata->wakeup; |
143 | 146 | ||
144 | mutex_init(&max8997->iolock); | 147 | mutex_init(&max8997->iolock); |
@@ -152,6 +155,8 @@ static int max8997_i2c_probe(struct i2c_client *i2c, | |||
152 | 155 | ||
153 | pm_runtime_set_active(max8997->dev); | 156 | pm_runtime_set_active(max8997->dev); |
154 | 157 | ||
158 | max8997_irq_init(max8997); | ||
159 | |||
155 | mfd_add_devices(max8997->dev, -1, max8997_devs, | 160 | mfd_add_devices(max8997->dev, -1, max8997_devs, |
156 | ARRAY_SIZE(max8997_devs), | 161 | ARRAY_SIZE(max8997_devs), |
157 | NULL, 0); | 162 | NULL, 0); |
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 29601e7d606d..86e14583a082 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c | |||
@@ -17,6 +17,7 @@ | |||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ | 18 | */ |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/module.h> | ||
20 | #include <linux/types.h> | 21 | #include <linux/types.h> |
21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
22 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
@@ -676,7 +677,6 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count) | |||
676 | | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF | 677 | | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF |
677 | | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); | 678 | | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); |
678 | 679 | ||
679 | reg |= (1 << (i + 1)); | ||
680 | } else | 680 | } else |
681 | continue; | 681 | continue; |
682 | 682 | ||
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c index 2bfad5c86cc7..a56be931551c 100644 --- a/drivers/mfd/tps65910-irq.c +++ b/drivers/mfd/tps65910-irq.c | |||
@@ -178,8 +178,10 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq, | |||
178 | switch (tps65910_chip_id(tps65910)) { | 178 | switch (tps65910_chip_id(tps65910)) { |
179 | case TPS65910: | 179 | case TPS65910: |
180 | tps65910->irq_num = TPS65910_NUM_IRQ; | 180 | tps65910->irq_num = TPS65910_NUM_IRQ; |
181 | break; | ||
181 | case TPS65911: | 182 | case TPS65911: |
182 | tps65910->irq_num = TPS65911_NUM_IRQ; | 183 | tps65910->irq_num = TPS65911_NUM_IRQ; |
184 | break; | ||
183 | } | 185 | } |
184 | 186 | ||
185 | /* Register with genirq */ | 187 | /* Register with genirq */ |
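
The tps65910 hunk adds the `break` statements that keep the TPS65910 case from falling through into the TPS65911 one. A compact sketch of the fallthrough effect (the IRQ counts are placeholders, not the driver's real numbers):

#include <stdio.h>

enum chip_id { TPS65910, TPS65911 };

#define TPS65910_NUM_IRQ 13	/* illustrative values */
#define TPS65911_NUM_IRQ 18

static int irq_count(enum chip_id id)
{
	int num = 0;

	switch (id) {
	case TPS65910:
		num = TPS65910_NUM_IRQ;
		break;		/* without this, execution falls through and
				 * TPS65911_NUM_IRQ wins for both chips */
	case TPS65911:
		num = TPS65911_NUM_IRQ;
		break;
	}
	return num;
}

int main(void)
{
	printf("TPS65910: %d IRQs\n", irq_count(TPS65910));
	printf("TPS65911: %d IRQs\n", irq_count(TPS65911));
	return 0;
}
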
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c index b5d598c3aa71..7cbf2aa9e64f 100644 --- a/drivers/mfd/twl4030-madc.c +++ b/drivers/mfd/twl4030-madc.c | |||
@@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req) | |||
510 | u8 ch_msb, ch_lsb; | 510 | u8 ch_msb, ch_lsb; |
511 | int ret; | 511 | int ret; |
512 | 512 | ||
513 | if (!req) | 513 | if (!req || !twl4030_madc) |
514 | return -EINVAL; | 514 | return -EINVAL; |
515 | |||
515 | mutex_lock(&twl4030_madc->lock); | 516 | mutex_lock(&twl4030_madc->lock); |
516 | if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) { | 517 | if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) { |
517 | ret = -EINVAL; | 518 | ret = -EINVAL; |
@@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev) | |||
706 | if (!madc) | 707 | if (!madc) |
707 | return -ENOMEM; | 708 | return -ENOMEM; |
708 | 709 | ||
710 | madc->dev = &pdev->dev; | ||
711 | |||
709 | /* | 712 | /* |
710 | * Phoenix provides 2 interrupt lines. The first one is connected to | 713 | * Phoenix provides 2 interrupt lines. The first one is connected to |
711 | * the OMAP. The other one can be connected to the other processor such | 714 | * the OMAP. The other one can be connected to the other processor such |
diff --git a/drivers/mfd/wm8350-gpio.c b/drivers/mfd/wm8350-gpio.c index ebf99bef392f..d584f6b4d6e2 100644 --- a/drivers/mfd/wm8350-gpio.c +++ b/drivers/mfd/wm8350-gpio.c | |||
@@ -37,7 +37,7 @@ static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir) | |||
37 | return ret; | 37 | return ret; |
38 | } | 38 | } |
39 | 39 | ||
40 | static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) | 40 | static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) |
41 | { | 41 | { |
42 | if (db == WM8350_GPIO_DEBOUNCE_ON) | 42 | if (db == WM8350_GPIO_DEBOUNCE_ON) |
43 | return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, | 43 | return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, |
@@ -210,7 +210,7 @@ int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func, | |||
210 | goto err; | 210 | goto err; |
211 | if (gpio_set_polarity(wm8350, gpio, pol)) | 211 | if (gpio_set_polarity(wm8350, gpio, pol)) |
212 | goto err; | 212 | goto err; |
213 | if (gpio_set_debounce(wm8350, gpio, debounce)) | 213 | if (wm8350_gpio_set_debounce(wm8350, gpio, debounce)) |
214 | goto err; | 214 | goto err; |
215 | if (gpio_set_dir(wm8350, gpio, dir)) | 215 | if (gpio_set_dir(wm8350, gpio, dir)) |
216 | goto err; | 216 | goto err; |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 0a4d86c6c4a4..2d6423c2d193 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -146,6 +146,7 @@ config PHANTOM | |||
146 | 146 | ||
147 | config INTEL_MID_PTI | 147 | config INTEL_MID_PTI |
148 | tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard" | 148 | tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard" |
149 | depends on PCI | ||
149 | default n | 150 | default n |
150 | help | 151 | help |
151 | The PTI (Parallel Trace Interface) driver directs | 152 | The PTI (Parallel Trace Interface) driver directs |
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c index 54e3d05b63cc..35903154ca2e 100644 --- a/drivers/misc/ab8500-pwm.c +++ b/drivers/misc/ab8500-pwm.c | |||
@@ -164,5 +164,5 @@ subsys_initcall(ab8500_pwm_init); | |||
164 | module_exit(ab8500_pwm_exit); | 164 | module_exit(ab8500_pwm_exit); |
165 | MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>"); | 165 | MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>"); |
166 | MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); | 166 | MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); |
167 | MODULE_ALIAS("AB8500 PWM driver"); | 167 | MODULE_ALIAS("platform:ab8500-pwm"); |
168 | MODULE_LICENSE("GPL v2"); | 168 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c index efec4139c3f6..68cd05b6d829 100644 --- a/drivers/misc/cb710/core.c +++ b/drivers/misc/cb710/core.c | |||
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg); | |||
33 | static int __devinit cb710_pci_configure(struct pci_dev *pdev) | 33 | static int __devinit cb710_pci_configure(struct pci_dev *pdev) |
34 | { | 34 | { |
35 | unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); | 35 | unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); |
36 | struct pci_dev *pdev0 = pci_get_slot(pdev->bus, devfn); | 36 | struct pci_dev *pdev0; |
37 | u32 val; | 37 | u32 val; |
38 | 38 | ||
39 | cb710_pci_update_config_reg(pdev, 0x48, | 39 | cb710_pci_update_config_reg(pdev, 0x48, |
@@ -43,6 +43,7 @@ static int __devinit cb710_pci_configure(struct pci_dev *pdev) | |||
43 | if (val & 0x80000000) | 43 | if (val & 0x80000000) |
44 | return 0; | 44 | return 0; |
45 | 45 | ||
46 | pdev0 = pci_get_slot(pdev->bus, devfn); | ||
46 | if (!pdev0) | 47 | if (!pdev0) |
47 | return -ENODEV; | 48 | return -ENODEV; |
48 | 49 | ||
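
pci_get_slot() returns its device with the reference count raised, so taking the reference before the early "already configured" return leaked it on that path; the hunk defers the lookup until after that check. A toy stand-in (a plain counter instead of PCI calls) showing the fixed ordering:

#include <stdio.h>

/* Toy counted reference standing in for pci_get_slot()/pci_dev_put(). */
static int refcount;

static int *get_ref(int present)
{
	if (!present)
		return NULL;
	refcount++;
	return &refcount;
}

static void put_ref(int *ref)
{
	(void)ref;
	refcount--;
}

/* Mirrors the fixed flow: the reference is taken only after the early
 * "already configured" return, so no exit path can leak it. */
static int configure(int already_configured, int slot_present)
{
	int *ref;

	if (already_configured)
		return 0;		/* nothing acquired, nothing to drop */

	ref = get_ref(slot_present);
	if (!ref)
		return -1;		/* -ENODEV in the driver */

	/* ... program the sibling function through the reference ... */

	put_ref(ref);
	return 0;
}

int main(void)
{
	configure(1, 1);
	configure(0, 1);
	printf("outstanding references: %d\n", refcount);	/* prints 0 */
	return 0;
}
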
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c index 5325a7e70dcf..27dc0d21aafa 100644 --- a/drivers/misc/fsa9480.c +++ b/drivers/misc/fsa9480.c | |||
@@ -455,7 +455,7 @@ static int __devinit fsa9480_probe(struct i2c_client *client, | |||
455 | 455 | ||
456 | fail2: | 456 | fail2: |
457 | if (client->irq) | 457 | if (client->irq) |
458 | free_irq(client->irq, NULL); | 458 | free_irq(client->irq, usbsw); |
459 | fail1: | 459 | fail1: |
460 | i2c_set_clientdata(client, NULL); | 460 | i2c_set_clientdata(client, NULL); |
461 | kfree(usbsw); | 461 | kfree(usbsw); |
@@ -466,7 +466,7 @@ static int __devexit fsa9480_remove(struct i2c_client *client) | |||
466 | { | 466 | { |
467 | struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); | 467 | struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); |
468 | if (client->irq) | 468 | if (client->irq) |
469 | free_irq(client->irq, NULL); | 469 | free_irq(client->irq, usbsw); |
470 | i2c_set_clientdata(client, NULL); | 470 | i2c_set_clientdata(client, NULL); |
471 | 471 | ||
472 | sysfs_remove_group(&client->dev.kobj, &fsa9480_group); | 472 | sysfs_remove_group(&client->dev.kobj, &fsa9480_group); |
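
free_irq() tears down only the handler whose dev_id matches the cookie that request_irq() registered, which is why the error and remove paths above must pass `usbsw` rather than NULL. A toy registry sketch of that matching (array size and return codes are arbitrary):

#include <stdio.h>

/* Toy IRQ registry keyed by (irq, cookie), mimicking how free_irq()
 * matches handlers by the dev_id that request_irq() registered. */
struct action { int irq; void *cookie; int used; };
static struct action table[4];

static int toy_request_irq(int irq, void *cookie)
{
	int i;

	for (i = 0; i < 4; i++)
		if (!table[i].used) {
			table[i].irq = irq;
			table[i].cookie = cookie;
			table[i].used = 1;
			return 0;
		}
	return -1;
}

static int toy_free_irq(int irq, void *cookie)
{
	int i;

	for (i = 0; i < 4; i++)
		if (table[i].used && table[i].irq == irq &&
		    table[i].cookie == cookie) {
			table[i].used = 0;
			return 0;
		}
	return -1;	/* no match: the registration is left behind */
}

int main(void)
{
	int usbsw;	/* stands in for the driver's private data */

	toy_request_irq(7, &usbsw);
	printf("free with NULL cookie:     %d\n", toy_free_irq(7, NULL));
	printf("free with matching cookie: %d\n", toy_free_irq(7, &usbsw));
	return 0;
}
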
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c index 8653bd0b1a33..0b56e3f43573 100644 --- a/drivers/misc/pti.c +++ b/drivers/misc/pti.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <linux/mutex.h> | 33 | #include <linux/mutex.h> |
34 | #include <linux/miscdevice.h> | 34 | #include <linux/miscdevice.h> |
35 | #include <linux/pti.h> | 35 | #include <linux/pti.h> |
36 | #include <linux/slab.h> | ||
37 | #include <linux/uaccess.h> | ||
36 | 38 | ||
37 | #define DRIVERNAME "pti" | 39 | #define DRIVERNAME "pti" |
38 | #define PCINAME "pciPTI" | 40 | #define PCINAME "pciPTI" |
@@ -163,6 +165,11 @@ static void pti_write_to_aperture(struct pti_masterchannel *mc, | |||
163 | static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, | 165 | static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, |
164 | const char *thread_name) | 166 | const char *thread_name) |
165 | { | 167 | { |
168 | /* | ||
169 | * Since we access the comm member in current's task_struct, we only | ||
170 | * need to be as large as what 'comm' in that structure is. | ||
171 | */ | ||
172 | char comm[TASK_COMM_LEN]; | ||
166 | struct pti_masterchannel mccontrol = {.master = CONTROL_ID, | 173 | struct pti_masterchannel mccontrol = {.master = CONTROL_ID, |
167 | .channel = 0}; | 174 | .channel = 0}; |
168 | const char *thread_name_p; | 175 | const char *thread_name_p; |
@@ -170,13 +177,6 @@ static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, | |||
170 | u8 control_frame[CONTROL_FRAME_LEN]; | 177 | u8 control_frame[CONTROL_FRAME_LEN]; |
171 | 178 | ||
172 | if (!thread_name) { | 179 | if (!thread_name) { |
173 | /* | ||
174 | * Since we access the comm member in current's task_struct, | ||
175 | * we only need to be as large as what 'comm' in that | ||
176 | * structure is. | ||
177 | */ | ||
178 | char comm[TASK_COMM_LEN]; | ||
179 | |||
180 | if (!in_interrupt()) | 180 | if (!in_interrupt()) |
181 | get_task_comm(comm, current); | 181 | get_task_comm(comm, current); |
182 | else | 182 | else |
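
The pti change hoists the `comm` buffer to function scope because `thread_name_p` may point at it after the inner block ends; a block-scoped buffer would leave that pointer dangling. A standalone sketch of the corrected lifetime (the fixed string stands in for get_task_comm()):

#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN 16

/* 'comm' must outlive every use of the pointer that may refer to it,
 * so it is declared at function scope, exactly as the patch hoists it
 * out of the inner if-block. */
static void build_control_frame(const char *thread_name)
{
	char comm[TASK_COMM_LEN];
	const char *thread_name_p;

	if (!thread_name) {
		/* Were 'comm' declared inside this block, thread_name_p
		 * would dangle as soon as the block ends. */
		strncpy(comm, "current-task", sizeof(comm) - 1);
		comm[sizeof(comm) - 1] = '\0';
		thread_name_p = comm;
	} else {
		thread_name_p = thread_name;
	}

	printf("control frame for \"%s\"\n", thread_name_p);
}

int main(void)
{
	build_control_frame(NULL);
	build_control_frame("userspace-writer");
	return 0;
}
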
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index 54c91ffe4a91..ba168a7d54d4 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c | |||
@@ -338,6 +338,12 @@ void st_int_recv(void *disc_data, | |||
338 | /* Unknow packet? */ | 338 | /* Unknow packet? */ |
339 | default: | 339 | default: |
340 | type = *ptr; | 340 | type = *ptr; |
341 | if (st_gdata->list[type] == NULL) { | ||
342 | pr_err("chip/interface misbehavior dropping" | ||
343 | " frame starting with 0x%02x", type); | ||
344 | goto done; | ||
345 | |||
346 | } | ||
341 | st_gdata->rx_skb = alloc_skb( | 347 | st_gdata->rx_skb = alloc_skb( |
342 | st_gdata->list[type]->max_frame_size, | 348 | st_gdata->list[type]->max_frame_size, |
343 | GFP_ATOMIC); | 349 | GFP_ATOMIC); |
@@ -354,6 +360,7 @@ void st_int_recv(void *disc_data, | |||
354 | ptr++; | 360 | ptr++; |
355 | count--; | 361 | count--; |
356 | } | 362 | } |
363 | done: | ||
357 | spin_unlock_irqrestore(&st_gdata->lock, flags); | 364 | spin_unlock_irqrestore(&st_gdata->lock, flags); |
358 | pr_debug("done %s", __func__); | 365 | pr_debug("done %s", __func__); |
359 | return; | 366 | return; |
@@ -717,9 +724,10 @@ static void st_tty_close(struct tty_struct *tty) | |||
717 | */ | 724 | */ |
718 | spin_lock_irqsave(&st_gdata->lock, flags); | 725 | spin_lock_irqsave(&st_gdata->lock, flags); |
719 | for (i = ST_BT; i < ST_MAX_CHANNELS; i++) { | 726 | for (i = ST_BT; i < ST_MAX_CHANNELS; i++) { |
720 | if (st_gdata->list[i] != NULL) | 727 | if (st_gdata->is_registered[i] == true) |
721 | pr_err("%d not un-registered", i); | 728 | pr_err("%d not un-registered", i); |
722 | st_gdata->list[i] = NULL; | 729 | st_gdata->list[i] = NULL; |
730 | st_gdata->is_registered[i] = false; | ||
723 | } | 731 | } |
724 | st_gdata->protos_registered = 0; | 732 | st_gdata->protos_registered = 0; |
725 | spin_unlock_irqrestore(&st_gdata->lock, flags); | 733 | spin_unlock_irqrestore(&st_gdata->lock, flags); |
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index 38fd2f04c07e..3a3580566dfc 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c | |||
@@ -68,6 +68,7 @@ void validate_firmware_response(struct kim_data_s *kim_gdata) | |||
68 | if (unlikely(skb->data[5] != 0)) { | 68 | if (unlikely(skb->data[5] != 0)) { |
69 | pr_err("no proper response during fw download"); | 69 | pr_err("no proper response during fw download"); |
70 | pr_err("data6 %x", skb->data[5]); | 70 | pr_err("data6 %x", skb->data[5]); |
71 | kfree_skb(skb); | ||
71 | return; /* keep waiting for the proper response */ | 72 | return; /* keep waiting for the proper response */ |
72 | } | 73 | } |
73 | /* becos of all the script being downloaded */ | 74 | /* becos of all the script being downloaded */ |
@@ -210,6 +211,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name) | |||
210 | pr_err(" waiting for ver info- timed out "); | 211 | pr_err(" waiting for ver info- timed out "); |
211 | return -ETIMEDOUT; | 212 | return -ETIMEDOUT; |
212 | } | 213 | } |
214 | INIT_COMPLETION(kim_gdata->kim_rcvd); | ||
213 | 215 | ||
214 | version = | 216 | version = |
215 | MAKEWORD(kim_gdata->resp_buffer[13], | 217 | MAKEWORD(kim_gdata->resp_buffer[13], |
@@ -298,6 +300,7 @@ static long download_firmware(struct kim_data_s *kim_gdata) | |||
298 | 300 | ||
299 | switch (((struct bts_action *)ptr)->type) { | 301 | switch (((struct bts_action *)ptr)->type) { |
300 | case ACTION_SEND_COMMAND: /* action send */ | 302 | case ACTION_SEND_COMMAND: /* action send */ |
303 | pr_debug("S"); | ||
301 | action_ptr = &(((struct bts_action *)ptr)->data[0]); | 304 | action_ptr = &(((struct bts_action *)ptr)->data[0]); |
302 | if (unlikely | 305 | if (unlikely |
303 | (((struct hci_command *)action_ptr)->opcode == | 306 | (((struct hci_command *)action_ptr)->opcode == |
@@ -335,6 +338,10 @@ static long download_firmware(struct kim_data_s *kim_gdata) | |||
335 | release_firmware(kim_gdata->fw_entry); | 338 | release_firmware(kim_gdata->fw_entry); |
336 | return -ETIMEDOUT; | 339 | return -ETIMEDOUT; |
337 | } | 340 | } |
341 | /* reinit completion before sending for the | ||
342 | * relevant wait | ||
343 | */ | ||
344 | INIT_COMPLETION(kim_gdata->kim_rcvd); | ||
338 | 345 | ||
339 | /* | 346 | /* |
340 | * Free space found in uart buffer, call st_int_write | 347 | * Free space found in uart buffer, call st_int_write |
@@ -361,6 +368,7 @@ static long download_firmware(struct kim_data_s *kim_gdata) | |||
361 | } | 368 | } |
362 | break; | 369 | break; |
363 | case ACTION_WAIT_EVENT: /* wait */ | 370 | case ACTION_WAIT_EVENT: /* wait */ |
371 | pr_debug("W"); | ||
364 | if (!wait_for_completion_timeout | 372 | if (!wait_for_completion_timeout |
365 | (&kim_gdata->kim_rcvd, | 373 | (&kim_gdata->kim_rcvd, |
366 | msecs_to_jiffies(CMD_RESP_TIME))) { | 374 | msecs_to_jiffies(CMD_RESP_TIME))) { |
@@ -434,11 +442,17 @@ long st_kim_start(void *kim_data) | |||
434 | { | 442 | { |
435 | long err = 0; | 443 | long err = 0; |
436 | long retry = POR_RETRY_COUNT; | 444 | long retry = POR_RETRY_COUNT; |
445 | struct ti_st_plat_data *pdata; | ||
437 | struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; | 446 | struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; |
438 | 447 | ||
439 | pr_info(" %s", __func__); | 448 | pr_info(" %s", __func__); |
449 | pdata = kim_gdata->kim_pdev->dev.platform_data; | ||
440 | 450 | ||
441 | do { | 451 | do { |
452 | /* platform specific enabling code here */ | ||
453 | if (pdata->chip_enable) | ||
454 | pdata->chip_enable(kim_gdata); | ||
455 | |||
442 | /* Configure BT nShutdown to HIGH state */ | 456 | /* Configure BT nShutdown to HIGH state */ |
443 | gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); | 457 | gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); |
444 | mdelay(5); /* FIXME: a proper toggle */ | 458 | mdelay(5); /* FIXME: a proper toggle */ |
@@ -460,6 +474,12 @@ long st_kim_start(void *kim_data) | |||
460 | pr_info("ldisc_install = 0"); | 474 | pr_info("ldisc_install = 0"); |
461 | sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, | 475 | sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, |
462 | NULL, "install"); | 476 | NULL, "install"); |
477 | /* the following wait is never going to be completed, | ||
478 | * since the ldisc was never installed, hence serving | ||
479 | * as an mdelay of LDISC_TIME msecs */ | ||
480 | err = wait_for_completion_timeout | ||
481 | (&kim_gdata->ldisc_installed, | ||
482 | msecs_to_jiffies(LDISC_TIME)); | ||
463 | err = -ETIMEDOUT; | 483 | err = -ETIMEDOUT; |
464 | continue; | 484 | continue; |
465 | } else { | 485 | } else { |
@@ -472,6 +492,13 @@ long st_kim_start(void *kim_data) | |||
472 | pr_info("ldisc_install = 0"); | 492 | pr_info("ldisc_install = 0"); |
473 | sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, | 493 | sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, |
474 | NULL, "install"); | 494 | NULL, "install"); |
495 | /* this wait might actually be completed, by | ||
496 | * tty_close(), since the ldisc is already | ||
497 | * installed */ | ||
498 | err = wait_for_completion_timeout | ||
499 | (&kim_gdata->ldisc_installed, | ||
500 | msecs_to_jiffies(LDISC_TIME)); | ||
501 | err = -EINVAL; | ||
475 | continue; | 502 | continue; |
476 | } else { /* on success don't retry */ | 503 | } else { /* on success don't retry */ |
477 | break; | 504 | break; |
@@ -489,6 +516,8 @@ long st_kim_stop(void *kim_data) | |||
489 | { | 516 | { |
490 | long err = 0; | 517 | long err = 0; |
491 | struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; | 518 | struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; |
519 | struct ti_st_plat_data *pdata = | ||
520 | kim_gdata->kim_pdev->dev.platform_data; | ||
492 | 521 | ||
493 | INIT_COMPLETION(kim_gdata->ldisc_installed); | 522 | INIT_COMPLETION(kim_gdata->ldisc_installed); |
494 | 523 | ||
@@ -515,6 +544,10 @@ long st_kim_stop(void *kim_data) | |||
515 | gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); | 544 | gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); |
516 | mdelay(1); | 545 | mdelay(1); |
517 | gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); | 546 | gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); |
547 | |||
548 | /* platform specific disable */ | ||
549 | if (pdata->chip_disable) | ||
550 | pdata->chip_disable(kim_gdata); | ||
518 | return err; | 551 | return err; |
519 | } | 552 | } |
520 | 553 | ||
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c index 3f2495138855..1ff460a8e9c7 100644 --- a/drivers/misc/ti-st/st_ll.c +++ b/drivers/misc/ti-st/st_ll.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #define pr_fmt(fmt) "(stll) :" fmt | 22 | #define pr_fmt(fmt) "(stll) :" fmt |
23 | #include <linux/skbuff.h> | 23 | #include <linux/skbuff.h> |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/platform_device.h> | ||
25 | #include <linux/ti_wilink_st.h> | 26 | #include <linux/ti_wilink_st.h> |
26 | 27 | ||
27 | /**********************************************************************/ | 28 | /**********************************************************************/ |
@@ -37,6 +38,9 @@ static void send_ll_cmd(struct st_data_s *st_data, | |||
37 | 38 | ||
38 | static void ll_device_want_to_sleep(struct st_data_s *st_data) | 39 | static void ll_device_want_to_sleep(struct st_data_s *st_data) |
39 | { | 40 | { |
41 | struct kim_data_s *kim_data; | ||
42 | struct ti_st_plat_data *pdata; | ||
43 | |||
40 | pr_debug("%s", __func__); | 44 | pr_debug("%s", __func__); |
41 | /* sanity check */ | 45 | /* sanity check */ |
42 | if (st_data->ll_state != ST_LL_AWAKE) | 46 | if (st_data->ll_state != ST_LL_AWAKE) |
@@ -46,10 +50,19 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data) | |||
46 | send_ll_cmd(st_data, LL_SLEEP_ACK); | 50 | send_ll_cmd(st_data, LL_SLEEP_ACK); |
47 | /* update state */ | 51 | /* update state */ |
48 | st_data->ll_state = ST_LL_ASLEEP; | 52 | st_data->ll_state = ST_LL_ASLEEP; |
53 | |||
54 | /* communicate to platform about chip asleep */ | ||
55 | kim_data = st_data->kim_data; | ||
56 | pdata = kim_data->kim_pdev->dev.platform_data; | ||
57 | if (pdata->chip_asleep) | ||
58 | pdata->chip_asleep(NULL); | ||
49 | } | 59 | } |
50 | 60 | ||
51 | static void ll_device_want_to_wakeup(struct st_data_s *st_data) | 61 | static void ll_device_want_to_wakeup(struct st_data_s *st_data) |
52 | { | 62 | { |
63 | struct kim_data_s *kim_data; | ||
64 | struct ti_st_plat_data *pdata; | ||
65 | |||
53 | /* diff actions in diff states */ | 66 | /* diff actions in diff states */ |
54 | switch (st_data->ll_state) { | 67 | switch (st_data->ll_state) { |
55 | case ST_LL_ASLEEP: | 68 | case ST_LL_ASLEEP: |
@@ -70,6 +83,12 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data) | |||
70 | } | 83 | } |
71 | /* update state */ | 84 | /* update state */ |
72 | st_data->ll_state = ST_LL_AWAKE; | 85 | st_data->ll_state = ST_LL_AWAKE; |
86 | |||
87 | /* communicate to platform about chip wakeup */ | ||
88 | kim_data = st_data->kim_data; | ||
89 | pdata = kim_data->kim_pdev->dev.platform_data; | ||
90 | if (pdata->chip_awake) | ||
91 | pdata->chip_awake(NULL); | ||
73 | } | 92 | } |
74 | 93 | ||
75 | /**********************************************************************/ | 94 | /**********************************************************************/ |
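
The st_ll hooks are optional platform callbacks, so each call site must test exactly the pointer it is about to invoke before dereferencing it. A standalone sketch of that guard pattern (struct and hook names mirror ti_st_plat_data, but the bodies are invented):

#include <stdio.h>

/* Toy platform data: each hook is optional and may be left NULL,
 * mirroring ti_st_plat_data's chip_asleep/chip_awake callbacks. */
struct plat_data {
	void (*chip_asleep)(void *);
	void (*chip_awake)(void *);
};

static void board_awake(void *unused)
{
	(void)unused;
	printf("board: chip awake\n");
}

static void notify_awake(struct plat_data *pdata)
{
	/* Guard and call must name the same hook; testing chip_asleep
	 * here would call a NULL chip_awake on boards that only provide
	 * the sleep hook. */
	if (pdata->chip_awake)
		pdata->chip_awake(NULL);
}

int main(void)
{
	struct plat_data with_hook = { .chip_awake = board_awake };
	struct plat_data without_hook = { NULL, NULL };

	notify_awake(&with_hook);	/* prints the message */
	notify_awake(&without_hook);	/* silently skipped */
	return 0;
}
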
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 1ff5486213fb..4c1a648d00fc 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -926,6 +926,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | |||
926 | /* | 926 | /* |
927 | * Reliable writes are used to implement Forced Unit Access and | 927 | * Reliable writes are used to implement Forced Unit Access and |
928 | * REQ_META accesses, and are supported only on MMCs. | 928 | * REQ_META accesses, and are supported only on MMCs. |
929 | * | ||
930 | * XXX: this really needs a good explanation of why REQ_META | ||
931 | * is treated specially. | ||
929 | */ | 932 | */ |
930 | bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || | 933 | bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || |
931 | (req->cmd_flags & REQ_META)) && | 934 | (req->cmd_flags & REQ_META)) && |
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 006a5e9f8ab8..2bf229acd3b8 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c | |||
@@ -224,7 +224,7 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test, | |||
224 | static int mmc_test_busy(struct mmc_command *cmd) | 224 | static int mmc_test_busy(struct mmc_command *cmd) |
225 | { | 225 | { |
226 | return !(cmd->resp[0] & R1_READY_FOR_DATA) || | 226 | return !(cmd->resp[0] & R1_READY_FOR_DATA) || |
227 | (R1_CURRENT_STATE(cmd->resp[0]) == 7); | 227 | (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG); |
228 | } | 228 | } |
229 | 229 | ||
230 | /* | 230 | /* |
@@ -2900,7 +2900,7 @@ static const struct file_operations mmc_test_fops_testlist = { | |||
2900 | .release = single_release, | 2900 | .release = single_release, |
2901 | }; | 2901 | }; |
2902 | 2902 | ||
2903 | static void mmc_test_free_file_test(struct mmc_card *card) | 2903 | static void mmc_test_free_dbgfs_file(struct mmc_card *card) |
2904 | { | 2904 | { |
2905 | struct mmc_test_dbgfs_file *df, *dfs; | 2905 | struct mmc_test_dbgfs_file *df, *dfs; |
2906 | 2906 | ||
@@ -2917,34 +2917,21 @@ static void mmc_test_free_file_test(struct mmc_card *card) | |||
2917 | mutex_unlock(&mmc_test_lock); | 2917 | mutex_unlock(&mmc_test_lock); |
2918 | } | 2918 | } |
2919 | 2919 | ||
2920 | static int mmc_test_register_file_test(struct mmc_card *card) | 2920 | static int __mmc_test_register_dbgfs_file(struct mmc_card *card, |
2921 | const char *name, mode_t mode, const struct file_operations *fops) | ||
2921 | { | 2922 | { |
2922 | struct dentry *file = NULL; | 2923 | struct dentry *file = NULL; |
2923 | struct mmc_test_dbgfs_file *df; | 2924 | struct mmc_test_dbgfs_file *df; |
2924 | int ret = 0; | ||
2925 | |||
2926 | mutex_lock(&mmc_test_lock); | ||
2927 | |||
2928 | if (card->debugfs_root) | ||
2929 | file = debugfs_create_file("test", S_IWUSR | S_IRUGO, | ||
2930 | card->debugfs_root, card, &mmc_test_fops_test); | ||
2931 | |||
2932 | if (IS_ERR_OR_NULL(file)) { | ||
2933 | dev_err(&card->dev, | ||
2934 | "Can't create test. Perhaps debugfs is disabled.\n"); | ||
2935 | ret = -ENODEV; | ||
2936 | goto err; | ||
2937 | } | ||
2938 | 2925 | ||
2939 | if (card->debugfs_root) | 2926 | if (card->debugfs_root) |
2940 | file = debugfs_create_file("testlist", S_IRUGO, | 2927 | file = debugfs_create_file(name, mode, card->debugfs_root, |
2941 | card->debugfs_root, card, &mmc_test_fops_testlist); | 2928 | card, fops); |
2942 | 2929 | ||
2943 | if (IS_ERR_OR_NULL(file)) { | 2930 | if (IS_ERR_OR_NULL(file)) { |
2944 | dev_err(&card->dev, | 2931 | dev_err(&card->dev, |
2945 | "Can't create testlist. Perhaps debugfs is disabled.\n"); | 2932 | "Can't create %s. Perhaps debugfs is disabled.\n", |
2946 | ret = -ENODEV; | 2933 | name); |
2947 | goto err; | 2934 | return -ENODEV; |
2948 | } | 2935 | } |
2949 | 2936 | ||
2950 | df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL); | 2937 | df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL); |
@@ -2952,14 +2939,31 @@ static int mmc_test_register_file_test(struct mmc_card *card) | |||
2952 | debugfs_remove(file); | 2939 | debugfs_remove(file); |
2953 | dev_err(&card->dev, | 2940 | dev_err(&card->dev, |
2954 | "Can't allocate memory for internal usage.\n"); | 2941 | "Can't allocate memory for internal usage.\n"); |
2955 | ret = -ENOMEM; | 2942 | return -ENOMEM; |
2956 | goto err; | ||
2957 | } | 2943 | } |
2958 | 2944 | ||
2959 | df->card = card; | 2945 | df->card = card; |
2960 | df->file = file; | 2946 | df->file = file; |
2961 | 2947 | ||
2962 | list_add(&df->link, &mmc_test_file_test); | 2948 | list_add(&df->link, &mmc_test_file_test); |
2949 | return 0; | ||
2950 | } | ||
2951 | |||
2952 | static int mmc_test_register_dbgfs_file(struct mmc_card *card) | ||
2953 | { | ||
2954 | int ret; | ||
2955 | |||
2956 | mutex_lock(&mmc_test_lock); | ||
2957 | |||
2958 | ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO, | ||
2959 | &mmc_test_fops_test); | ||
2960 | if (ret) | ||
2961 | goto err; | ||
2962 | |||
2963 | ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO, | ||
2964 | &mmc_test_fops_testlist); | ||
2965 | if (ret) | ||
2966 | goto err; | ||
2963 | 2967 | ||
2964 | err: | 2968 | err: |
2965 | mutex_unlock(&mmc_test_lock); | 2969 | mutex_unlock(&mmc_test_lock); |
@@ -2974,7 +2978,7 @@ static int mmc_test_probe(struct mmc_card *card) | |||
2974 | if (!mmc_card_mmc(card) && !mmc_card_sd(card)) | 2978 | if (!mmc_card_mmc(card) && !mmc_card_sd(card)) |
2975 | return -ENODEV; | 2979 | return -ENODEV; |
2976 | 2980 | ||
2977 | ret = mmc_test_register_file_test(card); | 2981 | ret = mmc_test_register_dbgfs_file(card); |
2978 | if (ret) | 2982 | if (ret) |
2979 | return ret; | 2983 | return ret; |
2980 | 2984 | ||
@@ -2986,7 +2990,7 @@ static int mmc_test_probe(struct mmc_card *card) | |||
2986 | static void mmc_test_remove(struct mmc_card *card) | 2990 | static void mmc_test_remove(struct mmc_card *card) |
2987 | { | 2991 | { |
2988 | mmc_test_free_result(card); | 2992 | mmc_test_free_result(card); |
2989 | mmc_test_free_file_test(card); | 2993 | mmc_test_free_dbgfs_file(card); |
2990 | } | 2994 | } |
2991 | 2995 | ||
2992 | static struct mmc_driver mmc_driver = { | 2996 | static struct mmc_driver mmc_driver = { |
@@ -3006,7 +3010,7 @@ static void __exit mmc_test_exit(void) | |||
3006 | { | 3010 | { |
3007 | /* Clear stalled data if card is still plugged */ | 3011 | /* Clear stalled data if card is still plugged */ |
3008 | mmc_test_free_result(NULL); | 3012 | mmc_test_free_result(NULL); |
3009 | mmc_test_free_file_test(NULL); | 3013 | mmc_test_free_dbgfs_file(NULL); |
3010 | 3014 | ||
3011 | mmc_unregister_driver(&mmc_driver); | 3015 | mmc_unregister_driver(&mmc_driver); |
3012 | } | 3016 | } |
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 89bdeaec7182..b27b94078c21 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c | |||
@@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) | |||
133 | if (mrq->done) | 133 | if (mrq->done) |
134 | mrq->done(mrq); | 134 | mrq->done(mrq); |
135 | 135 | ||
136 | mmc_host_clk_gate(host); | 136 | mmc_host_clk_release(host); |
137 | } | 137 | } |
138 | } | 138 | } |
139 | 139 | ||
@@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) | |||
192 | mrq->stop->mrq = mrq; | 192 | mrq->stop->mrq = mrq; |
193 | } | 193 | } |
194 | } | 194 | } |
195 | mmc_host_clk_ungate(host); | 195 | mmc_host_clk_hold(host); |
196 | led_trigger_event(host->led, LED_FULL); | 196 | led_trigger_event(host->led, LED_FULL); |
197 | host->ops->request(host, mrq); | 197 | host->ops->request(host, mrq); |
198 | } | 198 | } |
@@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host) | |||
728 | */ | 728 | */ |
729 | void mmc_set_chip_select(struct mmc_host *host, int mode) | 729 | void mmc_set_chip_select(struct mmc_host *host, int mode) |
730 | { | 730 | { |
731 | mmc_host_clk_hold(host); | ||
731 | host->ios.chip_select = mode; | 732 | host->ios.chip_select = mode; |
732 | mmc_set_ios(host); | 733 | mmc_set_ios(host); |
734 | mmc_host_clk_release(host); | ||
733 | } | 735 | } |
734 | 736 | ||
735 | /* | 737 | /* |
736 | * Sets the host clock to the highest possible frequency that | 738 | * Sets the host clock to the highest possible frequency that |
737 | * is below "hz". | 739 | * is below "hz". |
738 | */ | 740 | */ |
739 | void mmc_set_clock(struct mmc_host *host, unsigned int hz) | 741 | static void __mmc_set_clock(struct mmc_host *host, unsigned int hz) |
740 | { | 742 | { |
741 | WARN_ON(hz < host->f_min); | 743 | WARN_ON(hz < host->f_min); |
742 | 744 | ||
@@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz) | |||
747 | mmc_set_ios(host); | 749 | mmc_set_ios(host); |
748 | } | 750 | } |
749 | 751 | ||
752 | void mmc_set_clock(struct mmc_host *host, unsigned int hz) | ||
753 | { | ||
754 | mmc_host_clk_hold(host); | ||
755 | __mmc_set_clock(host, hz); | ||
756 | mmc_host_clk_release(host); | ||
757 | } | ||
758 | |||
750 | #ifdef CONFIG_MMC_CLKGATE | 759 | #ifdef CONFIG_MMC_CLKGATE |
751 | /* | 760 | /* |
752 | * This gates the clock by setting it to 0 Hz. | 761 | * This gates the clock by setting it to 0 Hz. |
@@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host) | |||
779 | if (host->clk_old) { | 788 | if (host->clk_old) { |
780 | BUG_ON(host->ios.clock); | 789 | BUG_ON(host->ios.clock); |
781 | /* This call will also set host->clk_gated to false */ | 790 | /* This call will also set host->clk_gated to false */ |
782 | mmc_set_clock(host, host->clk_old); | 791 | __mmc_set_clock(host, host->clk_old); |
783 | } | 792 | } |
784 | } | 793 | } |
785 | 794 | ||
@@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host) | |||
807 | */ | 816 | */ |
808 | void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) | 817 | void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) |
809 | { | 818 | { |
819 | mmc_host_clk_hold(host); | ||
810 | host->ios.bus_mode = mode; | 820 | host->ios.bus_mode = mode; |
811 | mmc_set_ios(host); | 821 | mmc_set_ios(host); |
822 | mmc_host_clk_release(host); | ||
812 | } | 823 | } |
813 | 824 | ||
814 | /* | 825 | /* |
@@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) | |||
816 | */ | 827 | */ |
817 | void mmc_set_bus_width(struct mmc_host *host, unsigned int width) | 828 | void mmc_set_bus_width(struct mmc_host *host, unsigned int width) |
818 | { | 829 | { |
830 | mmc_host_clk_hold(host); | ||
819 | host->ios.bus_width = width; | 831 | host->ios.bus_width = width; |
820 | mmc_set_ios(host); | 832 | mmc_set_ios(host); |
833 | mmc_host_clk_release(host); | ||
821 | } | 834 | } |
822 | 835 | ||
823 | /** | 836 | /** |
@@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) | |||
1015 | 1028 | ||
1016 | ocr &= 3 << bit; | 1029 | ocr &= 3 << bit; |
1017 | 1030 | ||
1031 | mmc_host_clk_hold(host); | ||
1018 | host->ios.vdd = bit; | 1032 | host->ios.vdd = bit; |
1019 | mmc_set_ios(host); | 1033 | mmc_set_ios(host); |
1034 | mmc_host_clk_release(host); | ||
1020 | } else { | 1035 | } else { |
1021 | pr_warning("%s: host doesn't support card's voltages\n", | 1036 | pr_warning("%s: host doesn't support card's voltages\n", |
1022 | mmc_hostname(host)); | 1037 | mmc_hostname(host)); |
@@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11 | |||
1063 | */ | 1078 | */ |
1064 | void mmc_set_timing(struct mmc_host *host, unsigned int timing) | 1079 | void mmc_set_timing(struct mmc_host *host, unsigned int timing) |
1065 | { | 1080 | { |
1081 | mmc_host_clk_hold(host); | ||
1066 | host->ios.timing = timing; | 1082 | host->ios.timing = timing; |
1067 | mmc_set_ios(host); | 1083 | mmc_set_ios(host); |
1084 | mmc_host_clk_release(host); | ||
1068 | } | 1085 | } |
1069 | 1086 | ||
1070 | /* | 1087 | /* |
@@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing) | |||
1072 | */ | 1089 | */ |
1073 | void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) | 1090 | void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) |
1074 | { | 1091 | { |
1092 | mmc_host_clk_hold(host); | ||
1075 | host->ios.drv_type = drv_type; | 1093 | host->ios.drv_type = drv_type; |
1076 | mmc_set_ios(host); | 1094 | mmc_set_ios(host); |
1095 | mmc_host_clk_release(host); | ||
1077 | } | 1096 | } |
1078 | 1097 | ||
1079 | /* | 1098 | /* |
@@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host) | |||
1091 | { | 1110 | { |
1092 | int bit; | 1111 | int bit; |
1093 | 1112 | ||
1113 | mmc_host_clk_hold(host); | ||
1114 | |||
1094 | /* If ocr is set, we use it */ | 1115 | /* If ocr is set, we use it */ |
1095 | if (host->ocr) | 1116 | if (host->ocr) |
1096 | bit = ffs(host->ocr) - 1; | 1117 | bit = ffs(host->ocr) - 1; |
@@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host) | |||
1126 | * time required to reach a stable voltage. | 1147 | * time required to reach a stable voltage. |
1127 | */ | 1148 | */ |
1128 | mmc_delay(10); | 1149 | mmc_delay(10); |
1150 | |||
1151 | mmc_host_clk_release(host); | ||
1129 | } | 1152 | } |
1130 | 1153 | ||
1131 | static void mmc_power_off(struct mmc_host *host) | 1154 | static void mmc_power_off(struct mmc_host *host) |
1132 | { | 1155 | { |
1156 | mmc_host_clk_hold(host); | ||
1157 | |||
1133 | host->ios.clock = 0; | 1158 | host->ios.clock = 0; |
1134 | host->ios.vdd = 0; | 1159 | host->ios.vdd = 0; |
1135 | 1160 | ||
@@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host) | |||
1147 | host->ios.bus_width = MMC_BUS_WIDTH_1; | 1172 | host->ios.bus_width = MMC_BUS_WIDTH_1; |
1148 | host->ios.timing = MMC_TIMING_LEGACY; | 1173 | host->ios.timing = MMC_TIMING_LEGACY; |
1149 | mmc_set_ios(host); | 1174 | mmc_set_ios(host); |
1175 | |||
1176 | mmc_host_clk_release(host); | ||
1150 | } | 1177 | } |
1151 | 1178 | ||
1152 | /* | 1179 | /* |
@@ -1502,7 +1529,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, | |||
1502 | goto out; | 1529 | goto out; |
1503 | } | 1530 | } |
1504 | } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || | 1531 | } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || |
1505 | R1_CURRENT_STATE(cmd.resp[0]) == 7); | 1532 | R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG); |
1506 | out: | 1533 | out: |
1507 | return err; | 1534 | return err; |
1508 | } | 1535 | } |
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index b29d3e8fd3a2..793d0a0dad8d 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c | |||
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work) | |||
119 | } | 119 | } |
120 | 120 | ||
121 | /** | 121 | /** |
122 | * mmc_host_clk_ungate - ungate hardware MCI clocks | 122 | * mmc_host_clk_hold - ungate hardware MCI clocks |
123 | * @host: host to ungate. | 123 | * @host: host to ungate. |
124 | * | 124 | * |
125 | * Makes sure the host ios.clock is restored to a non-zero value | 125 | * Makes sure the host ios.clock is restored to a non-zero value |
126 | * past this call. Increase clock reference count and ungate clock | 126 | * past this call. Increase clock reference count and ungate clock |
127 | * if we're the first user. | 127 | * if we're the first user. |
128 | */ | 128 | */ |
129 | void mmc_host_clk_ungate(struct mmc_host *host) | 129 | void mmc_host_clk_hold(struct mmc_host *host) |
130 | { | 130 | { |
131 | unsigned long flags; | 131 | unsigned long flags; |
132 | 132 | ||
@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card) | |||
164 | } | 164 | } |
165 | 165 | ||
166 | /** | 166 | /** |
167 | * mmc_host_clk_gate - gate off hardware MCI clocks | 167 | * mmc_host_clk_release - gate off hardware MCI clocks |
168 | * @host: host to gate. | 168 | * @host: host to gate. |
169 | * | 169 | * |
170 | * Calls the host driver with ios.clock set to zero as often as possible | 170 | * Calls the host driver with ios.clock set to zero as often as possible |
171 | * in order to gate off hardware MCI clocks. Decrease clock reference | 171 | * in order to gate off hardware MCI clocks. Decrease clock reference |
172 | * count and schedule disabling of clock. | 172 | * count and schedule disabling of clock. |
173 | */ | 173 | */ |
174 | void mmc_host_clk_gate(struct mmc_host *host) | 174 | void mmc_host_clk_release(struct mmc_host *host) |
175 | { | 175 | { |
176 | unsigned long flags; | 176 | unsigned long flags; |
177 | 177 | ||
@@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host) | |||
179 | host->clk_requests--; | 179 | host->clk_requests--; |
180 | if (mmc_host_may_gate_card(host->card) && | 180 | if (mmc_host_may_gate_card(host->card) && |
181 | !host->clk_requests) | 181 | !host->clk_requests) |
182 | schedule_work(&host->clk_gate_work); | 182 | queue_work(system_nrt_wq, &host->clk_gate_work); |
183 | spin_unlock_irqrestore(&host->clk_lock, flags); | 183 | spin_unlock_irqrestore(&host->clk_lock, flags); |
184 | } | 184 | } |
185 | 185 | ||
@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host) | |||
231 | if (cancel_work_sync(&host->clk_gate_work)) | 231 | if (cancel_work_sync(&host->clk_gate_work)) |
232 | mmc_host_clk_gate_delayed(host); | 232 | mmc_host_clk_gate_delayed(host); |
233 | if (host->clk_gated) | 233 | if (host->clk_gated) |
234 | mmc_host_clk_ungate(host); | 234 | mmc_host_clk_hold(host); |
235 | /* There should be only one user now */ | 235 | /* There should be only one user now */ |
236 | WARN_ON(host->clk_requests > 1); | 236 | WARN_ON(host->clk_requests > 1); |
237 | } | 237 | } |
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index de199f911928..fb8a5cd2e4a1 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h | |||
@@ -16,16 +16,16 @@ int mmc_register_host_class(void); | |||
16 | void mmc_unregister_host_class(void); | 16 | void mmc_unregister_host_class(void); |
17 | 17 | ||
18 | #ifdef CONFIG_MMC_CLKGATE | 18 | #ifdef CONFIG_MMC_CLKGATE |
19 | void mmc_host_clk_ungate(struct mmc_host *host); | 19 | void mmc_host_clk_hold(struct mmc_host *host); |
20 | void mmc_host_clk_gate(struct mmc_host *host); | 20 | void mmc_host_clk_release(struct mmc_host *host); |
21 | unsigned int mmc_host_clk_rate(struct mmc_host *host); | 21 | unsigned int mmc_host_clk_rate(struct mmc_host *host); |
22 | 22 | ||
23 | #else | 23 | #else |
24 | static inline void mmc_host_clk_ungate(struct mmc_host *host) | 24 | static inline void mmc_host_clk_hold(struct mmc_host *host) |
25 | { | 25 | { |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline void mmc_host_clk_gate(struct mmc_host *host) | 28 | static inline void mmc_host_clk_release(struct mmc_host *host) |
29 | { | 29 | { |
30 | } | 30 | } |
31 | 31 | ||
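
The ungate/gate pair is renamed to hold/release because it behaves like a reference count on the MCI clock: the first holder ungates the clock and the last release allows it to be gated again. A toy model of that pairing (the real driver defers the gating to a workqueue under a spinlock and only gates when the card permits it):

#include <stdio.h>

struct host { int clk_requests; int clk_gated; };

static void clk_hold(struct host *h)
{
	if (h->clk_requests++ == 0 && h->clk_gated) {
		h->clk_gated = 0;
		printf("clock ungated\n");
	}
}

static void clk_release(struct host *h)
{
	if (--h->clk_requests == 0) {
		h->clk_gated = 1;	/* deferred to a workqueue in the driver */
		printf("clock gated\n");
	}
}

int main(void)
{
	struct host h = { .clk_requests = 0, .clk_gated = 1 };

	clk_hold(&h);		/* e.g. around an ios update */
	clk_hold(&h);		/* nested request keeps the clock running */
	clk_release(&h);
	clk_release(&h);	/* last user: clock may be gated again */
	return 0;
}
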
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index aa7d1d79b8c5..5700b1cbdfec 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
@@ -259,7 +259,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
259 | } | 259 | } |
260 | 260 | ||
261 | card->ext_csd.rev = ext_csd[EXT_CSD_REV]; | 261 | card->ext_csd.rev = ext_csd[EXT_CSD_REV]; |
262 | if (card->ext_csd.rev > 5) { | 262 | if (card->ext_csd.rev > 6) { |
263 | printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", | 263 | printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", |
264 | mmc_hostname(card->host), card->ext_csd.rev); | 264 | mmc_hostname(card->host), card->ext_csd.rev); |
265 | err = -EINVAL; | 265 | err = -EINVAL; |
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 845ce7c533b9..770c3d06f5dc 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c | |||
@@ -407,7 +407,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, | |||
407 | break; | 407 | break; |
408 | if (mmc_host_is_spi(card->host)) | 408 | if (mmc_host_is_spi(card->host)) |
409 | break; | 409 | break; |
410 | } while (R1_CURRENT_STATE(status) == 7); | 410 | } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); |
411 | 411 | ||
412 | if (mmc_host_is_spi(card->host)) { | 412 | if (mmc_host_is_spi(card->host)) { |
413 | if (status & R1_SPI_ILLEGAL_COMMAND) | 413 | if (status & R1_SPI_ILLEGAL_COMMAND) |
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 633975ff2bb3..0370e03e3142 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c | |||
@@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status) | |||
469 | return 0; | 469 | return 0; |
470 | } | 470 | } |
471 | 471 | ||
472 | static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) | 472 | static void sd_update_bus_speed_mode(struct mmc_card *card) |
473 | { | 473 | { |
474 | unsigned int bus_speed = 0, timing = 0; | ||
475 | int err; | ||
476 | |||
477 | /* | 474 | /* |
478 | * If the host doesn't support any of the UHS-I modes, fallback on | 475 | * If the host doesn't support any of the UHS-I modes, fallback on |
479 | * default speed. | 476 | * default speed. |
480 | */ | 477 | */ |
481 | if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | | 478 | if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | |
482 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) | 479 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) { |
483 | return 0; | 480 | card->sd_bus_speed = 0; |
481 | return; | ||
482 | } | ||
484 | 483 | ||
485 | if ((card->host->caps & MMC_CAP_UHS_SDR104) && | 484 | if ((card->host->caps & MMC_CAP_UHS_SDR104) && |
486 | (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { | 485 | (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { |
487 | bus_speed = UHS_SDR104_BUS_SPEED; | 486 | card->sd_bus_speed = UHS_SDR104_BUS_SPEED; |
488 | timing = MMC_TIMING_UHS_SDR104; | ||
489 | card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; | ||
490 | } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && | 487 | } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && |
491 | (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { | 488 | (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { |
492 | bus_speed = UHS_DDR50_BUS_SPEED; | 489 | card->sd_bus_speed = UHS_DDR50_BUS_SPEED; |
493 | timing = MMC_TIMING_UHS_DDR50; | ||
494 | card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; | ||
495 | } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | | 490 | } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | |
496 | MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & | 491 | MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & |
497 | SD_MODE_UHS_SDR50)) { | 492 | SD_MODE_UHS_SDR50)) { |
498 | bus_speed = UHS_SDR50_BUS_SPEED; | 493 | card->sd_bus_speed = UHS_SDR50_BUS_SPEED; |
499 | timing = MMC_TIMING_UHS_SDR50; | ||
500 | card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; | ||
501 | } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | | 494 | } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | |
502 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && | 495 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && |
503 | (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { | 496 | (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { |
504 | bus_speed = UHS_SDR25_BUS_SPEED; | 497 | card->sd_bus_speed = UHS_SDR25_BUS_SPEED; |
505 | timing = MMC_TIMING_UHS_SDR25; | ||
506 | card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; | ||
507 | } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | | 498 | } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | |
508 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | | 499 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | |
509 | MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & | 500 | MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & |
510 | SD_MODE_UHS_SDR12)) { | 501 | SD_MODE_UHS_SDR12)) { |
511 | bus_speed = UHS_SDR12_BUS_SPEED; | 502 | card->sd_bus_speed = UHS_SDR12_BUS_SPEED; |
512 | timing = MMC_TIMING_UHS_SDR12; | 503 | } |
513 | card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; | 504 | } |
505 | |||
506 | static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) | ||
507 | { | ||
508 | int err; | ||
509 | unsigned int timing = 0; | ||
510 | |||
511 | switch (card->sd_bus_speed) { | ||
512 | case UHS_SDR104_BUS_SPEED: | ||
513 | timing = MMC_TIMING_UHS_SDR104; | ||
514 | card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; | ||
515 | break; | ||
516 | case UHS_DDR50_BUS_SPEED: | ||
517 | timing = MMC_TIMING_UHS_DDR50; | ||
518 | card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; | ||
519 | break; | ||
520 | case UHS_SDR50_BUS_SPEED: | ||
521 | timing = MMC_TIMING_UHS_SDR50; | ||
522 | card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; | ||
523 | break; | ||
524 | case UHS_SDR25_BUS_SPEED: | ||
525 | timing = MMC_TIMING_UHS_SDR25; | ||
526 | card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; | ||
527 | break; | ||
528 | case UHS_SDR12_BUS_SPEED: | ||
529 | timing = MMC_TIMING_UHS_SDR12; | ||
530 | card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; | ||
531 | break; | ||
532 | default: | ||
533 | return 0; | ||
514 | } | 534 | } |
515 | 535 | ||
516 | card->sd_bus_speed = bus_speed; | 536 | err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status); |
517 | err = mmc_sd_switch(card, 1, 0, bus_speed, status); | ||
518 | if (err) | 537 | if (err) |
519 | return err; | 538 | return err; |
520 | 539 | ||
521 | if ((status[16] & 0xF) != bus_speed) | 540 | if ((status[16] & 0xF) != card->sd_bus_speed) |
522 | printk(KERN_WARNING "%s: Problem setting bus speed mode!\n", | 541 | printk(KERN_WARNING "%s: Problem setting bus speed mode!\n", |
523 | mmc_hostname(card->host)); | 542 | mmc_hostname(card->host)); |
524 | else { | 543 | else { |
@@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card) | |||
618 | mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); | 637 | mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); |
619 | } | 638 | } |
620 | 639 | ||
640 | /* | ||
641 | * Select the bus speed mode depending on host | ||
642 | * and card capability. | ||
643 | */ | ||
644 | sd_update_bus_speed_mode(card); | ||
645 | |||
621 | /* Set the driver strength for the card */ | 646 | /* Set the driver strength for the card */ |
622 | err = sd_select_driver_type(card, status); | 647 | err = sd_select_driver_type(card, status); |
623 | if (err) | 648 | if (err) |
624 | goto out; | 649 | goto out; |
625 | 650 | ||
626 | /* Set bus speed mode of the card */ | 651 | /* Set current limit for the card */ |
627 | err = sd_set_bus_speed_mode(card, status); | 652 | err = sd_set_current_limit(card, status); |
628 | if (err) | 653 | if (err) |
629 | goto out; | 654 | goto out; |
630 | 655 | ||
631 | /* Set current limit for the card */ | 656 | /* Set bus speed mode of the card */ |
632 | err = sd_set_current_limit(card, status); | 657 | err = sd_set_bus_speed_mode(card, status); |
633 | if (err) | 658 | if (err) |
634 | goto out; | 659 | goto out; |
635 | 660 | ||
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 77f0b6b1681d..ff0f714b012c 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -62,7 +62,7 @@ struct idmac_desc { | |||
62 | 62 | ||
63 | u32 des1; /* Buffer sizes */ | 63 | u32 des1; /* Buffer sizes */ |
64 | #define IDMAC_SET_BUFFER1_SIZE(d, s) \ | 64 | #define IDMAC_SET_BUFFER1_SIZE(d, s) \ |
65 | ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff)) | 65 | ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff)) |
66 | 66 | ||
67 | u32 des2; /* buffer 1 physical address */ | 67 | u32 des2; /* buffer 1 physical address */ |
68 | 68 | ||
@@ -699,7 +699,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
699 | } | 699 | } |
700 | 700 | ||
701 | /* DDR mode set */ | 701 | /* DDR mode set */ |
702 | if (ios->ddr) { | 702 | if (ios->timing == MMC_TIMING_UHS_DDR50) { |
703 | regs = mci_readl(slot->host, UHS_REG); | 703 | regs = mci_readl(slot->host, UHS_REG); |
704 | regs |= (0x1 << slot->id) << 16; | 704 | regs |= (0x1 << slot->id) << 16; |
705 | mci_writel(slot->host, UHS_REG, regs); | 705 | mci_writel(slot->host, UHS_REG, regs); |
@@ -1646,7 +1646,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
1646 | mmc->caps |= MMC_CAP_4_BIT_DATA; | 1646 | mmc->caps |= MMC_CAP_4_BIT_DATA; |
1647 | 1647 | ||
1648 | if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) | 1648 | if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) |
1649 | mmc->caps |= MMC_CAP_SD_HIGHSPEED; | 1649 | mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; |
1650 | 1650 | ||
1651 | #ifdef CONFIG_MMC_DW_IDMAC | 1651 | #ifdef CONFIG_MMC_DW_IDMAC |
1652 | mmc->max_segs = host->ring_size; | 1652 | mmc->max_segs = host->ring_size; |
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 9ebfb4b482f5..4dc0028086a3 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
19 | #include <linux/module.h> | ||
19 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
20 | #include <linux/mmc/host.h> | 21 | #include <linux/mmc/host.h> |
21 | #include <linux/mmc/mmc.h> | 22 | #include <linux/mmc/mmc.h> |
@@ -27,6 +28,7 @@ | |||
27 | #include "sdhci-pltfm.h" | 28 | #include "sdhci-pltfm.h" |
28 | #include "sdhci-esdhc.h" | 29 | #include "sdhci-esdhc.h" |
29 | 30 | ||
31 | #define SDHCI_CTRL_D3CD 0x08 | ||
30 | /* VENDOR SPEC register */ | 32 | /* VENDOR SPEC register */ |
31 | #define SDHCI_VENDOR_SPEC 0xC0 | 33 | #define SDHCI_VENDOR_SPEC 0xC0 |
32 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 | 34 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 |
@@ -141,13 +143,32 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) | |||
141 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 143 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
142 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | 144 | struct pltfm_imx_data *imx_data = pltfm_host->priv; |
143 | struct esdhc_platform_data *boarddata = &imx_data->boarddata; | 145 | struct esdhc_platform_data *boarddata = &imx_data->boarddata; |
144 | 146 | u32 data; | |
145 | if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) | 147 | |
146 | && (boarddata->cd_type == ESDHC_CD_GPIO))) | 148 | if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { |
147 | /* | 149 | if (boarddata->cd_type == ESDHC_CD_GPIO) |
148 | * these interrupts won't work with a custom card_detect gpio | 150 | /* |
149 | */ | 151 | * These interrupts won't work with a custom |
150 | val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); | 152 | * card_detect gpio (only applied to mx25/35) |
153 | */ | ||
154 | val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); | ||
155 | |||
156 | if (val & SDHCI_INT_CARD_INT) { | ||
157 | /* | ||
158 | * Clear and then set D3CD bit to avoid missing the | ||
159 | * card interrupt. This is a eSDHC controller problem | ||
160 | * so we need to apply the following workaround: clear | ||
161 | * and set D3CD bit will make eSDHC re-sample the card | ||
162 | * interrupt. In case a card interrupt was lost, | ||
163 | * re-sample it by the following steps. | ||
164 | */ | ||
165 | data = readl(host->ioaddr + SDHCI_HOST_CONTROL); | ||
166 | data &= ~SDHCI_CTRL_D3CD; | ||
167 | writel(data, host->ioaddr + SDHCI_HOST_CONTROL); | ||
168 | data |= SDHCI_CTRL_D3CD; | ||
169 | writel(data, host->ioaddr + SDHCI_HOST_CONTROL); | ||
170 | } | ||
171 | } | ||
151 | 172 | ||
152 | if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) | 173 | if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) |
153 | && (reg == SDHCI_INT_STATUS) | 174 | && (reg == SDHCI_INT_STATUS) |
@@ -217,8 +238,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) | |||
217 | */ | 238 | */ |
218 | return; | 239 | return; |
219 | case SDHCI_HOST_CONTROL: | 240 | case SDHCI_HOST_CONTROL: |
220 | /* FSL messed up here, so we can just keep those two */ | 241 | /* FSL messed up here, so we can just keep those three */ |
221 | new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS); | 242 | new_val = val & (SDHCI_CTRL_LED | \ |
243 | SDHCI_CTRL_4BITBUS | \ | ||
244 | SDHCI_CTRL_D3CD); | ||
222 | /* ensure the endianess */ | 245 | /* ensure the endianess */ |
223 | new_val |= ESDHC_HOST_CONTROL_LE; | 246 | new_val |= ESDHC_HOST_CONTROL_LE; |
224 | /* DMA mode bits are shifted */ | 247 | /* DMA mode bits are shifted */ |
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 4198dbbc5c20..fc7e4a515629 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c | |||
@@ -195,7 +195,8 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) | |||
195 | clk_enable(clk); | 195 | clk_enable(clk); |
196 | 196 | ||
197 | host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 197 | host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
198 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; | 198 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
199 | | SDHCI_QUIRK_32BIT_ADMA_SIZE; | ||
199 | 200 | ||
200 | /* enable 1/8V DDR capable */ | 201 | /* enable 1/8V DDR capable */ |
201 | host->mmc->caps |= MMC_CAP_1_8V_DDR; | 202 | host->mmc->caps |= MMC_CAP_1_8V_DDR; |
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 460ffaf0f6d7..fe886d6c474a 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/gpio.h> | 21 | #include <linux/gpio.h> |
22 | #include <linux/module.h> | ||
22 | 23 | ||
23 | #include <linux/mmc/host.h> | 24 | #include <linux/mmc/host.h> |
24 | 25 | ||
@@ -301,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width) | |||
301 | ctrl &= ~SDHCI_CTRL_8BITBUS; | 302 | ctrl &= ~SDHCI_CTRL_8BITBUS; |
302 | break; | 303 | break; |
303 | default: | 304 | default: |
305 | ctrl &= ~SDHCI_CTRL_4BITBUS; | ||
306 | ctrl &= ~SDHCI_CTRL_8BITBUS; | ||
304 | break; | 307 | break; |
305 | } | 308 | } |
306 | 309 | ||
@@ -502,6 +505,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) | |||
502 | /* This host supports the Auto CMD12 */ | 505 | /* This host supports the Auto CMD12 */ |
503 | host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; | 506 | host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; |
504 | 507 | ||
508 | /* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */ | ||
509 | host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC; | ||
510 | |||
505 | if (pdata->cd_type == S3C_SDHCI_CD_NONE || | 511 | if (pdata->cd_type == S3C_SDHCI_CD_NONE || |
506 | pdata->cd_type == S3C_SDHCI_CD_PERMANENT) | 512 | pdata->cd_type == S3C_SDHCI_CD_PERMANENT) |
507 | host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 513 | host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index c31a3343340d..0e02cc1df12e 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -628,12 +628,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) | |||
628 | /* timeout in us */ | 628 | /* timeout in us */ |
629 | if (!data) | 629 | if (!data) |
630 | target_timeout = cmd->cmd_timeout_ms * 1000; | 630 | target_timeout = cmd->cmd_timeout_ms * 1000; |
631 | else | 631 | else { |
632 | target_timeout = data->timeout_ns / 1000 + | 632 | target_timeout = data->timeout_ns / 1000; |
633 | data->timeout_clks / host->clock; | 633 | if (host->clock) |
634 | 634 | target_timeout += data->timeout_clks / host->clock; | |
635 | if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) | 635 | } |
636 | host->timeout_clk = host->clock / 1000; | ||
637 | 636 | ||
638 | /* | 637 | /* |
639 | * Figure out needed cycles. | 638 | * Figure out needed cycles. |
@@ -645,7 +644,6 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) | |||
645 | * => | 644 | * => |
646 | * (1) / (2) > 2^6 | 645 | * (1) / (2) > 2^6 |
647 | */ | 646 | */ |
648 | BUG_ON(!host->timeout_clk); | ||
649 | count = 0; | 647 | count = 0; |
650 | current_timeout = (1 << 13) * 1000 / host->timeout_clk; | 648 | current_timeout = (1 << 13) * 1000 / host->timeout_clk; |
651 | while (current_timeout < target_timeout) { | 649 | while (current_timeout < target_timeout) { |
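Editor's note: the timeout rework above reduces to two steps -- express the target timeout in microseconds, now guarding the clock-based term against a stopped SDCLK, then find the smallest exponent whose (1 << 13) * 2^count cycles cover it. A self-contained sketch of that search follows; the unit choices and example inputs are assumptions for illustration, not the driver's real caller values.

#include <stdio.h>

/* Smallest count whose timeout covers the request, mirroring the
 * doubling loop in sdhci_calc_timeout(). timeout_clk is in kHz,
 * clock in Hz; both inputs here are purely illustrative. */
static int calc_timeout_count(unsigned int timeout_ns,
			      unsigned int timeout_clks,
			      unsigned int clock,
			      unsigned int timeout_clk)
{
	unsigned int target_us = timeout_ns / 1000;
	unsigned int current_us;
	int count = 0;

	if (clock)			/* avoid dividing by a stopped card clock */
		target_us += timeout_clks / clock;

	current_us = (1 << 13) * 1000 / timeout_clk;
	while (current_us < target_us) {
		count++;
		current_us <<= 1;
		if (count >= 0xF)	/* cap at the register's maximum */
			break;
	}
	return count;
}

int main(void)
{
	/* 100 ms data timeout against a 50 MHz timeout clock. */
	printf("count = %d\n", calc_timeout_count(100000000, 0, 50000000, 50000));
	return 0;
}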
@@ -1867,9 +1865,6 @@ static void sdhci_tasklet_finish(unsigned long param) | |||
1867 | 1865 | ||
1868 | del_timer(&host->timer); | 1866 | del_timer(&host->timer); |
1869 | 1867 | ||
1870 | if (host->version >= SDHCI_SPEC_300) | ||
1871 | del_timer(&host->tuning_timer); | ||
1872 | |||
1873 | mrq = host->mrq; | 1868 | mrq = host->mrq; |
1874 | 1869 | ||
1875 | /* | 1870 | /* |
@@ -2461,22 +2456,6 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2461 | host->max_clk = host->ops->get_max_clock(host); | 2456 | host->max_clk = host->ops->get_max_clock(host); |
2462 | } | 2457 | } |
2463 | 2458 | ||
2464 | host->timeout_clk = | ||
2465 | (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; | ||
2466 | if (host->timeout_clk == 0) { | ||
2467 | if (host->ops->get_timeout_clock) { | ||
2468 | host->timeout_clk = host->ops->get_timeout_clock(host); | ||
2469 | } else if (!(host->quirks & | ||
2470 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { | ||
2471 | printk(KERN_ERR | ||
2472 | "%s: Hardware doesn't specify timeout clock " | ||
2473 | "frequency.\n", mmc_hostname(mmc)); | ||
2474 | return -ENODEV; | ||
2475 | } | ||
2476 | } | ||
2477 | if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) | ||
2478 | host->timeout_clk *= 1000; | ||
2479 | |||
2480 | /* | 2459 | /* |
2481 | * In case of Host Controller v3.00, find out whether clock | 2460 | * In case of Host Controller v3.00, find out whether clock |
2482 | * multiplier is supported. | 2461 | * multiplier is supported. |
@@ -2509,10 +2488,26 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2509 | } else | 2488 | } else |
2510 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; | 2489 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; |
2511 | 2490 | ||
2491 | host->timeout_clk = | ||
2492 | (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; | ||
2493 | if (host->timeout_clk == 0) { | ||
2494 | if (host->ops->get_timeout_clock) { | ||
2495 | host->timeout_clk = host->ops->get_timeout_clock(host); | ||
2496 | } else if (!(host->quirks & | ||
2497 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { | ||
2498 | printk(KERN_ERR | ||
2499 | "%s: Hardware doesn't specify timeout clock " | ||
2500 | "frequency.\n", mmc_hostname(mmc)); | ||
2501 | return -ENODEV; | ||
2502 | } | ||
2503 | } | ||
2504 | if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) | ||
2505 | host->timeout_clk *= 1000; | ||
2506 | |||
2512 | if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) | 2507 | if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) |
2513 | mmc->max_discard_to = (1 << 27) / (mmc->f_max / 1000); | 2508 | host->timeout_clk = mmc->f_max / 1000; |
2514 | else | 2509 | |
2515 | mmc->max_discard_to = (1 << 27) / host->timeout_clk; | 2510 | mmc->max_discard_to = (1 << 27) / host->timeout_clk; |
2516 | 2511 | ||
2517 | mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; | 2512 | mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; |
2518 | 2513 | ||
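Editor's note: the block moved above only makes sense after mmc->f_max is known, because the SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK fallback now derives the timeout clock from it, and max_discard_to is computed from the final value in every case. A condensed sketch of that derivation, with the capability fields reduced to plain parameters and the example numbers assumed:

#include <stdio.h>

/* Derive the timeout clock (kHz) and the resulting discard timeout
 * budget of (1 << 27) timeout-clock cycles. Inputs are illustrative,
 * not real capability-register contents. */
static unsigned int max_discard_to(unsigned int caps_timeout_clk,
				   int unit_is_mhz,
				   int uses_sdclk_quirk,
				   unsigned int f_max)
{
	unsigned int timeout_clk = caps_timeout_clk;

	if (unit_is_mhz)
		timeout_clk *= 1000;		/* MHz -> kHz */

	if (uses_sdclk_quirk)
		timeout_clk = f_max / 1000;	/* fall back to the SD clock */

	if (!timeout_clk)			/* the driver bails out with -ENODEV */
		return 0;

	return (1u << 27) / timeout_clk;
}

int main(void)
{
	/* 48 MHz timeout clock from caps, no quirk, 52 MHz f_max. */
	printf("max_discard_to = %u\n", max_discard_to(48, 1, 0, 52000000));
	return 0;
}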
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index 774f6439d7ce..0c4a672f5db6 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c | |||
@@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
120 | mmc_data->hclk = clk_get_rate(priv->clk); | 120 | mmc_data->hclk = clk_get_rate(priv->clk); |
121 | mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; | 121 | mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; |
122 | mmc_data->get_cd = sh_mobile_sdhi_get_cd; | 122 | mmc_data->get_cd = sh_mobile_sdhi_get_cd; |
123 | if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT) | ||
124 | mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; | ||
125 | mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; | 123 | mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; |
126 | if (p) { | 124 | if (p) { |
127 | mmc_data->flags = p->tmio_flags; | 125 | mmc_data->flags = p->tmio_flags; |
126 | if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT) | ||
127 | mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; | ||
128 | mmc_data->ocr_mask = p->tmio_ocr_mask; | 128 | mmc_data->ocr_mask = p->tmio_ocr_mask; |
129 | mmc_data->capabilities |= p->tmio_caps; | 129 | mmc_data->capabilities |= p->tmio_caps; |
130 | 130 | ||
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 8d185de90d20..44a9668c4b7a 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c | |||
@@ -27,7 +27,6 @@ | |||
27 | static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) | 27 | static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) |
28 | { | 28 | { |
29 | const struct mfd_cell *cell = mfd_get_cell(dev); | 29 | const struct mfd_cell *cell = mfd_get_cell(dev); |
30 | struct mmc_host *mmc = platform_get_drvdata(dev); | ||
31 | int ret; | 30 | int ret; |
32 | 31 | ||
33 | ret = tmio_mmc_host_suspend(&dev->dev); | 32 | ret = tmio_mmc_host_suspend(&dev->dev); |
@@ -42,7 +41,6 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) | |||
42 | static int tmio_mmc_resume(struct platform_device *dev) | 41 | static int tmio_mmc_resume(struct platform_device *dev) |
43 | { | 42 | { |
44 | const struct mfd_cell *cell = mfd_get_cell(dev); | 43 | const struct mfd_cell *cell = mfd_get_cell(dev); |
45 | struct mmc_host *mmc = platform_get_drvdata(dev); | ||
46 | int ret = 0; | 44 | int ret = 0; |
47 | 45 | ||
48 | /* Tell the MFD core we are ready to be enabled */ | 46 | /* Tell the MFD core we are ready to be enabled */ |
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h index 65b5b76cc379..64fbb0021825 100644 --- a/drivers/mtd/ubi/debug.h +++ b/drivers/mtd/ubi/debug.h | |||
@@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi) | |||
181 | 181 | ||
182 | #define ubi_dbg_msg(fmt, ...) do { \ | 182 | #define ubi_dbg_msg(fmt, ...) do { \ |
183 | if (0) \ | 183 | if (0) \ |
184 | pr_debug(fmt "\n", ##__VA_ARGS__); \ | 184 | printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \ |
185 | } while (0) | 185 | } while (0) |
186 | 186 | ||
187 | #define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__) | 187 | #define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__) |
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index f7bbde9eb2cb..2adc294f512a 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/skbuff.h> | 46 | #include <linux/skbuff.h> |
47 | #include <linux/platform_device.h> | 47 | #include <linux/platform_device.h> |
48 | #include <linux/clk.h> | 48 | #include <linux/clk.h> |
49 | #include <linux/io.h> | ||
49 | 50 | ||
50 | #include <linux/can/dev.h> | 51 | #include <linux/can/dev.h> |
51 | #include <linux/can/error.h> | 52 | #include <linux/can/error.h> |
@@ -503,9 +504,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
503 | spin_unlock_irqrestore(&priv->mbx_lock, flags); | 504 | spin_unlock_irqrestore(&priv->mbx_lock, flags); |
504 | 505 | ||
505 | /* Prepare mailbox for transmission */ | 506 | /* Prepare mailbox for transmission */ |
507 | data = cf->can_dlc | (get_tx_head_prio(priv) << 8); | ||
506 | if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ | 508 | if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ |
507 | data |= HECC_CANMCF_RTR; | 509 | data |= HECC_CANMCF_RTR; |
508 | data |= get_tx_head_prio(priv) << 8; | ||
509 | hecc_write_mbx(priv, mbxno, HECC_CANMCF, data); | 510 | hecc_write_mbx(priv, mbxno, HECC_CANMCF, data); |
510 | 511 | ||
511 | if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ | 512 | if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ |
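Editor's note: the ti_hecc reorder above matters because `data` was previously OR-ed with the RTR flag before it had been given a value; the mailbox control word has to start from the DLC-plus-priority value and only then pick up RTR. A small sketch of the intended composition; the HECC_CANMCF_RTR bit position is an assumption, CAN_RTR_FLAG matches linux/can.h.

#include <stdint.h>
#include <stdio.h>

#define CAN_RTR_FLAG     0x40000000u	/* as in linux/can.h */
#define HECC_CANMCF_RTR  (1u << 4)	/* assumed bit position, for illustration */

/* Build the CANMCF word: DLC in the low bits, the current TX-head
 * priority at bit 8, and RTR only for remote-request frames. */
static uint32_t hecc_canmcf(uint8_t dlc, uint8_t tx_prio, uint32_t can_id)
{
	uint32_t data = dlc | ((uint32_t)tx_prio << 8);

	if (can_id & CAN_RTR_FLAG)
		data |= HECC_CANMCF_RTR;

	return data;
}

int main(void)
{
	printf("CANMCF = 0x%08x\n",
	       (unsigned int)hecc_canmcf(8, 3, CAN_RTR_FLAG | 0x123));
	return 0;
}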
@@ -923,6 +924,7 @@ static int ti_hecc_probe(struct platform_device *pdev) | |||
923 | priv->can.do_get_state = ti_hecc_get_state; | 924 | priv->can.do_get_state = ti_hecc_get_state; |
924 | priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; | 925 | priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; |
925 | 926 | ||
927 | spin_lock_init(&priv->mbx_lock); | ||
926 | ndev->irq = irq->start; | 928 | ndev->irq = irq->start; |
927 | ndev->flags |= IFF_ECHO; | 929 | ndev->flags |= IFF_ECHO; |
928 | platform_set_drvdata(pdev, ndev); | 930 | platform_set_drvdata(pdev, ndev); |
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index bc3bd34c43f1..6715bf54f04e 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c | |||
@@ -427,6 +427,7 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
427 | dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); | 427 | dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); |
428 | 428 | ||
429 | status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); | 429 | status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); |
430 | greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN; | ||
430 | 431 | ||
431 | /* Wrap around descriptor ring */ | 432 | /* Wrap around descriptor ring */ |
432 | if (greth->tx_next == GRETH_TXBD_NUM_MASK) { | 433 | if (greth->tx_next == GRETH_TXBD_NUM_MASK) { |
@@ -489,7 +490,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
489 | if (nr_frags != 0) | 490 | if (nr_frags != 0) |
490 | status = GRETH_TXBD_MORE; | 491 | status = GRETH_TXBD_MORE; |
491 | 492 | ||
492 | status |= GRETH_TXBD_CSALL; | 493 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
494 | status |= GRETH_TXBD_CSALL; | ||
493 | status |= skb_headlen(skb) & GRETH_BD_LEN; | 495 | status |= skb_headlen(skb) & GRETH_BD_LEN; |
494 | if (greth->tx_next == GRETH_TXBD_NUM_MASK) | 496 | if (greth->tx_next == GRETH_TXBD_NUM_MASK) |
495 | status |= GRETH_BD_WR; | 497 | status |= GRETH_BD_WR; |
@@ -512,7 +514,9 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
512 | greth->tx_skbuff[curr_tx] = NULL; | 514 | greth->tx_skbuff[curr_tx] = NULL; |
513 | bdp = greth->tx_bd_base + curr_tx; | 515 | bdp = greth->tx_bd_base + curr_tx; |
514 | 516 | ||
515 | status = GRETH_TXBD_CSALL | GRETH_BD_EN; | 517 | status = GRETH_BD_EN; |
518 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
519 | status |= GRETH_TXBD_CSALL; | ||
516 | status |= frag->size & GRETH_BD_LEN; | 520 | status |= frag->size & GRETH_BD_LEN; |
517 | 521 | ||
518 | /* Wrap around descriptor ring */ | 522 | /* Wrap around descriptor ring */ |
@@ -637,6 +641,7 @@ static void greth_clean_tx(struct net_device *dev) | |||
637 | dev->stats.tx_fifo_errors++; | 641 | dev->stats.tx_fifo_errors++; |
638 | } | 642 | } |
639 | dev->stats.tx_packets++; | 643 | dev->stats.tx_packets++; |
644 | dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last]; | ||
640 | greth->tx_last = NEXT_TX(greth->tx_last); | 645 | greth->tx_last = NEXT_TX(greth->tx_last); |
641 | greth->tx_free++; | 646 | greth->tx_free++; |
642 | } | 647 | } |
@@ -691,6 +696,7 @@ static void greth_clean_tx_gbit(struct net_device *dev) | |||
691 | greth->tx_skbuff[greth->tx_last] = NULL; | 696 | greth->tx_skbuff[greth->tx_last] = NULL; |
692 | 697 | ||
693 | greth_update_tx_stats(dev, stat); | 698 | greth_update_tx_stats(dev, stat); |
699 | dev->stats.tx_bytes += skb->len; | ||
694 | 700 | ||
695 | bdp = greth->tx_bd_base + greth->tx_last; | 701 | bdp = greth->tx_bd_base + greth->tx_last; |
696 | 702 | ||
@@ -792,6 +798,7 @@ static int greth_rx(struct net_device *dev, int limit) | |||
792 | memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len); | 798 | memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len); |
793 | 799 | ||
794 | skb->protocol = eth_type_trans(skb, dev); | 800 | skb->protocol = eth_type_trans(skb, dev); |
801 | dev->stats.rx_bytes += pkt_len; | ||
795 | dev->stats.rx_packets++; | 802 | dev->stats.rx_packets++; |
796 | netif_receive_skb(skb); | 803 | netif_receive_skb(skb); |
797 | } | 804 | } |
@@ -906,6 +913,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit) | |||
906 | 913 | ||
907 | skb->protocol = eth_type_trans(skb, dev); | 914 | skb->protocol = eth_type_trans(skb, dev); |
908 | dev->stats.rx_packets++; | 915 | dev->stats.rx_packets++; |
916 | dev->stats.rx_bytes += pkt_len; | ||
909 | netif_receive_skb(skb); | 917 | netif_receive_skb(skb); |
910 | 918 | ||
911 | greth->rx_skbuff[greth->rx_cur] = newskb; | 919 | greth->rx_skbuff[greth->rx_cur] = newskb; |
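Editor's note: the greth changes above make checksum offload conditional on the stack actually having deferred it (CHECKSUM_PARTIAL) and record the transmitted length so tx_bytes can be accounted when the descriptor is cleaned. A condensed user-space sketch of the head-descriptor status word; the bit values are placeholders standing in for the GRETH_* descriptor definitions.

#include <stdint.h>
#include <stdio.h>

/* Placeholder descriptor bits, not the real GRETH register layout. */
#define GRETH_BD_EN      0x0800
#define GRETH_BD_LEN     0x07FF
#define GRETH_TXBD_CSALL 0x3800
#define GRETH_TXBD_MORE  0x0004

enum csum_mode { CSUM_NONE, CSUM_PARTIAL };

/* Status word for the first descriptor of a (possibly fragmented)
 * frame: request checksum insertion only when the stack asked for it. */
static uint32_t greth_tx_status(unsigned int headlen, int more_frags,
				enum csum_mode csum)
{
	uint32_t status = more_frags ? GRETH_TXBD_MORE : 0;

	if (csum == CSUM_PARTIAL)
		status |= GRETH_TXBD_CSALL;

	status |= headlen & GRETH_BD_LEN;
	return status | GRETH_BD_EN;
}

int main(void)
{
	printf("status = 0x%08x\n",
	       (unsigned int)greth_tx_status(1514, 0, CSUM_PARTIAL));
	return 0;
}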
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h index 9a0040dee4da..232a622a85b7 100644 --- a/drivers/net/ethernet/aeroflex/greth.h +++ b/drivers/net/ethernet/aeroflex/greth.h | |||
@@ -103,6 +103,7 @@ struct greth_private { | |||
103 | 103 | ||
104 | unsigned char *tx_bufs[GRETH_TXBD_NUM]; | 104 | unsigned char *tx_bufs[GRETH_TXBD_NUM]; |
105 | unsigned char *rx_bufs[GRETH_RXBD_NUM]; | 105 | unsigned char *rx_bufs[GRETH_RXBD_NUM]; |
106 | u16 tx_bufs_length[GRETH_TXBD_NUM]; | ||
106 | 107 | ||
107 | u16 tx_next; | 108 | u16 tx_next; |
108 | u16 tx_last; | 109 | u16 tx_last; |
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c index c2b630c5e852..7d5ded80d2d7 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c | |||
@@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data) | |||
308 | struct net_device *dev = (struct net_device *)data; | 308 | struct net_device *dev = (struct net_device *)data; |
309 | struct dev_priv *priv = netdev_priv(dev); | 309 | struct dev_priv *priv = netdev_priv(dev); |
310 | unsigned int lnkstat, carrier; | 310 | unsigned int lnkstat, carrier; |
311 | unsigned long flags; | ||
311 | 312 | ||
313 | spin_lock_irqsave(&priv->chip_lock, flags); | ||
312 | lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; | 314 | lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; |
315 | spin_unlock_irqrestore(&priv->chip_lock, flags); | ||
313 | carrier = netif_carrier_ok(dev); | 316 | carrier = netif_carrier_ok(dev); |
314 | 317 | ||
315 | if (lnkstat && !carrier) { | 318 | if (lnkstat && !carrier) { |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index f127768e4e83..2f92487724c6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -310,6 +310,14 @@ union db_prod { | |||
310 | u32 raw; | 310 | u32 raw; |
311 | }; | 311 | }; |
312 | 312 | ||
313 | /* dropless fc FW/HW related params */ | ||
314 | #define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512) | ||
315 | #define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \ | ||
316 | ETH_MAX_AGGREGATION_QUEUES_E1 :\ | ||
317 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2) | ||
318 | #define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) | ||
319 | #define FW_PREFETCH_CNT 16 | ||
320 | #define DROPLESS_FC_HEADROOM 100 | ||
313 | 321 | ||
314 | /* MC hsi */ | 322 | /* MC hsi */ |
315 | #define BCM_PAGE_SHIFT 12 | 323 | #define BCM_PAGE_SHIFT 12 |
@@ -326,15 +334,35 @@ union db_prod { | |||
326 | /* SGE ring related macros */ | 334 | /* SGE ring related macros */ |
327 | #define NUM_RX_SGE_PAGES 2 | 335 | #define NUM_RX_SGE_PAGES 2 |
328 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) | 336 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) |
329 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) | 337 | #define NEXT_PAGE_SGE_DESC_CNT 2 |
338 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT) | ||
330 | /* RX_SGE_CNT is promised to be a power of 2 */ | 339 | /* RX_SGE_CNT is promised to be a power of 2 */ |
331 | #define RX_SGE_MASK (RX_SGE_CNT - 1) | 340 | #define RX_SGE_MASK (RX_SGE_CNT - 1) |
332 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) | 341 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) |
333 | #define MAX_RX_SGE (NUM_RX_SGE - 1) | 342 | #define MAX_RX_SGE (NUM_RX_SGE - 1) |
334 | #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ | 343 | #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ |
335 | (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) | 344 | (MAX_RX_SGE_CNT - 1)) ? \ |
345 | (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \ | ||
346 | (x) + 1) | ||
336 | #define RX_SGE(x) ((x) & MAX_RX_SGE) | 347 | #define RX_SGE(x) ((x) & MAX_RX_SGE) |
337 | 348 | ||
349 | /* | ||
350 | * Number of required SGEs is the sum of two: | ||
351 | * 1. Number of possible opened aggregations (next packet for | ||
352 | * these aggregations will probably consume SGE immediately) | ||
353 | * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only | ||
354 | * after placement on BD for new TPA aggregation) | ||
355 | * | ||
356 | * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page | ||
357 | */ | ||
358 | #define NUM_SGE_REQ (MAX_AGG_QS(bp) + \ | ||
359 | (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) | ||
360 | #define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \ | ||
361 | MAX_RX_SGE_CNT) | ||
362 | #define SGE_TH_LO(bp) (NUM_SGE_REQ + \ | ||
363 | NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT) | ||
364 | #define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) | ||
365 | |||
338 | /* Manipulate a bit vector defined as an array of u64 */ | 366 | /* Manipulate a bit vector defined as an array of u64 */ |
339 | 367 | ||
340 | /* Number of bits in one sge_mask array element */ | 368 | /* Number of bits in one sge_mask array element */ |
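Editor's note: the NEXT_PAGE_*_DESC_CNT rework above is pure index arithmetic -- the last usable slot on a page has to jump over the reserved "next page" link descriptors instead of hard-coding +2 or +3. The toy ring below shows the same wrap logic with deliberately small, assumed page sizes so the jumps are visible.

#include <stdio.h>

/* Toy ring: 8 descriptors per page, the last 2 reserved as "next page"
 * pointers (like the SGE/RX rings above), 2 pages in total. */
#define DESC_PER_PAGE       8
#define NEXT_PAGE_DESC_CNT  2
#define MAX_DESC_CNT        (DESC_PER_PAGE - NEXT_PAGE_DESC_CNT)
#define PAGE_IDX_MASK       (DESC_PER_PAGE - 1)
#define NUM_DESC            (DESC_PER_PAGE * 2)

/* Advance an index, skipping the link descriptors at the end of each
 * page -- the same shape as NEXT_SGE_IDX()/NEXT_RX_IDX() above. */
static unsigned int next_idx(unsigned int x)
{
	if ((x & PAGE_IDX_MASK) == MAX_DESC_CNT - 1)
		return x + 1 + NEXT_PAGE_DESC_CNT;
	return x + 1;
}

int main(void)
{
	unsigned int i = 0, n;

	for (n = 0; n < 12; n++) {
		printf("%u ", i & (NUM_DESC - 1));
		i = next_idx(i);
	}
	printf("\n");	/* prints 0..5, then 8..13: slots 6,7,14,15 are links */
	return 0;
}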
@@ -546,24 +574,43 @@ struct bnx2x_fastpath { | |||
546 | 574 | ||
547 | #define NUM_TX_RINGS 16 | 575 | #define NUM_TX_RINGS 16 |
548 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) | 576 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) |
549 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) | 577 | #define NEXT_PAGE_TX_DESC_CNT 1 |
578 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT) | ||
550 | #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) | 579 | #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) |
551 | #define MAX_TX_BD (NUM_TX_BD - 1) | 580 | #define MAX_TX_BD (NUM_TX_BD - 1) |
552 | #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) | 581 | #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) |
553 | #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ | 582 | #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ |
554 | (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) | 583 | (MAX_TX_DESC_CNT - 1)) ? \ |
584 | (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \ | ||
585 | (x) + 1) | ||
555 | #define TX_BD(x) ((x) & MAX_TX_BD) | 586 | #define TX_BD(x) ((x) & MAX_TX_BD) |
556 | #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) | 587 | #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) |
557 | 588 | ||
558 | /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ | 589 | /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ |
559 | #define NUM_RX_RINGS 8 | 590 | #define NUM_RX_RINGS 8 |
560 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) | 591 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) |
561 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) | 592 | #define NEXT_PAGE_RX_DESC_CNT 2 |
593 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT) | ||
562 | #define RX_DESC_MASK (RX_DESC_CNT - 1) | 594 | #define RX_DESC_MASK (RX_DESC_CNT - 1) |
563 | #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) | 595 | #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) |
564 | #define MAX_RX_BD (NUM_RX_BD - 1) | 596 | #define MAX_RX_BD (NUM_RX_BD - 1) |
565 | #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) | 597 | #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) |
566 | #define MIN_RX_AVAIL 128 | 598 | |
599 | /* dropless fc calculations for BDs | ||
600 | * | ||
601 | * Number of BDs should be the same as the number of buffers in BRB: | ||
602 | * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT | ||
603 | * "next" elements on each page | ||
604 | */ | ||
605 | #define NUM_BD_REQ BRB_SIZE(bp) | ||
606 | #define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \ | ||
607 | MAX_RX_DESC_CNT) | ||
608 | #define BD_TH_LO(bp) (NUM_BD_REQ + \ | ||
609 | NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \ | ||
610 | FW_DROP_LEVEL(bp)) | ||
611 | #define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) | ||
612 | |||
613 | #define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128) | ||
567 | 614 | ||
568 | #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ | 615 | #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ |
569 | ETH_MIN_RX_CQES_WITH_TPA_E1 : \ | 616 | ETH_MIN_RX_CQES_WITH_TPA_E1 : \ |
@@ -574,7 +621,9 @@ struct bnx2x_fastpath { | |||
574 | MIN_RX_AVAIL)) | 621 | MIN_RX_AVAIL)) |
575 | 622 | ||
576 | #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ | 623 | #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ |
577 | (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) | 624 | (MAX_RX_DESC_CNT - 1)) ? \ |
625 | (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \ | ||
626 | (x) + 1) | ||
578 | #define RX_BD(x) ((x) & MAX_RX_BD) | 627 | #define RX_BD(x) ((x) & MAX_RX_BD) |
579 | 628 | ||
580 | /* | 629 | /* |
@@ -584,14 +633,31 @@ struct bnx2x_fastpath { | |||
584 | #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) | 633 | #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) |
585 | #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) | 634 | #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) |
586 | #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) | 635 | #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) |
587 | #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) | 636 | #define NEXT_PAGE_RCQ_DESC_CNT 1 |
637 | #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT) | ||
588 | #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) | 638 | #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) |
589 | #define MAX_RCQ_BD (NUM_RCQ_BD - 1) | 639 | #define MAX_RCQ_BD (NUM_RCQ_BD - 1) |
590 | #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) | 640 | #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) |
591 | #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ | 641 | #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ |
592 | (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) | 642 | (MAX_RCQ_DESC_CNT - 1)) ? \ |
643 | (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \ | ||
644 | (x) + 1) | ||
593 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) | 645 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) |
594 | 646 | ||
647 | /* dropless fc calculations for RCQs | ||
648 | * | ||
649 | * Number of RCQs should be the same as the number of buffers in BRB: | ||
650 | * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT | ||
651 | * "next" elements on each page | ||
652 | */ | ||
653 | #define NUM_RCQ_REQ BRB_SIZE(bp) | ||
654 | #define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \ | ||
655 | MAX_RCQ_DESC_CNT) | ||
656 | #define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \ | ||
657 | NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \ | ||
658 | FW_DROP_LEVEL(bp)) | ||
659 | #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) | ||
660 | |||
595 | 661 | ||
596 | /* This is needed for determining of last_max */ | 662 | /* This is needed for determining of last_max */ |
597 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) | 663 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) |
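Editor's note: all of the new dropless-FC thresholds above follow one pattern -- enough ring entries to back every BRB buffer, plus the "next page" descriptors those entries pull in, plus the firmware drop level, with the high watermark a fixed headroom above the low one. A worked example for the BD case; every constant here is an assumed, E2-flavoured stand-in, not a value taken from the headers.

#include <stdio.h>

/* Assumed stand-ins for BRB_SIZE, MAX_AGG_QS, MAX_SPQ_PENDING etc. */
#define BRB_SIZE              512
#define MAX_AGG_QS            64
#define MAX_SPQ_PENDING       8
#define FW_DROP_LEVEL         (3 + MAX_SPQ_PENDING + MAX_AGG_QS)
#define DROPLESS_FC_HEADROOM  100

#define RX_DESC_CNT           512	/* assumed descriptors per page */
#define NEXT_PAGE_RX_DESC_CNT 2
#define MAX_RX_DESC_CNT       (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)

int main(void)
{
	/* One BD per BRB buffer, rounded up to whole pages for the links. */
	unsigned int num_bd_req = BRB_SIZE;
	unsigned int num_bd_pg  = (num_bd_req + MAX_RX_DESC_CNT - 1) / MAX_RX_DESC_CNT;
	unsigned int bd_th_lo   = num_bd_req +
				  num_bd_pg * NEXT_PAGE_RX_DESC_CNT +
				  FW_DROP_LEVEL;
	unsigned int bd_th_hi   = bd_th_lo + DROPLESS_FC_HEADROOM;

	printf("BD_TH_LO=%u BD_TH_HI=%u MIN_RX_AVAIL=%u\n",
	       bd_th_lo, bd_th_hi, bd_th_hi + 128);
	return 0;
}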
@@ -680,24 +746,17 @@ struct bnx2x_fastpath { | |||
680 | #define FP_CSB_FUNC_OFF \ | 746 | #define FP_CSB_FUNC_OFF \ |
681 | offsetof(struct cstorm_status_block_c, func) | 747 | offsetof(struct cstorm_status_block_c, func) |
682 | 748 | ||
683 | #define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ | 749 | #define HC_INDEX_ETH_RX_CQ_CONS 1 |
684 | /* (HC_INDEX_U_TOE_RX_CQ_CONS) */ | ||
685 | #define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */ | ||
686 | /* (HC_INDEX_U_ETH_RX_CQ_CONS) */ | ||
687 | #define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */ | ||
688 | /* (HC_INDEX_U_ETH_RX_BD_CONS) */ | ||
689 | |||
690 | #define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */ | ||
691 | /* (HC_INDEX_C_TOE_TX_CQ_CONS) */ | ||
692 | #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */ | ||
693 | /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ | ||
694 | #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */ | ||
695 | /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ | ||
696 | #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */ | ||
697 | /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ | ||
698 | 750 | ||
699 | #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 | 751 | #define HC_INDEX_OOO_TX_CQ_CONS 4 |
700 | 752 | ||
753 | #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 | ||
754 | |||
755 | #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 | ||
756 | |||
757 | #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 | ||
758 | |||
759 | #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 | ||
701 | 760 | ||
702 | #define BNX2X_RX_SB_INDEX \ | 761 | #define BNX2X_RX_SB_INDEX \ |
703 | (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) | 762 | (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) |
@@ -1095,11 +1154,12 @@ struct bnx2x { | |||
1095 | #define BP_PORT(bp) (bp->pfid & 1) | 1154 | #define BP_PORT(bp) (bp->pfid & 1) |
1096 | #define BP_FUNC(bp) (bp->pfid) | 1155 | #define BP_FUNC(bp) (bp->pfid) |
1097 | #define BP_ABS_FUNC(bp) (bp->pf_num) | 1156 | #define BP_ABS_FUNC(bp) (bp->pf_num) |
1098 | #define BP_E1HVN(bp) (bp->pfid >> 1) | 1157 | #define BP_VN(bp) ((bp)->pfid >> 1) |
1099 | #define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ | 1158 | #define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) |
1100 | #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) | 1159 | #define BP_L_ID(bp) (BP_VN(bp) << 2) |
1101 | #define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ | 1160 | #define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\ |
1102 | BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) | 1161 | (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) |
1162 | #define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) | ||
1103 | 1163 | ||
1104 | struct net_device *dev; | 1164 | struct net_device *dev; |
1105 | struct pci_dev *pdev; | 1165 | struct pci_dev *pdev; |
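Editor's note: the BP_E1HVN -> BP_VN rename above keeps the same decomposition of pfid -- the lowest bit selects the port, the remaining bits the VN -- and the firmware mailbox index becomes port + vn * stride, where the stride depends on the chip/port mode. A small sketch of those helpers with the chip-mode inputs reduced to booleans; the pfid value in the example is arbitrary.

#include <stdio.h>

/* pfid decomposition as BP_PORT()/BP_VN() express it. */
static int bp_port(int pfid) { return pfid & 1; }
static int bp_vn(int pfid)   { return pfid >> 1; }

/* Mailbox index for a given vn: stride 2 on E1x or 4-port chips,
 * otherwise 1 (mirroring BP_FW_MB_IDX_VN above). */
static int fw_mb_idx_vn(int pfid, int vn, int is_e1x, int is_4_port)
{
	int stride = (is_e1x || is_4_port) ? 2 : 1;

	return bp_port(pfid) + vn * stride;
}

int main(void)
{
	int pfid = 5;	/* example: port 1, vn 2 */

	printf("port=%d vn=%d fw_mb_idx=%d\n",
	       bp_port(pfid), bp_vn(pfid),
	       fw_mb_idx_vn(pfid, bp_vn(pfid), 0, 0));
	return 0;
}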
@@ -1762,7 +1822,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1762 | 1822 | ||
1763 | #define MAX_DMAE_C_PER_PORT 8 | 1823 | #define MAX_DMAE_C_PER_PORT 8 |
1764 | #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ | 1824 | #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ |
1765 | BP_E1HVN(bp)) | 1825 | BP_VN(bp)) |
1766 | #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ | 1826 | #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ |
1767 | E1HVN_MAX) | 1827 | E1HVN_MAX) |
1768 | 1828 | ||
@@ -1788,7 +1848,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1788 | 1848 | ||
1789 | /* must be used on a CID before placing it on a HW ring */ | 1849 | /* must be used on a CID before placing it on a HW ring */ |
1790 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ | 1850 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ |
1791 | (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ | 1851 | (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ |
1792 | (x)) | 1852 | (x)) |
1793 | 1853 | ||
1794 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) | 1854 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 5c3eb17c4f4a..e575e89c7d46 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -993,8 +993,6 @@ void __bnx2x_link_report(struct bnx2x *bp) | |||
993 | void bnx2x_init_rx_rings(struct bnx2x *bp) | 993 | void bnx2x_init_rx_rings(struct bnx2x *bp) |
994 | { | 994 | { |
995 | int func = BP_FUNC(bp); | 995 | int func = BP_FUNC(bp); |
996 | int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
997 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2; | ||
998 | u16 ring_prod; | 996 | u16 ring_prod; |
999 | int i, j; | 997 | int i, j; |
1000 | 998 | ||
@@ -1007,7 +1005,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
1007 | 1005 | ||
1008 | if (!fp->disable_tpa) { | 1006 | if (!fp->disable_tpa) { |
1009 | /* Fill the per-aggregtion pool */ | 1007 | /* Fill the per-aggregtion pool */ |
1010 | for (i = 0; i < max_agg_queues; i++) { | 1008 | for (i = 0; i < MAX_AGG_QS(bp); i++) { |
1011 | struct bnx2x_agg_info *tpa_info = | 1009 | struct bnx2x_agg_info *tpa_info = |
1012 | &fp->tpa_info[i]; | 1010 | &fp->tpa_info[i]; |
1013 | struct sw_rx_bd *first_buf = | 1011 | struct sw_rx_bd *first_buf = |
@@ -1047,7 +1045,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
1047 | bnx2x_free_rx_sge_range(bp, fp, | 1045 | bnx2x_free_rx_sge_range(bp, fp, |
1048 | ring_prod); | 1046 | ring_prod); |
1049 | bnx2x_free_tpa_pool(bp, fp, | 1047 | bnx2x_free_tpa_pool(bp, fp, |
1050 | max_agg_queues); | 1048 | MAX_AGG_QS(bp)); |
1051 | fp->disable_tpa = 1; | 1049 | fp->disable_tpa = 1; |
1052 | ring_prod = 0; | 1050 | ring_prod = 0; |
1053 | break; | 1051 | break; |
@@ -1143,9 +1141,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) | |||
1143 | bnx2x_free_rx_bds(fp); | 1141 | bnx2x_free_rx_bds(fp); |
1144 | 1142 | ||
1145 | if (!fp->disable_tpa) | 1143 | if (!fp->disable_tpa) |
1146 | bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? | 1144 | bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); |
1147 | ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
1148 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2); | ||
1149 | } | 1145 | } |
1150 | } | 1146 | } |
1151 | 1147 | ||
@@ -3100,15 +3096,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) | |||
3100 | struct bnx2x_fastpath *fp = &bp->fp[index]; | 3096 | struct bnx2x_fastpath *fp = &bp->fp[index]; |
3101 | int ring_size = 0; | 3097 | int ring_size = 0; |
3102 | u8 cos; | 3098 | u8 cos; |
3099 | int rx_ring_size = 0; | ||
3103 | 3100 | ||
3104 | /* if rx_ring_size specified - use it */ | 3101 | /* if rx_ring_size specified - use it */ |
3105 | int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : | 3102 | if (!bp->rx_ring_size) { |
3106 | MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); | ||
3107 | 3103 | ||
3108 | /* allocate at least number of buffers required by FW */ | 3104 | rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); |
3109 | rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : | 3105 | |
3110 | MIN_RX_SIZE_TPA, | 3106 | /* allocate at least number of buffers required by FW */ |
3111 | rx_ring_size); | 3107 | rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : |
3108 | MIN_RX_SIZE_TPA, rx_ring_size); | ||
3109 | |||
3110 | bp->rx_ring_size = rx_ring_size; | ||
3111 | } else | ||
3112 | rx_ring_size = bp->rx_ring_size; | ||
3112 | 3113 | ||
3113 | /* Common */ | 3114 | /* Common */ |
3114 | sb = &bnx2x_fp(bp, index, status_blk); | 3115 | sb = &bnx2x_fp(bp, index, status_blk); |
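Editor's note: the reshuffled block above only computes a default ring size when the user has not configured one, clamps that default to the firmware minimum, and caches the result in bp->rx_ring_size so later users (including the new pause-threshold WARN_ONs) see a stable value. A condensed sketch of that selection; the minima and capacity below are assumed constants, not the driver's.

#include <stdio.h>

#define MAX_RX_AVAIL        4000	/* assumed total ring capacity */
#define MIN_RX_SIZE_TPA     1000	/* assumed FW minimum with TPA */
#define MIN_RX_SIZE_NONTPA  128		/* assumed FW minimum without TPA */

/* Pick the per-queue RX ring size: honour a user-provided value,
 * otherwise derive a default once and remember it. */
static int pick_rx_ring_size(int *cached, int num_queues, int disable_tpa)
{
	int rx_ring_size;

	if (!*cached) {
		int fw_min = disable_tpa ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA;

		rx_ring_size = MAX_RX_AVAIL / num_queues;
		if (rx_ring_size < fw_min)
			rx_ring_size = fw_min;
		*cached = rx_ring_size;		/* visible to later threshold checks */
	} else {
		rx_ring_size = *cached;
	}
	return rx_ring_size;
}

int main(void)
{
	int cached = 0;

	printf("first call:  %d\n", pick_rx_ring_size(&cached, 8, 0));
	printf("second call: %d (cached)\n", pick_rx_ring_size(&cached, 8, 0));
	return 0;
}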
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index ce14f11c0de5..a49f8cfa2dc6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | |||
@@ -366,13 +366,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
366 | } | 366 | } |
367 | 367 | ||
368 | /* advertise the requested speed and duplex if supported */ | 368 | /* advertise the requested speed and duplex if supported */ |
369 | cmd->advertising &= bp->port.supported[cfg_idx]; | 369 | if (cmd->advertising & ~(bp->port.supported[cfg_idx])) { |
370 | DP(NETIF_MSG_LINK, "Advertisement parameters " | ||
371 | "are not supported\n"); | ||
372 | return -EINVAL; | ||
373 | } | ||
370 | 374 | ||
371 | bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; | 375 | bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; |
372 | bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; | 376 | bp->link_params.req_duplex[cfg_idx] = cmd->duplex; |
373 | bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | | 377 | bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | |
374 | cmd->advertising); | 378 | cmd->advertising); |
379 | if (cmd->advertising) { | ||
380 | |||
381 | bp->link_params.speed_cap_mask[cfg_idx] = 0; | ||
382 | if (cmd->advertising & ADVERTISED_10baseT_Half) { | ||
383 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
384 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; | ||
385 | } | ||
386 | if (cmd->advertising & ADVERTISED_10baseT_Full) | ||
387 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
388 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; | ||
375 | 389 | ||
390 | if (cmd->advertising & ADVERTISED_100baseT_Full) | ||
391 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
392 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; | ||
393 | |||
394 | if (cmd->advertising & ADVERTISED_100baseT_Half) { | ||
395 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
396 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; | ||
397 | } | ||
398 | if (cmd->advertising & ADVERTISED_1000baseT_Half) { | ||
399 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
400 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; | ||
401 | } | ||
402 | if (cmd->advertising & (ADVERTISED_1000baseT_Full | | ||
403 | ADVERTISED_1000baseKX_Full)) | ||
404 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
405 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; | ||
406 | |||
407 | if (cmd->advertising & (ADVERTISED_10000baseT_Full | | ||
408 | ADVERTISED_10000baseKX4_Full | | ||
409 | ADVERTISED_10000baseKR_Full)) | ||
410 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
411 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; | ||
412 | } | ||
376 | } else { /* forced speed */ | 413 | } else { /* forced speed */ |
377 | /* advertise the requested speed and duplex if supported */ | 414 | /* advertise the requested speed and duplex if supported */ |
378 | switch (speed) { | 415 | switch (speed) { |
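Editor's note: the ethtool hunk above first rejects any advertisement bits the board does not support and then rebuilds the link speed capability mask bit by bit from what remains. A reduced sketch of that translation; the ADV_* values follow ethtool.h, while the CAP_* masks are hypothetical stand-ins for the PORT_HW_CFG_SPEED_CAPABILITY_* constants.

#include <stdint.h>
#include <stdio.h>

/* Subset of the ethtool advertisement bits (values as in ethtool.h). */
#define ADV_10_FULL     0x0002
#define ADV_100_FULL    0x0008
#define ADV_1000_FULL   0x0020
#define ADV_10000_FULL  0x1000

/* Hypothetical capability-mask bits standing in for PORT_HW_CFG_*. */
#define CAP_10M_FULL   0x01
#define CAP_100M_FULL  0x02
#define CAP_1G         0x04
#define CAP_10G        0x08

/* Reject unsupported bits, then translate the rest into a cap mask. */
static int adv_to_cap_mask(uint32_t advertising, uint32_t supported,
			   uint32_t *cap_mask)
{
	if (advertising & ~supported)
		return -1;	/* the driver returns -EINVAL here */

	*cap_mask = 0;
	if (advertising & ADV_10_FULL)
		*cap_mask |= CAP_10M_FULL;
	if (advertising & ADV_100_FULL)
		*cap_mask |= CAP_100M_FULL;
	if (advertising & ADV_1000_FULL)
		*cap_mask |= CAP_1G;
	if (advertising & ADV_10000_FULL)
		*cap_mask |= CAP_10G;
	return 0;
}

int main(void)
{
	uint32_t mask;

	if (!adv_to_cap_mask(ADV_1000_FULL | ADV_10000_FULL,
			     ADV_1000_FULL | ADV_10000_FULL, &mask))
		printf("speed_cap_mask = 0x%x\n", (unsigned int)mask);
	return 0;
}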
@@ -1313,10 +1350,7 @@ static void bnx2x_get_ringparam(struct net_device *dev, | |||
1313 | if (bp->rx_ring_size) | 1350 | if (bp->rx_ring_size) |
1314 | ering->rx_pending = bp->rx_ring_size; | 1351 | ering->rx_pending = bp->rx_ring_size; |
1315 | else | 1352 | else |
1316 | if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) | 1353 | ering->rx_pending = MAX_RX_AVAIL; |
1317 | ering->rx_pending = MAX_RX_AVAIL/bp->num_queues; | ||
1318 | else | ||
1319 | ering->rx_pending = MAX_RX_AVAIL; | ||
1320 | 1354 | ||
1321 | ering->rx_mini_pending = 0; | 1355 | ering->rx_mini_pending = 0; |
1322 | ering->rx_jumbo_pending = 0; | 1356 | ering->rx_jumbo_pending = 0; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 8e9b87be3002..818723c9e678 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | |||
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp, | |||
778 | { | 778 | { |
779 | u32 nig_reg_adress_crd_weight = 0; | 779 | u32 nig_reg_adress_crd_weight = 0; |
780 | u32 pbf_reg_adress_crd_weight = 0; | 780 | u32 pbf_reg_adress_crd_weight = 0; |
781 | /* Calculate and set BW for this COS*/ | 781 | /* Calculate and set BW for this COS - use 1 instead of 0 for BW */ |
782 | const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw; | 782 | const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw; |
783 | const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw; | 783 | const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw; |
784 | 784 | ||
785 | switch (cos_entry) { | 785 | switch (cos_entry) { |
786 | case 0: | 786 | case 0: |
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw( | |||
852 | /* Calculate total BW requested */ | 852 | /* Calculate total BW requested */ |
853 | for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { | 853 | for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { |
854 | if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { | 854 | if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { |
855 | 855 | *total_bw += | |
856 | if (0 == ets_params->cos[cos_idx].params.bw_params.bw) { | 856 | ets_params->cos[cos_idx].params.bw_params.bw; |
857 | DP(NETIF_MSG_LINK, | ||
858 | "bnx2x_ets_E3B0_config BW was set to 0\n"); | ||
859 | return -EINVAL; | ||
860 | } | 857 | } |
861 | *total_bw += | ||
862 | ets_params->cos[cos_idx].params.bw_params.bw; | ||
863 | } | ||
864 | } | 858 | } |
865 | 859 | ||
866 | /*Check taotl BW is valid */ | 860 | /* Check total BW is valid */ |
867 | if ((100 != *total_bw) || (0 == *total_bw)) { | 861 | if ((100 != *total_bw) || (0 == *total_bw)) { |
868 | if (0 == *total_bw) { | 862 | if (0 == *total_bw) { |
869 | DP(NETIF_MSG_LINK, | 863 | DP(NETIF_MSG_LINK, |
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params, | |||
1726 | 1720 | ||
1727 | /* Check loopback mode */ | 1721 | /* Check loopback mode */ |
1728 | if (lb) | 1722 | if (lb) |
1729 | val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; | 1723 | val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK; |
1730 | REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); | 1724 | REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); |
1731 | bnx2x_set_xumac_nig(params, | 1725 | bnx2x_set_xumac_nig(params, |
1732 | ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); | 1726 | ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); |
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, | |||
3630 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, | 3624 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, |
3631 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); | 3625 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); |
3632 | 3626 | ||
3627 | /* Advertise and set FEC (Forward Error Correction) */ | ||
3628 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, | ||
3629 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, | ||
3630 | (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY | | ||
3631 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ)); | ||
3632 | |||
3633 | /* Enable CL37 BAM */ | 3633 | /* Enable CL37 BAM */ |
3634 | if (REG_RD(bp, params->shmem_base + | 3634 | if (REG_RD(bp, params->shmem_base + |
3635 | offsetof(struct shmem_region, dev_info. | 3635 | offsetof(struct shmem_region, dev_info. |
@@ -5925,7 +5925,7 @@ int bnx2x_set_led(struct link_params *params, | |||
5925 | (tmp | EMAC_LED_OVERRIDE)); | 5925 | (tmp | EMAC_LED_OVERRIDE)); |
5926 | /* | 5926 | /* |
5927 | * return here without enabling traffic | 5927 | * return here without enabling traffic |
5928 | * LED blink andsetting rate in ON mode. | 5928 | * LED blink and setting rate in ON mode. |
5929 | * In oper mode, enabling LED blink | 5929 | * In oper mode, enabling LED blink |
5930 | * and setting rate is needed. | 5930 | * and setting rate is needed. |
5931 | */ | 5931 | */ |
@@ -5937,7 +5937,11 @@ int bnx2x_set_led(struct link_params *params, | |||
5937 | * This is a work-around for HW issue found when link | 5937 | * This is a work-around for HW issue found when link |
5938 | * is up in CL73 | 5938 | * is up in CL73 |
5939 | */ | 5939 | */ |
5940 | REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); | 5940 | if ((!CHIP_IS_E3(bp)) || |
5941 | (CHIP_IS_E3(bp) && | ||
5942 | mode == LED_MODE_ON)) | ||
5943 | REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); | ||
5944 | |||
5941 | if (CHIP_IS_E1x(bp) || | 5945 | if (CHIP_IS_E1x(bp) || |
5942 | CHIP_IS_E2(bp) || | 5946 | CHIP_IS_E2(bp) || |
5943 | (mode == LED_MODE_ON)) | 5947 | (mode == LED_MODE_ON)) |
@@ -10644,8 +10648,7 @@ static struct bnx2x_phy phy_warpcore = { | |||
10644 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, | 10648 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, |
10645 | .addr = 0xff, | 10649 | .addr = 0xff, |
10646 | .def_md_devad = 0, | 10650 | .def_md_devad = 0, |
10647 | .flags = (FLAGS_HW_LOCK_REQUIRED | | 10651 | .flags = FLAGS_HW_LOCK_REQUIRED, |
10648 | FLAGS_TX_ERROR_CHECK), | ||
10649 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10652 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10650 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10653 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10651 | .mdio_ctrl = 0, | 10654 | .mdio_ctrl = 0, |
@@ -10771,8 +10774,7 @@ static struct bnx2x_phy phy_8706 = { | |||
10771 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, | 10774 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, |
10772 | .addr = 0xff, | 10775 | .addr = 0xff, |
10773 | .def_md_devad = 0, | 10776 | .def_md_devad = 0, |
10774 | .flags = (FLAGS_INIT_XGXS_FIRST | | 10777 | .flags = FLAGS_INIT_XGXS_FIRST, |
10775 | FLAGS_TX_ERROR_CHECK), | ||
10776 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10778 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10777 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10779 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10778 | .mdio_ctrl = 0, | 10780 | .mdio_ctrl = 0, |
@@ -10803,8 +10805,7 @@ static struct bnx2x_phy phy_8726 = { | |||
10803 | .addr = 0xff, | 10805 | .addr = 0xff, |
10804 | .def_md_devad = 0, | 10806 | .def_md_devad = 0, |
10805 | .flags = (FLAGS_HW_LOCK_REQUIRED | | 10807 | .flags = (FLAGS_HW_LOCK_REQUIRED | |
10806 | FLAGS_INIT_XGXS_FIRST | | 10808 | FLAGS_INIT_XGXS_FIRST), |
10807 | FLAGS_TX_ERROR_CHECK), | ||
10808 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10809 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10809 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10810 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10810 | .mdio_ctrl = 0, | 10811 | .mdio_ctrl = 0, |
@@ -10835,8 +10836,7 @@ static struct bnx2x_phy phy_8727 = { | |||
10835 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, | 10836 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, |
10836 | .addr = 0xff, | 10837 | .addr = 0xff, |
10837 | .def_md_devad = 0, | 10838 | .def_md_devad = 0, |
10838 | .flags = (FLAGS_FAN_FAILURE_DET_REQ | | 10839 | .flags = FLAGS_FAN_FAILURE_DET_REQ, |
10839 | FLAGS_TX_ERROR_CHECK), | ||
10840 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10840 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10841 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10841 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10842 | .mdio_ctrl = 0, | 10842 | .mdio_ctrl = 0, |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 85dd294aeaba..621ab281ed89 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -408,8 +408,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, | |||
408 | opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); | 408 | opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); |
409 | 409 | ||
410 | opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); | 410 | opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); |
411 | opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | | 411 | opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) | |
412 | (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); | 412 | (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); |
413 | opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); | 413 | opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); |
414 | 414 | ||
415 | #ifdef __BIG_ENDIAN | 415 | #ifdef __BIG_ENDIAN |
@@ -1417,7 +1417,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) | |||
1417 | if (!CHIP_IS_E1(bp)) { | 1417 | if (!CHIP_IS_E1(bp)) { |
1418 | /* init leading/trailing edge */ | 1418 | /* init leading/trailing edge */ |
1419 | if (IS_MF(bp)) { | 1419 | if (IS_MF(bp)) { |
1420 | val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); | 1420 | val = (0xee0f | (1 << (BP_VN(bp) + 4))); |
1421 | if (bp->port.pmf) | 1421 | if (bp->port.pmf) |
1422 | /* enable nig and gpio3 attention */ | 1422 | /* enable nig and gpio3 attention */ |
1423 | val |= 0x1100; | 1423 | val |= 0x1100; |
@@ -1469,7 +1469,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp) | |||
1469 | 1469 | ||
1470 | /* init leading/trailing edge */ | 1470 | /* init leading/trailing edge */ |
1471 | if (IS_MF(bp)) { | 1471 | if (IS_MF(bp)) { |
1472 | val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); | 1472 | val = (0xee0f | (1 << (BP_VN(bp) + 4))); |
1473 | if (bp->port.pmf) | 1473 | if (bp->port.pmf) |
1474 | /* enable nig and gpio3 attention */ | 1474 | /* enable nig and gpio3 attention */ |
1475 | val |= 0x1100; | 1475 | val |= 0x1100; |
@@ -2285,7 +2285,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) | |||
2285 | int vn; | 2285 | int vn; |
2286 | 2286 | ||
2287 | bp->vn_weight_sum = 0; | 2287 | bp->vn_weight_sum = 0; |
2288 | for (vn = VN_0; vn < E1HVN_MAX; vn++) { | 2288 | for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { |
2289 | u32 vn_cfg = bp->mf_config[vn]; | 2289 | u32 vn_cfg = bp->mf_config[vn]; |
2290 | u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> | 2290 | u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> |
2291 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | 2291 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; |
@@ -2318,12 +2318,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) | |||
2318 | CMNG_FLAGS_PER_PORT_FAIRNESS_VN; | 2318 | CMNG_FLAGS_PER_PORT_FAIRNESS_VN; |
2319 | } | 2319 | } |
2320 | 2320 | ||
2321 | /* returns func by VN for current port */ | ||
2322 | static inline int func_by_vn(struct bnx2x *bp, int vn) | ||
2323 | { | ||
2324 | return 2 * vn + BP_PORT(bp); | ||
2325 | } | ||
2326 | |||
2321 | static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) | 2327 | static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) |
2322 | { | 2328 | { |
2323 | struct rate_shaping_vars_per_vn m_rs_vn; | 2329 | struct rate_shaping_vars_per_vn m_rs_vn; |
2324 | struct fairness_vars_per_vn m_fair_vn; | 2330 | struct fairness_vars_per_vn m_fair_vn; |
2325 | u32 vn_cfg = bp->mf_config[vn]; | 2331 | u32 vn_cfg = bp->mf_config[vn]; |
2326 | int func = 2*vn + BP_PORT(bp); | 2332 | int func = func_by_vn(bp, vn); |
2327 | u16 vn_min_rate, vn_max_rate; | 2333 | u16 vn_min_rate, vn_max_rate; |
2328 | int i; | 2334 | int i; |
2329 | 2335 | ||
@@ -2420,7 +2426,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp) | |||
2420 | * | 2426 | * |
2421 | * and there are 2 functions per port | 2427 | * and there are 2 functions per port |
2422 | */ | 2428 | */ |
2423 | for (vn = VN_0; vn < E1HVN_MAX; vn++) { | 2429 | for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { |
2424 | int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); | 2430 | int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); |
2425 | 2431 | ||
2426 | if (func >= E1H_FUNC_MAX) | 2432 | if (func >= E1H_FUNC_MAX) |
@@ -2452,7 +2458,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
2452 | 2458 | ||
2453 | /* calculate and set min-max rate for each vn */ | 2459 | /* calculate and set min-max rate for each vn */ |
2454 | if (bp->port.pmf) | 2460 | if (bp->port.pmf) |
2455 | for (vn = VN_0; vn < E1HVN_MAX; vn++) | 2461 | for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) |
2456 | bnx2x_init_vn_minmax(bp, vn); | 2462 | bnx2x_init_vn_minmax(bp, vn); |
2457 | 2463 | ||
2458 | /* always enable rate shaping and fairness */ | 2464 | /* always enable rate shaping and fairness */ |
@@ -2471,16 +2477,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
2471 | 2477 | ||
2472 | static inline void bnx2x_link_sync_notify(struct bnx2x *bp) | 2478 | static inline void bnx2x_link_sync_notify(struct bnx2x *bp) |
2473 | { | 2479 | { |
2474 | int port = BP_PORT(bp); | ||
2475 | int func; | 2480 | int func; |
2476 | int vn; | 2481 | int vn; |
2477 | 2482 | ||
2478 | /* Set the attention towards other drivers on the same port */ | 2483 | /* Set the attention towards other drivers on the same port */ |
2479 | for (vn = VN_0; vn < E1HVN_MAX; vn++) { | 2484 | for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { |
2480 | if (vn == BP_E1HVN(bp)) | 2485 | if (vn == BP_VN(bp)) |
2481 | continue; | 2486 | continue; |
2482 | 2487 | ||
2483 | func = ((vn << 1) | port); | 2488 | func = func_by_vn(bp, vn); |
2484 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + | 2489 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + |
2485 | (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); | 2490 | (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); |
2486 | } | 2491 | } |
@@ -2575,7 +2580,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp) | |||
2575 | bnx2x_dcbx_pmf_update(bp); | 2580 | bnx2x_dcbx_pmf_update(bp); |
2576 | 2581 | ||
2577 | /* enable nig attention */ | 2582 | /* enable nig attention */ |
2578 | val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); | 2583 | val = (0xff0f | (1 << (BP_VN(bp) + 4))); |
2579 | if (bp->common.int_block == INT_BLOCK_HC) { | 2584 | if (bp->common.int_block == INT_BLOCK_HC) { |
2580 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); | 2585 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); |
2581 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); | 2586 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); |
@@ -2754,8 +2759,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, | |||
2754 | u16 tpa_agg_size = 0; | 2759 | u16 tpa_agg_size = 0; |
2755 | 2760 | ||
2756 | if (!fp->disable_tpa) { | 2761 | if (!fp->disable_tpa) { |
2757 | pause->sge_th_hi = 250; | 2762 | pause->sge_th_lo = SGE_TH_LO(bp); |
2758 | pause->sge_th_lo = 150; | 2763 | pause->sge_th_hi = SGE_TH_HI(bp); |
2764 | |||
2765 | /* validate SGE ring has enough to cross high threshold */ | ||
2766 | WARN_ON(bp->dropless_fc && | ||
2767 | pause->sge_th_hi + FW_PREFETCH_CNT > | ||
2768 | MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); | ||
2769 | |||
2759 | tpa_agg_size = min_t(u32, | 2770 | tpa_agg_size = min_t(u32, |
2760 | (min_t(u32, 8, MAX_SKB_FRAGS) * | 2771 | (min_t(u32, 8, MAX_SKB_FRAGS) * |
2761 | SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); | 2772 | SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); |
@@ -2769,10 +2780,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, | |||
2769 | 2780 | ||
2770 | /* pause - not for e1 */ | 2781 | /* pause - not for e1 */ |
2771 | if (!CHIP_IS_E1(bp)) { | 2782 | if (!CHIP_IS_E1(bp)) { |
2772 | pause->bd_th_hi = 350; | 2783 | pause->bd_th_lo = BD_TH_LO(bp); |
2773 | pause->bd_th_lo = 250; | 2784 | pause->bd_th_hi = BD_TH_HI(bp); |
2774 | pause->rcq_th_hi = 350; | 2785 | |
2775 | pause->rcq_th_lo = 250; | 2786 | pause->rcq_th_lo = RCQ_TH_LO(bp); |
2787 | pause->rcq_th_hi = RCQ_TH_HI(bp); | ||
2788 | /* | ||
2789 | * validate that rings have enough entries to cross | ||
2790 | * high thresholds | ||
2791 | */ | ||
2792 | WARN_ON(bp->dropless_fc && | ||
2793 | pause->bd_th_hi + FW_PREFETCH_CNT > | ||
2794 | bp->rx_ring_size); | ||
2795 | WARN_ON(bp->dropless_fc && | ||
2796 | pause->rcq_th_hi + FW_PREFETCH_CNT > | ||
2797 | NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); | ||
2776 | 2798 | ||
2777 | pause->pri_map = 1; | 2799 | pause->pri_map = 1; |
2778 | } | 2800 | } |
@@ -2800,9 +2822,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, | |||
2800 | * For PF Clients it should be the maximum avaliable number. | 2822 | * For PF Clients it should be the maximum avaliable number. |
2801 | * VF driver(s) may want to define it to a smaller value. | 2823 | * VF driver(s) may want to define it to a smaller value. |
2802 | */ | 2824 | */ |
2803 | rxq_init->max_tpa_queues = | 2825 | rxq_init->max_tpa_queues = MAX_AGG_QS(bp); |
2804 | (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
2805 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2); | ||
2806 | 2826 | ||
2807 | rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; | 2827 | rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; |
2808 | rxq_init->fw_sb_id = fp->fw_sb_id; | 2828 | rxq_init->fw_sb_id = fp->fw_sb_id; |
@@ -4804,6 +4824,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, | |||
4804 | hc_sm->time_to_expire = 0xFFFFFFFF; | 4824 | hc_sm->time_to_expire = 0xFFFFFFFF; |
4805 | } | 4825 | } |
4806 | 4826 | ||
4827 | |||
4828 | /* allocates state machine ids. */ | ||
4829 | static inline | ||
4830 | void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) | ||
4831 | { | ||
4832 | /* zero out state machine indices */ | ||
4833 | /* rx indices */ | ||
4834 | index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4835 | |||
4836 | /* tx indices */ | ||
4837 | index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4838 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4839 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4840 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4841 | |||
4842 | /* map indices */ | ||
4843 | /* rx indices */ | ||
4844 | index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= | ||
4845 | SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4846 | |||
4847 | /* tx indices */ | ||
4848 | index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= | ||
4849 | SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4850 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= | ||
4851 | SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4852 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= | ||
4853 | SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4854 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= | ||
4855 | SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4856 | } | ||
4857 | |||
4807 | static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | 4858 | static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, |
4808 | u8 vf_valid, int fw_sb_id, int igu_sb_id) | 4859 | u8 vf_valid, int fw_sb_id, int igu_sb_id) |
4809 | { | 4860 | { |
@@ -4835,6 +4886,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | |||
4835 | hc_sm_p = sb_data_e2.common.state_machine; | 4886 | hc_sm_p = sb_data_e2.common.state_machine; |
4836 | sb_data_p = (u32 *)&sb_data_e2; | 4887 | sb_data_p = (u32 *)&sb_data_e2; |
4837 | data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); | 4888 | data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); |
4889 | bnx2x_map_sb_state_machines(sb_data_e2.index_data); | ||
4838 | } else { | 4890 | } else { |
4839 | memset(&sb_data_e1x, 0, | 4891 | memset(&sb_data_e1x, 0, |
4840 | sizeof(struct hc_status_block_data_e1x)); | 4892 | sizeof(struct hc_status_block_data_e1x)); |
@@ -4849,6 +4901,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | |||
4849 | hc_sm_p = sb_data_e1x.common.state_machine; | 4901 | hc_sm_p = sb_data_e1x.common.state_machine; |
4850 | sb_data_p = (u32 *)&sb_data_e1x; | 4902 | sb_data_p = (u32 *)&sb_data_e1x; |
4851 | data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); | 4903 | data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); |
4904 | bnx2x_map_sb_state_machines(sb_data_e1x.index_data); | ||
4852 | } | 4905 | } |
4853 | 4906 | ||
4854 | bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], | 4907 | bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], |
@@ -5798,7 +5851,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) | |||
5798 | * take the UNDI lock to protect undi_unload flow from accessing | 5851 | * take the UNDI lock to protect undi_unload flow from accessing |
5799 | * registers while we're resetting the chip | 5852 | * registers while we're resetting the chip |
5800 | */ | 5853 | */ |
5801 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | 5854 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); |
5802 | 5855 | ||
5803 | bnx2x_reset_common(bp); | 5856 | bnx2x_reset_common(bp); |
5804 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); | 5857 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); |
@@ -5810,7 +5863,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) | |||
5810 | } | 5863 | } |
5811 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); | 5864 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); |
5812 | 5865 | ||
5813 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | 5866 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); |
5814 | 5867 | ||
5815 | bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); | 5868 | bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); |
5816 | 5869 | ||
@@ -6667,12 +6720,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
6667 | if (CHIP_MODE_IS_4_PORT(bp)) | 6720 | if (CHIP_MODE_IS_4_PORT(bp)) |
6668 | dsb_idx = BP_FUNC(bp); | 6721 | dsb_idx = BP_FUNC(bp); |
6669 | else | 6722 | else |
6670 | dsb_idx = BP_E1HVN(bp); | 6723 | dsb_idx = BP_VN(bp); |
6671 | 6724 | ||
6672 | prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? | 6725 | prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? |
6673 | IGU_BC_BASE_DSB_PROD + dsb_idx : | 6726 | IGU_BC_BASE_DSB_PROD + dsb_idx : |
6674 | IGU_NORM_BASE_DSB_PROD + dsb_idx); | 6727 | IGU_NORM_BASE_DSB_PROD + dsb_idx); |
6675 | 6728 | ||
6729 | /* | ||
6730 | * igu prods come in chunks of E1HVN_MAX (4) - | ||
6731 | * it does not matter what the current chip mode is | ||
6732 | */ | ||
6676 | for (i = 0; i < (num_segs * E1HVN_MAX); | 6733 | for (i = 0; i < (num_segs * E1HVN_MAX); |
6677 | i += E1HVN_MAX) { | 6734 | i += E1HVN_MAX) { |
6678 | addr = IGU_REG_PROD_CONS_MEMORY + | 6735 | addr = IGU_REG_PROD_CONS_MEMORY + |
@@ -7566,7 +7623,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7566 | u32 val; | 7623 | u32 val; |
7567 | /* The mac address is written to entries 1-4 to | 7624 | /* The mac address is written to entries 1-4 to |
7568 | preserve entry 0 which is used by the PMF */ | 7625 | preserve entry 0 which is used by the PMF */ |
7569 | u8 entry = (BP_E1HVN(bp) + 1)*8; | 7626 | u8 entry = (BP_VN(bp) + 1)*8; |
7570 | 7627 | ||
7571 | val = (mac_addr[0] << 8) | mac_addr[1]; | 7628 | val = (mac_addr[0] << 8) | mac_addr[1]; |
7572 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); | 7629 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); |
@@ -8542,10 +8599,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
8542 | /* Check if there is any driver already loaded */ | 8599 | /* Check if there is any driver already loaded */ |
8543 | val = REG_RD(bp, MISC_REG_UNPREPARED); | 8600 | val = REG_RD(bp, MISC_REG_UNPREPARED); |
8544 | if (val == 0x1) { | 8601 | if (val == 0x1) { |
8545 | /* Check if it is the UNDI driver | 8602 | |
8603 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
8604 | /* | ||
8605 | * Check if it is the UNDI driver | ||
8546 | * UNDI driver initializes CID offset for normal bell to 0x7 | 8606 | * UNDI driver initializes CID offset for normal bell to 0x7 |
8547 | */ | 8607 | */ |
8548 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
8549 | val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); | 8608 | val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); |
8550 | if (val == 0x7) { | 8609 | if (val == 0x7) { |
8551 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 8610 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
@@ -8583,9 +8642,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
8583 | bnx2x_fw_command(bp, reset_code, 0); | 8642 | bnx2x_fw_command(bp, reset_code, 0); |
8584 | } | 8643 | } |
8585 | 8644 | ||
8586 | /* now it's safe to release the lock */ | ||
8587 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
8588 | |||
8589 | bnx2x_undi_int_disable(bp); | 8645 | bnx2x_undi_int_disable(bp); |
8590 | port = BP_PORT(bp); | 8646 | port = BP_PORT(bp); |
8591 | 8647 | ||
@@ -8635,8 +8691,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
8635 | bp->fw_seq = | 8691 | bp->fw_seq = |
8636 | (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & | 8692 | (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & |
8637 | DRV_MSG_SEQ_NUMBER_MASK); | 8693 | DRV_MSG_SEQ_NUMBER_MASK); |
8638 | } else | 8694 | } |
8639 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | 8695 | |
8696 | /* now it's safe to release the lock */ | ||
8697 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
8640 | } | 8698 | } |
8641 | } | 8699 | } |
8642 | 8700 | ||
@@ -8773,13 +8831,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
8773 | static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) | 8831 | static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) |
8774 | { | 8832 | { |
8775 | int pfid = BP_FUNC(bp); | 8833 | int pfid = BP_FUNC(bp); |
8776 | int vn = BP_E1HVN(bp); | ||
8777 | int igu_sb_id; | 8834 | int igu_sb_id; |
8778 | u32 val; | 8835 | u32 val; |
8779 | u8 fid, igu_sb_cnt = 0; | 8836 | u8 fid, igu_sb_cnt = 0; |
8780 | 8837 | ||
8781 | bp->igu_base_sb = 0xff; | 8838 | bp->igu_base_sb = 0xff; |
8782 | if (CHIP_INT_MODE_IS_BC(bp)) { | 8839 | if (CHIP_INT_MODE_IS_BC(bp)) { |
8840 | int vn = BP_VN(bp); | ||
8783 | igu_sb_cnt = bp->igu_sb_cnt; | 8841 | igu_sb_cnt = bp->igu_sb_cnt; |
8784 | bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * | 8842 | bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * |
8785 | FP_SB_MAX_E1x; | 8843 | FP_SB_MAX_E1x; |
@@ -9410,6 +9468,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
9410 | bp->igu_base_sb = 0; | 9468 | bp->igu_base_sb = 0; |
9411 | } else { | 9469 | } else { |
9412 | bp->common.int_block = INT_BLOCK_IGU; | 9470 | bp->common.int_block = INT_BLOCK_IGU; |
9471 | |||
9472 | /* do not allow device reset during IGU info processing */ | ||
9473 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
9474 | |||
9413 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); | 9475 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); |
9414 | 9476 | ||
9415 | if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { | 9477 | if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { |
@@ -9441,6 +9503,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
9441 | 9503 | ||
9442 | bnx2x_get_igu_cam_info(bp); | 9504 | bnx2x_get_igu_cam_info(bp); |
9443 | 9505 | ||
9506 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
9444 | } | 9507 | } |
9445 | 9508 | ||
9446 | /* | 9509 | /* |
@@ -9467,7 +9530,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
9467 | 9530 | ||
9468 | bp->mf_ov = 0; | 9531 | bp->mf_ov = 0; |
9469 | bp->mf_mode = 0; | 9532 | bp->mf_mode = 0; |
9470 | vn = BP_E1HVN(bp); | 9533 | vn = BP_VN(bp); |
9471 | 9534 | ||
9472 | if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { | 9535 | if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { |
9473 | BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", | 9536 | BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", |
@@ -9587,13 +9650,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
9587 | /* port info */ | 9650 | /* port info */ |
9588 | bnx2x_get_port_hwinfo(bp); | 9651 | bnx2x_get_port_hwinfo(bp); |
9589 | 9652 | ||
9590 | if (!BP_NOMCP(bp)) { | ||
9591 | bp->fw_seq = | ||
9592 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & | ||
9593 | DRV_MSG_SEQ_NUMBER_MASK); | ||
9594 | BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); | ||
9595 | } | ||
9596 | |||
9597 | /* Get MAC addresses */ | 9653 | /* Get MAC addresses */ |
9598 | bnx2x_get_mac_hwinfo(bp); | 9654 | bnx2x_get_mac_hwinfo(bp); |
9599 | 9655 | ||
@@ -9759,6 +9815,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
9759 | if (!BP_NOMCP(bp)) | 9815 | if (!BP_NOMCP(bp)) |
9760 | bnx2x_undi_unload(bp); | 9816 | bnx2x_undi_unload(bp); |
9761 | 9817 | ||
9818 | /* init fw_seq after undi_unload! */ | ||
9819 | if (!BP_NOMCP(bp)) { | ||
9820 | bp->fw_seq = | ||
9821 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & | ||
9822 | DRV_MSG_SEQ_NUMBER_MASK); | ||
9823 | BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); | ||
9824 | } | ||
9825 | |||
9762 | if (CHIP_REV_IS_FPGA(bp)) | 9826 | if (CHIP_REV_IS_FPGA(bp)) |
9763 | dev_err(&bp->pdev->dev, "FPGA detected\n"); | 9827 | dev_err(&bp->pdev->dev, "FPGA detected\n"); |
9764 | 9828 | ||
@@ -10253,17 +10317,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | |||
10253 | /* clean indirect addresses */ | 10317 | /* clean indirect addresses */ |
10254 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, | 10318 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, |
10255 | PCICFG_VENDOR_ID_OFFSET); | 10319 | PCICFG_VENDOR_ID_OFFSET); |
10256 | /* Clean the following indirect addresses for all functions since it | 10320 | /* |
10321 | * Clean the following indirect addresses for all functions since it | ||
10257 | * is not used by the driver. | 10322 | * is not used by the driver. |
10258 | */ | 10323 | */ |
10259 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); | 10324 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); |
10260 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); | 10325 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); |
10261 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); | 10326 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); |
10262 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); | 10327 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); |
10263 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); | 10328 | |
10264 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); | 10329 | if (CHIP_IS_E1x(bp)) { |
10265 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); | 10330 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); |
10266 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); | 10331 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); |
10332 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); | ||
10333 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); | ||
10334 | } | ||
10267 | 10335 | ||
10268 | /* | 10336 | /* |
10269 | * Enable internal target-read (in case we are probed after PF FLR). | 10337 | * Enable internal target-read (in case we are probed after PF FLR). |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 40266c14e6dc..750e8445dac4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | |||
@@ -5320,7 +5320,7 @@ | |||
5320 | #define XCM_REG_XX_OVFL_EVNT_ID 0x20058 | 5320 | #define XCM_REG_XX_OVFL_EVNT_ID 0x20058 |
5321 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) | 5321 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) |
5322 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) | 5322 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) |
5323 | #define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3) | 5323 | #define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2) |
5324 | #define XMAC_CTRL_REG_RX_EN (0x1<<1) | 5324 | #define XMAC_CTRL_REG_RX_EN (0x1<<1) |
5325 | #define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) | 5325 | #define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) |
5326 | #define XMAC_CTRL_REG_TX_EN (0x1<<0) | 5326 | #define XMAC_CTRL_REG_TX_EN (0x1<<0) |
@@ -5766,7 +5766,7 @@ | |||
5766 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 | 5766 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 |
5767 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 | 5767 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 |
5768 | #define HW_LOCK_RESOURCE_SPIO 2 | 5768 | #define HW_LOCK_RESOURCE_SPIO 2 |
5769 | #define HW_LOCK_RESOURCE_UNDI 5 | 5769 | #define HW_LOCK_RESOURCE_RESET 5 |
5770 | #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) | 5770 | #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) |
5771 | #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) | 5771 | #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) |
5772 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) | 5772 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) |
@@ -6853,6 +6853,9 @@ The other bits are reserved and should be zero*/ | |||
6853 | #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 | 6853 | #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 |
6854 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 | 6854 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 |
6855 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 | 6855 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 |
6856 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12 | ||
6857 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000 | ||
6858 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000 | ||
6856 | #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 | 6859 | #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 |
6857 | #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 | 6860 | #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 |
6858 | #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e | 6861 | #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 628f7b99614f..02ac6a771bf9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | |||
@@ -713,7 +713,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) | |||
713 | break; | 713 | break; |
714 | 714 | ||
715 | case MAC_TYPE_NONE: /* unreached */ | 715 | case MAC_TYPE_NONE: /* unreached */ |
716 | BNX2X_ERR("stats updated by DMAE but no MAC active\n"); | 716 | DP(BNX2X_MSG_STATS, |
717 | "stats updated by DMAE but no MAC active\n"); | ||
717 | return -1; | 718 | return -1; |
718 | 719 | ||
719 | default: /* unreached */ | 720 | default: /* unreached */ |
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp) | |||
1391 | 1392 | ||
1392 | static void bnx2x_func_stats_base_init(struct bnx2x *bp) | 1393 | static void bnx2x_func_stats_base_init(struct bnx2x *bp) |
1393 | { | 1394 | { |
1394 | int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; | 1395 | int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX; |
1395 | u32 func_stx; | 1396 | u32 func_stx; |
1396 | 1397 | ||
1397 | /* sanity */ | 1398 | /* sanity */ |
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp) | |||
1404 | func_stx = bp->func_stx; | 1405 | func_stx = bp->func_stx; |
1405 | 1406 | ||
1406 | for (vn = VN_0; vn < vn_max; vn++) { | 1407 | for (vn = VN_0; vn < vn_max; vn++) { |
1407 | int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn; | 1408 | int mb_idx = BP_FW_MB_IDX_VN(bp, vn); |
1408 | 1409 | ||
1409 | bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); | 1410 | bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); |
1410 | bnx2x_func_stats_init(bp); | 1411 | bnx2x_func_stats_init(bp); |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 1485013b4b8c..26c6bd44a604 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -6738,12 +6738,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
6738 | !mss && skb->len > VLAN_ETH_FRAME_LEN) | 6738 | !mss && skb->len > VLAN_ETH_FRAME_LEN) |
6739 | base_flags |= TXD_FLAG_JMB_PKT; | 6739 | base_flags |= TXD_FLAG_JMB_PKT; |
6740 | 6740 | ||
6741 | #ifdef BCM_KERNEL_SUPPORTS_8021Q | ||
6742 | if (vlan_tx_tag_present(skb)) { | 6741 | if (vlan_tx_tag_present(skb)) { |
6743 | base_flags |= TXD_FLAG_VLAN; | 6742 | base_flags |= TXD_FLAG_VLAN; |
6744 | vlan = vlan_tx_tag_get(skb); | 6743 | vlan = vlan_tx_tag_get(skb); |
6745 | } | 6744 | } |
6746 | #endif | ||
6747 | 6745 | ||
6748 | len = skb_headlen(skb); | 6746 | len = skb_headlen(skb); |
6749 | 6747 | ||
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index f30b96fee840..212736bab6bb 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c | |||
@@ -1669,10 +1669,10 @@ static int gfar_get_cls_all(struct gfar_private *priv, | |||
1669 | u32 i = 0; | 1669 | u32 i = 0; |
1670 | 1670 | ||
1671 | list_for_each_entry(comp, &priv->rx_list.list, list) { | 1671 | list_for_each_entry(comp, &priv->rx_list.list, list) { |
1672 | if (i <= cmd->rule_cnt) { | 1672 | if (i == cmd->rule_cnt) |
1673 | rule_locs[i] = comp->fs.location; | 1673 | return -EMSGSIZE; |
1674 | i++; | 1674 | rule_locs[i] = comp->fs.location; |
1675 | } | 1675 | i++; |
1676 | } | 1676 | } |
1677 | 1677 | ||
1678 | cmd->data = MAX_FILER_IDX; | 1678 | cmd->data = MAX_FILER_IDX; |
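Aside: the gianfar hunk above swaps the loose "i <= cmd->rule_cnt" test for an exact capacity check that returns -EMSGSIZE before writing past the caller's array. The stand-alone user-space C sketch below illustrates the same bounded-copy pattern; copy_rule_locs and its arguments are invented names, not driver API:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Copy up to 'cap' rule locations into 'dst'; refuse to overflow. */
static int copy_rule_locs(const unsigned int *src, size_t nsrc,
                          unsigned int *dst, size_t cap)
{
        size_t i = 0;
        size_t n;

        for (n = 0; n < nsrc; n++) {
                if (i == cap)           /* destination full: report instead of overrunning */
                        return -EMSGSIZE;
                dst[i++] = src[n];
        }
        return (int)i;                  /* number of entries copied */
}

int main(void)
{
        unsigned int rules[] = { 3, 7, 11 };
        unsigned int out[2];

        /* Three rules into a two-entry buffer fails cleanly. */
        printf("ret = %d\n", copy_rule_locs(rules, 3, out, 2));
        return 0;
}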
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 8cca4a62b397..72b84de48756 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
@@ -395,7 +395,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada | |||
395 | } | 395 | } |
396 | 396 | ||
397 | /* recycle the current buffer on the rx queue */ | 397 | /* recycle the current buffer on the rx queue */ |
398 | static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | 398 | static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) |
399 | { | 399 | { |
400 | u32 q_index = adapter->rx_queue.index; | 400 | u32 q_index = adapter->rx_queue.index; |
401 | u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator; | 401 | u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator; |
@@ -403,6 +403,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | |||
403 | unsigned int index = correlator & 0xffffffffUL; | 403 | unsigned int index = correlator & 0xffffffffUL; |
404 | union ibmveth_buf_desc desc; | 404 | union ibmveth_buf_desc desc; |
405 | unsigned long lpar_rc; | 405 | unsigned long lpar_rc; |
406 | int ret = 1; | ||
406 | 407 | ||
407 | BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); | 408 | BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); |
408 | BUG_ON(index >= adapter->rx_buff_pool[pool].size); | 409 | BUG_ON(index >= adapter->rx_buff_pool[pool].size); |
@@ -410,7 +411,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | |||
410 | if (!adapter->rx_buff_pool[pool].active) { | 411 | if (!adapter->rx_buff_pool[pool].active) { |
411 | ibmveth_rxq_harvest_buffer(adapter); | 412 | ibmveth_rxq_harvest_buffer(adapter); |
412 | ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); | 413 | ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); |
413 | return; | 414 | goto out; |
414 | } | 415 | } |
415 | 416 | ||
416 | desc.fields.flags_len = IBMVETH_BUF_VALID | | 417 | desc.fields.flags_len = IBMVETH_BUF_VALID | |
@@ -423,12 +424,16 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | |||
423 | netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed " | 424 | netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed " |
424 | "during recycle rc=%ld", lpar_rc); | 425 | "during recycle rc=%ld", lpar_rc); |
425 | ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); | 426 | ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); |
427 | ret = 0; | ||
426 | } | 428 | } |
427 | 429 | ||
428 | if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { | 430 | if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { |
429 | adapter->rx_queue.index = 0; | 431 | adapter->rx_queue.index = 0; |
430 | adapter->rx_queue.toggle = !adapter->rx_queue.toggle; | 432 | adapter->rx_queue.toggle = !adapter->rx_queue.toggle; |
431 | } | 433 | } |
434 | |||
435 | out: | ||
436 | return ret; | ||
432 | } | 437 | } |
433 | 438 | ||
434 | static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) | 439 | static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) |
@@ -752,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) | |||
752 | struct ibmveth_adapter *adapter = netdev_priv(dev); | 757 | struct ibmveth_adapter *adapter = netdev_priv(dev); |
753 | unsigned long set_attr, clr_attr, ret_attr; | 758 | unsigned long set_attr, clr_attr, ret_attr; |
754 | unsigned long set_attr6, clr_attr6; | 759 | unsigned long set_attr6, clr_attr6; |
755 | long ret, ret6; | 760 | long ret, ret4, ret6; |
756 | int rc1 = 0, rc2 = 0; | 761 | int rc1 = 0, rc2 = 0; |
757 | int restart = 0; | 762 | int restart = 0; |
758 | 763 | ||
@@ -765,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) | |||
765 | 770 | ||
766 | set_attr = 0; | 771 | set_attr = 0; |
767 | clr_attr = 0; | 772 | clr_attr = 0; |
773 | set_attr6 = 0; | ||
774 | clr_attr6 = 0; | ||
768 | 775 | ||
769 | if (data) { | 776 | if (data) { |
770 | set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; | 777 | set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; |
@@ -779,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) | |||
779 | if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && | 786 | if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && |
780 | !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && | 787 | !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && |
781 | (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { | 788 | (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { |
782 | ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr, | 789 | ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, |
783 | set_attr, &ret_attr); | 790 | set_attr, &ret_attr); |
784 | 791 | ||
785 | if (ret != H_SUCCESS) { | 792 | if (ret4 != H_SUCCESS) { |
786 | netdev_err(dev, "unable to change IPv4 checksum " | 793 | netdev_err(dev, "unable to change IPv4 checksum " |
787 | "offload settings. %d rc=%ld\n", | 794 | "offload settings. %d rc=%ld\n", |
788 | data, ret); | 795 | data, ret4); |
796 | |||
797 | h_illan_attributes(adapter->vdev->unit_address, | ||
798 | set_attr, clr_attr, &ret_attr); | ||
799 | |||
800 | if (data == 1) | ||
801 | dev->features &= ~NETIF_F_IP_CSUM; | ||
789 | 802 | ||
790 | ret = h_illan_attributes(adapter->vdev->unit_address, | ||
791 | set_attr, clr_attr, &ret_attr); | ||
792 | } else { | 803 | } else { |
793 | adapter->fw_ipv4_csum_support = data; | 804 | adapter->fw_ipv4_csum_support = data; |
794 | } | 805 | } |
@@ -799,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) | |||
799 | if (ret6 != H_SUCCESS) { | 810 | if (ret6 != H_SUCCESS) { |
800 | netdev_err(dev, "unable to change IPv6 checksum " | 811 | netdev_err(dev, "unable to change IPv6 checksum " |
801 | "offload settings. %d rc=%ld\n", | 812 | "offload settings. %d rc=%ld\n", |
802 | data, ret); | 813 | data, ret6); |
814 | |||
815 | h_illan_attributes(adapter->vdev->unit_address, | ||
816 | set_attr6, clr_attr6, &ret_attr); | ||
817 | |||
818 | if (data == 1) | ||
819 | dev->features &= ~NETIF_F_IPV6_CSUM; | ||
803 | 820 | ||
804 | ret = h_illan_attributes(adapter->vdev->unit_address, | ||
805 | set_attr6, clr_attr6, | ||
806 | &ret_attr); | ||
807 | } else | 821 | } else |
808 | adapter->fw_ipv6_csum_support = data; | 822 | adapter->fw_ipv6_csum_support = data; |
809 | 823 | ||
810 | if (ret != H_SUCCESS || ret6 != H_SUCCESS) | 824 | if (ret4 == H_SUCCESS || ret6 == H_SUCCESS) |
811 | adapter->rx_csum = data; | 825 | adapter->rx_csum = data; |
812 | else | 826 | else |
813 | rc1 = -EIO; | 827 | rc1 = -EIO; |
@@ -925,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, | |||
925 | union ibmveth_buf_desc descs[6]; | 939 | union ibmveth_buf_desc descs[6]; |
926 | int last, i; | 940 | int last, i; |
927 | int force_bounce = 0; | 941 | int force_bounce = 0; |
942 | dma_addr_t dma_addr; | ||
928 | 943 | ||
929 | /* | 944 | /* |
930 | * veth handles a maximum of 6 segments including the header, so | 945 | * veth handles a maximum of 6 segments including the header, so |
@@ -989,17 +1004,16 @@ retry_bounce: | |||
989 | } | 1004 | } |
990 | 1005 | ||
991 | /* Map the header */ | 1006 | /* Map the header */ |
992 | descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, | 1007 | dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, |
993 | skb_headlen(skb), | 1008 | skb_headlen(skb), DMA_TO_DEVICE); |
994 | DMA_TO_DEVICE); | 1009 | if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) |
995 | if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address)) | ||
996 | goto map_failed; | 1010 | goto map_failed; |
997 | 1011 | ||
998 | descs[0].fields.flags_len = desc_flags | skb_headlen(skb); | 1012 | descs[0].fields.flags_len = desc_flags | skb_headlen(skb); |
1013 | descs[0].fields.address = dma_addr; | ||
999 | 1014 | ||
1000 | /* Map the frags */ | 1015 | /* Map the frags */ |
1001 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 1016 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
1002 | unsigned long dma_addr; | ||
1003 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1017 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1004 | 1018 | ||
1005 | dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0, | 1019 | dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0, |
@@ -1020,7 +1034,12 @@ retry_bounce: | |||
1020 | netdev->stats.tx_bytes += skb->len; | 1034 | netdev->stats.tx_bytes += skb->len; |
1021 | } | 1035 | } |
1022 | 1036 | ||
1023 | for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++) | 1037 | dma_unmap_single(&adapter->vdev->dev, |
1038 | descs[0].fields.address, | ||
1039 | descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, | ||
1040 | DMA_TO_DEVICE); | ||
1041 | |||
1042 | for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) | ||
1024 | dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, | 1043 | dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, |
1025 | descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, | 1044 | descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, |
1026 | DMA_TO_DEVICE); | 1045 | DMA_TO_DEVICE); |
@@ -1083,8 +1102,9 @@ restart_poll: | |||
1083 | if (rx_flush) | 1102 | if (rx_flush) |
1084 | ibmveth_flush_buffer(skb->data, | 1103 | ibmveth_flush_buffer(skb->data, |
1085 | length + offset); | 1104 | length + offset); |
1105 | if (!ibmveth_rxq_recycle_buffer(adapter)) | ||
1106 | kfree_skb(skb); | ||
1086 | skb = new_skb; | 1107 | skb = new_skb; |
1087 | ibmveth_rxq_recycle_buffer(adapter); | ||
1088 | } else { | 1108 | } else { |
1089 | ibmveth_rxq_harvest_buffer(adapter); | 1109 | ibmveth_rxq_harvest_buffer(adapter); |
1090 | skb_reserve(skb, offset); | 1110 | skb_reserve(skb, offset); |
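Aside: the ibmveth changes above make ibmveth_rxq_recycle_buffer() return a status so the receive path can kfree_skb() a buffer it failed to hand back rather than leak it. The small user-space C sketch below mimics that recycle-or-free ownership rule with a fixed-size pool; all names are invented and nothing here is driver code:

#include <stdio.h>
#include <stdlib.h>

#define POOL_SLOTS 4

static void *pool[POOL_SLOTS];
static int pool_fill;

/* Try to hand 'buf' back to the pool; 1 on success, 0 if it cannot be kept. */
static int recycle_buffer(void *buf)
{
        if (pool_fill == POOL_SLOTS)
                return 0;
        pool[pool_fill++] = buf;
        return 1;
}

int main(void)
{
        int i;

        for (i = 0; i < 6; i++) {
                void *buf = malloc(64);

                if (!buf)
                        break;
                if (!recycle_buffer(buf))
                        free(buf);      /* same rule as the driver: free when recycling fails */
        }
        printf("%d buffers recycled\n", pool_fill);

        while (pool_fill)               /* drain so the example itself does not leak */
                free(pool[--pool_fill]);
        return 0;
}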
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 8545c7aa93eb..a5a89ecb6f36 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c | |||
@@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) | |||
4026 | checksum += eeprom_data; | 4026 | checksum += eeprom_data; |
4027 | } | 4027 | } |
4028 | 4028 | ||
4029 | #ifdef CONFIG_PARISC | ||
4030 | /* This is a signature and not a checksum on HP c8000 */ | ||
4031 | if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) | ||
4032 | return E1000_SUCCESS; | ||
4033 | |||
4034 | #endif | ||
4029 | if (checksum == (u16) EEPROM_SUM) | 4035 | if (checksum == (u16) EEPROM_SUM) |
4030 | return E1000_SUCCESS; | 4036 | return E1000_SUCCESS; |
4031 | else { | 4037 | else { |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 49e82de136a7..08439ca60734 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -1306,8 +1306,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1306 | if (ring_is_rsc_enabled(rx_ring)) | 1306 | if (ring_is_rsc_enabled(rx_ring)) |
1307 | pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); | 1307 | pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); |
1308 | 1308 | ||
1309 | /* if this is a skb from previous receive DMA will be 0 */ | 1309 | /* linear means we are building an skb from multiple pages */ |
1310 | if (rx_buffer_info->dma) { | 1310 | if (!skb_is_nonlinear(skb)) { |
1311 | u16 hlen; | 1311 | u16 hlen; |
1312 | if (pkt_is_rsc && | 1312 | if (pkt_is_rsc && |
1313 | !(staterr & IXGBE_RXD_STAT_EOP) && | 1313 | !(staterr & IXGBE_RXD_STAT_EOP) && |
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 1a3033d8e7ed..d17d0624c5e6 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/clk.h> | 40 | #include <linux/clk.h> |
41 | #include <linux/phy.h> | 41 | #include <linux/phy.h> |
42 | #include <linux/io.h> | 42 | #include <linux/io.h> |
43 | #include <linux/interrupt.h> | ||
43 | #include <linux/types.h> | 44 | #include <linux/types.h> |
44 | #include <asm/pgtable.h> | 45 | #include <asm/pgtable.h> |
45 | #include <asm/system.h> | 46 | #include <asm/system.h> |
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index 7efa62427235..00bc4fc968c7 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | config PCH_GBE | 5 | config PCH_GBE |
6 | tristate "OKI SEMICONDUCTOR ML7223 IOH GbE (Intel EG20T PCH)" | 6 | tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" |
7 | depends on PCI | 7 | depends on PCI |
8 | select NET_CORE | 8 | select NET_CORE |
9 | select MII | 9 | select MII |
@@ -15,7 +15,8 @@ config PCH_GBE | |||
15 | to Gigabit Ethernet. This driver enables Gigabit Ethernet function. | 15 | to Gigabit Ethernet. This driver enables Gigabit Ethernet function. |
16 | 16 | ||
17 | This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ | 17 | This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ |
18 | Output Hub), ML7223. | 18 | Output Hub), ML7223/ML7831. |
19 | ML7223 IOH is for MP(Media Phone) use. | 19 | ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general |
20 | ML7223 is companion chip for Intel Atom E6xx series. | 20 | purpose use. |
21 | ML7223 is completely compatible for Intel EG20T PCH. | 21 | ML7223/ML7831 are companion chips for the Intel Atom E6xx series. |
22 | ML7223/ML7831 are completely compatible with the Intel EG20T PCH. | ||
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 59fac77d0dbb..a09a07197eb5 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | |||
@@ -127,8 +127,8 @@ struct pch_gbe_regs { | |||
127 | 127 | ||
128 | /* Reset */ | 128 | /* Reset */ |
129 | #define PCH_GBE_ALL_RST 0x80000000 /* All reset */ | 129 | #define PCH_GBE_ALL_RST 0x80000000 /* All reset */ |
130 | #define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */ | 130 | #define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */ |
131 | #define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */ | 131 | #define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */ |
132 | 132 | ||
133 | /* TCP/IP Accelerator Control */ | 133 | /* TCP/IP Accelerator Control */ |
134 | #define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */ | 134 | #define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */ |
@@ -276,6 +276,9 @@ struct pch_gbe_regs { | |||
276 | #define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */ | 276 | #define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */ |
277 | #define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */ | 277 | #define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */ |
278 | 278 | ||
279 | /* RX DMA STATUS */ | ||
280 | #define PCH_GBE_IDLE_CHECK 0xFFFFFFFE | ||
281 | |||
279 | /* Wake On LAN Status */ | 282 | /* Wake On LAN Status */ |
280 | #define PCH_GBE_WLS_BR 0x00000008 /* Broadcas Address */ | 283 | #define PCH_GBE_WLS_BR 0x00000008 /* Broadcas Address */ |
281 | #define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */ | 284 | #define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */ |
@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc { | |||
471 | struct pch_gbe_buffer { | 474 | struct pch_gbe_buffer { |
472 | struct sk_buff *skb; | 475 | struct sk_buff *skb; |
473 | dma_addr_t dma; | 476 | dma_addr_t dma; |
477 | unsigned char *rx_buffer; | ||
474 | unsigned long time_stamp; | 478 | unsigned long time_stamp; |
475 | u16 length; | 479 | u16 length; |
476 | bool mapped; | 480 | bool mapped; |
@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring { | |||
511 | struct pch_gbe_rx_ring { | 515 | struct pch_gbe_rx_ring { |
512 | struct pch_gbe_rx_desc *desc; | 516 | struct pch_gbe_rx_desc *desc; |
513 | dma_addr_t dma; | 517 | dma_addr_t dma; |
518 | unsigned char *rx_buff_pool; | ||
519 | dma_addr_t rx_buff_pool_logic; | ||
520 | unsigned int rx_buff_pool_size; | ||
514 | unsigned int size; | 521 | unsigned int size; |
515 | unsigned int count; | 522 | unsigned int count; |
516 | unsigned int next_to_use; | 523 | unsigned int next_to_use; |
@@ -622,6 +629,7 @@ struct pch_gbe_adapter { | |||
622 | unsigned long rx_buffer_len; | 629 | unsigned long rx_buffer_len; |
623 | unsigned long tx_queue_len; | 630 | unsigned long tx_queue_len; |
624 | bool have_msi; | 631 | bool have_msi; |
632 | bool rx_stop_flag; | ||
625 | }; | 633 | }; |
626 | 634 | ||
627 | extern const char pch_driver_version[]; | 635 | extern const char pch_driver_version[]; |
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 72276fe78f8f..35a7c21680b3 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | |||
@@ -20,7 +20,6 @@ | |||
20 | 20 | ||
21 | #include "pch_gbe.h" | 21 | #include "pch_gbe.h" |
22 | #include "pch_gbe_api.h" | 22 | #include "pch_gbe_api.h" |
23 | #include <linux/prefetch.h> | ||
24 | 23 | ||
25 | #define DRV_VERSION "1.00" | 24 | #define DRV_VERSION "1.00" |
26 | const char pch_driver_version[] = DRV_VERSION; | 25 | const char pch_driver_version[] = DRV_VERSION; |
@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION; | |||
34 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ | 33 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ |
35 | #define PCH_GBE_COPYBREAK_DEFAULT 256 | 34 | #define PCH_GBE_COPYBREAK_DEFAULT 256 |
36 | #define PCH_GBE_PCI_BAR 1 | 35 | #define PCH_GBE_PCI_BAR 1 |
36 | #define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */ | ||
37 | 37 | ||
38 | /* Macros for ML7223 */ | 38 | /* Macros for ML7223 */ |
39 | #define PCI_VENDOR_ID_ROHM 0x10db | 39 | #define PCI_VENDOR_ID_ROHM 0x10db |
40 | #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 | 40 | #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 |
41 | 41 | ||
42 | /* Macros for ML7831 */ | ||
43 | #define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802 | ||
44 | |||
42 | #define PCH_GBE_TX_WEIGHT 64 | 45 | #define PCH_GBE_TX_WEIGHT 64 |
43 | #define PCH_GBE_RX_WEIGHT 64 | 46 | #define PCH_GBE_RX_WEIGHT 64 |
44 | #define PCH_GBE_RX_BUFFER_WRITE 16 | 47 | #define PCH_GBE_RX_BUFFER_WRITE 16 |
@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION; | |||
52 | ) | 55 | ) |
53 | 56 | ||
54 | /* Ethertype field values */ | 57 | /* Ethertype field values */ |
58 | #define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880 | ||
55 | #define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318 | 59 | #define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318 |
56 | #define PCH_GBE_FRAME_SIZE_2048 2048 | 60 | #define PCH_GBE_FRAME_SIZE_2048 2048 |
57 | #define PCH_GBE_FRAME_SIZE_4096 4096 | 61 | #define PCH_GBE_FRAME_SIZE_4096 4096 |
@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION; | |||
83 | #define PCH_GBE_INT_ENABLE_MASK ( \ | 87 | #define PCH_GBE_INT_ENABLE_MASK ( \ |
84 | PCH_GBE_INT_RX_DMA_CMPLT | \ | 88 | PCH_GBE_INT_RX_DMA_CMPLT | \ |
85 | PCH_GBE_INT_RX_DSC_EMP | \ | 89 | PCH_GBE_INT_RX_DSC_EMP | \ |
90 | PCH_GBE_INT_RX_FIFO_ERR | \ | ||
86 | PCH_GBE_INT_WOL_DET | \ | 91 | PCH_GBE_INT_WOL_DET | \ |
87 | PCH_GBE_INT_TX_CMPLT \ | 92 | PCH_GBE_INT_TX_CMPLT \ |
88 | ) | 93 | ) |
89 | 94 | ||
95 | #define PCH_GBE_INT_DISABLE_ALL 0 | ||
90 | 96 | ||
91 | static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; | 97 | static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; |
92 | 98 | ||
@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit) | |||
138 | if (!tmp) | 144 | if (!tmp) |
139 | pr_err("Error: busy bit is not cleared\n"); | 145 | pr_err("Error: busy bit is not cleared\n"); |
140 | } | 146 | } |
147 | |||
148 | /** | ||
149 | * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context | ||
150 | * @reg: Pointer of register | ||
151 | * @bit: Busy bit | ||
152 | */ | ||
153 | static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit) | ||
154 | { | ||
155 | u32 tmp; | ||
156 | int ret = -1; | ||
157 | /* wait busy */ | ||
158 | tmp = 20; | ||
159 | while ((ioread32(reg) & bit) && --tmp) | ||
160 | udelay(5); | ||
161 | if (!tmp) | ||
162 | pr_err("Error: busy bit is not cleared\n"); | ||
163 | else | ||
164 | ret = 0; | ||
165 | return ret; | ||
166 | } | ||
167 | |||
141 | /** | 168 | /** |
142 | * pch_gbe_mac_mar_set - Set MAC address register | 169 | * pch_gbe_mac_mar_set - Set MAC address register |
143 | * @hw: Pointer to the HW structure | 170 | * @hw: Pointer to the HW structure |
@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) | |||
189 | return; | 216 | return; |
190 | } | 217 | } |
191 | 218 | ||
219 | static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) | ||
220 | { | ||
221 | /* Read the MAC address and store it in the private data */ | ||
222 | pch_gbe_mac_read_mac_addr(hw); | ||
223 | iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); | ||
224 | pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); | ||
225 | /* Setup the MAC address */ | ||
226 | pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); | ||
227 | return; | ||
228 | } | ||
229 | |||
192 | /** | 230 | /** |
193 | * pch_gbe_mac_init_rx_addrs - Initialize receive address's | 231 | * pch_gbe_mac_init_rx_addrs - Initialize receive address's |
194 | * @hw: Pointer to the HW structure | 232 | * @hw: Pointer to the HW structure |
@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter) | |||
671 | 709 | ||
672 | tcpip = ioread32(&hw->reg->TCPIP_ACC); | 710 | tcpip = ioread32(&hw->reg->TCPIP_ACC); |
673 | 711 | ||
674 | if (netdev->features & NETIF_F_RXCSUM) { | 712 | tcpip |= PCH_GBE_RX_TCPIPACC_OFF; |
675 | tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF; | 713 | tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; |
676 | tcpip |= PCH_GBE_RX_TCPIPACC_EN; | ||
677 | } else { | ||
678 | tcpip |= PCH_GBE_RX_TCPIPACC_OFF; | ||
679 | tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; | ||
680 | } | ||
681 | iowrite32(tcpip, &hw->reg->TCPIP_ACC); | 714 | iowrite32(tcpip, &hw->reg->TCPIP_ACC); |
682 | return; | 715 | return; |
683 | } | 716 | } |
@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) | |||
717 | iowrite32(rdba, &hw->reg->RX_DSC_BASE); | 750 | iowrite32(rdba, &hw->reg->RX_DSC_BASE); |
718 | iowrite32(rdlen, &hw->reg->RX_DSC_SIZE); | 751 | iowrite32(rdlen, &hw->reg->RX_DSC_SIZE); |
719 | iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); | 752 | iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); |
720 | |||
721 | /* Enables Receive DMA */ | ||
722 | rxdma = ioread32(&hw->reg->DMA_CTRL); | ||
723 | rxdma |= PCH_GBE_RX_DMA_EN; | ||
724 | iowrite32(rxdma, &hw->reg->DMA_CTRL); | ||
725 | /* Enables Receive */ | ||
726 | iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); | ||
727 | } | 753 | } |
728 | 754 | ||
729 | /** | 755 | /** |
@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter) | |||
1097 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 1123 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
1098 | } | 1124 | } |
1099 | 1125 | ||
1126 | static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter) | ||
1127 | { | ||
1128 | struct pch_gbe_hw *hw = &adapter->hw; | ||
1129 | u32 rxdma; | ||
1130 | u16 value; | ||
1131 | int ret; | ||
1132 | |||
1133 | /* Disable Receive DMA */ | ||
1134 | rxdma = ioread32(&hw->reg->DMA_CTRL); | ||
1135 | rxdma &= ~PCH_GBE_RX_DMA_EN; | ||
1136 | iowrite32(rxdma, &hw->reg->DMA_CTRL); | ||
1137 | /* Wait until the Rx DMA bus is idle */ | ||
1138 | ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK); | ||
1139 | if (ret) { | ||
1140 | /* Disable Bus master */ | ||
1141 | pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); | ||
1142 | value &= ~PCI_COMMAND_MASTER; | ||
1143 | pci_write_config_word(adapter->pdev, PCI_COMMAND, value); | ||
1144 | /* Stop Receive */ | ||
1145 | pch_gbe_mac_reset_rx(hw); | ||
1146 | /* Enable Bus master */ | ||
1147 | value |= PCI_COMMAND_MASTER; | ||
1148 | pci_write_config_word(adapter->pdev, PCI_COMMAND, value); | ||
1149 | } else { | ||
1150 | /* Stop Receive */ | ||
1151 | pch_gbe_mac_reset_rx(hw); | ||
1152 | } | ||
1153 | } | ||
1154 | |||
1155 | static void pch_gbe_start_receive(struct pch_gbe_hw *hw) | ||
1156 | { | ||
1157 | u32 rxdma; | ||
1158 | |||
1159 | /* Enables Receive DMA */ | ||
1160 | rxdma = ioread32(&hw->reg->DMA_CTRL); | ||
1161 | rxdma |= PCH_GBE_RX_DMA_EN; | ||
1162 | iowrite32(rxdma, &hw->reg->DMA_CTRL); | ||
1163 | /* Enables Receive */ | ||
1164 | iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); | ||
1165 | return; | ||
1166 | } | ||
1167 | |||
1100 | /** | 1168 | /** |
1101 | * pch_gbe_intr - Interrupt Handler | 1169 | * pch_gbe_intr - Interrupt Handler |
1102 | * @irq: Interrupt number | 1170 | * @irq: Interrupt number |
@@ -1123,7 +1191,15 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) | |||
1123 | if (int_st & PCH_GBE_INT_RX_FRAME_ERR) | 1191 | if (int_st & PCH_GBE_INT_RX_FRAME_ERR) |
1124 | adapter->stats.intr_rx_frame_err_count++; | 1192 | adapter->stats.intr_rx_frame_err_count++; |
1125 | if (int_st & PCH_GBE_INT_RX_FIFO_ERR) | 1193 | if (int_st & PCH_GBE_INT_RX_FIFO_ERR) |
1126 | adapter->stats.intr_rx_fifo_err_count++; | 1194 | if (!adapter->rx_stop_flag) { |
1195 | adapter->stats.intr_rx_fifo_err_count++; | ||
1196 | pr_debug("Rx fifo over run\n"); | ||
1197 | adapter->rx_stop_flag = true; | ||
1198 | int_en = ioread32(&hw->reg->INT_EN); | ||
1199 | iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), | ||
1200 | &hw->reg->INT_EN); | ||
1201 | pch_gbe_stop_receive(adapter); | ||
1202 | } | ||
1127 | if (int_st & PCH_GBE_INT_RX_DMA_ERR) | 1203 | if (int_st & PCH_GBE_INT_RX_DMA_ERR) |
1128 | adapter->stats.intr_rx_dma_err_count++; | 1204 | adapter->stats.intr_rx_dma_err_count++; |
1129 | if (int_st & PCH_GBE_INT_TX_FIFO_ERR) | 1205 | if (int_st & PCH_GBE_INT_TX_FIFO_ERR) |
@@ -1135,7 +1211,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) | |||
1135 | /* When Rx descriptor is empty */ | 1211 | /* When Rx descriptor is empty */ |
1136 | if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { | 1212 | if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { |
1137 | adapter->stats.intr_rx_dsc_empty_count++; | 1213 | adapter->stats.intr_rx_dsc_empty_count++; |
1138 | pr_err("Rx descriptor is empty\n"); | 1214 | pr_debug("Rx descriptor is empty\n"); |
1139 | int_en = ioread32(&hw->reg->INT_EN); | 1215 | int_en = ioread32(&hw->reg->INT_EN); |
1140 | iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); | 1216 | iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); |
1141 | if (hw->mac.tx_fc_enable) { | 1217 | if (hw->mac.tx_fc_enable) { |
@@ -1185,29 +1261,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter, | |||
1185 | unsigned int i; | 1261 | unsigned int i; |
1186 | unsigned int bufsz; | 1262 | unsigned int bufsz; |
1187 | 1263 | ||
1188 | bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN; | 1264 | bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; |
1189 | i = rx_ring->next_to_use; | 1265 | i = rx_ring->next_to_use; |
1190 | 1266 | ||
1191 | while ((cleaned_count--)) { | 1267 | while ((cleaned_count--)) { |
1192 | buffer_info = &rx_ring->buffer_info[i]; | 1268 | buffer_info = &rx_ring->buffer_info[i]; |
1193 | skb = buffer_info->skb; | 1269 | skb = netdev_alloc_skb(netdev, bufsz); |
1194 | if (skb) { | 1270 | if (unlikely(!skb)) { |
1195 | skb_trim(skb, 0); | 1271 | /* Better luck next round */ |
1196 | } else { | 1272 | adapter->stats.rx_alloc_buff_failed++; |
1197 | skb = netdev_alloc_skb(netdev, bufsz); | 1273 | break; |
1198 | if (unlikely(!skb)) { | ||
1199 | /* Better luck next round */ | ||
1200 | adapter->stats.rx_alloc_buff_failed++; | ||
1201 | break; | ||
1202 | } | ||
1203 | /* 64byte align */ | ||
1204 | skb_reserve(skb, PCH_GBE_DMA_ALIGN); | ||
1205 | |||
1206 | buffer_info->skb = skb; | ||
1207 | buffer_info->length = adapter->rx_buffer_len; | ||
1208 | } | 1274 | } |
1275 | /* align */ | ||
1276 | skb_reserve(skb, NET_IP_ALIGN); | ||
1277 | buffer_info->skb = skb; | ||
1278 | |||
1209 | buffer_info->dma = dma_map_single(&pdev->dev, | 1279 | buffer_info->dma = dma_map_single(&pdev->dev, |
1210 | skb->data, | 1280 | buffer_info->rx_buffer, |
1211 | buffer_info->length, | 1281 | buffer_info->length, |
1212 | DMA_FROM_DEVICE); | 1282 | DMA_FROM_DEVICE); |
1213 | if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { | 1283 | if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { |
@@ -1240,6 +1310,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter, | |||
1240 | return; | 1310 | return; |
1241 | } | 1311 | } |
1242 | 1312 | ||
1313 | static int | ||
1314 | pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, | ||
1315 | struct pch_gbe_rx_ring *rx_ring, int cleaned_count) | ||
1316 | { | ||
1317 | struct pci_dev *pdev = adapter->pdev; | ||
1318 | struct pch_gbe_buffer *buffer_info; | ||
1319 | unsigned int i; | ||
1320 | unsigned int bufsz; | ||
1321 | unsigned int size; | ||
1322 | |||
1323 | bufsz = adapter->rx_buffer_len; | ||
1324 | |||
1325 | size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; | ||
1326 | rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, | ||
1327 | &rx_ring->rx_buff_pool_logic, | ||
1328 | GFP_KERNEL); | ||
1329 | if (!rx_ring->rx_buff_pool) { | ||
1330 | pr_err("Unable to allocate memory for the receive pool buffer\n"); | ||
1331 | return -ENOMEM; | ||
1332 | } | ||
1333 | memset(rx_ring->rx_buff_pool, 0, size); | ||
1334 | rx_ring->rx_buff_pool_size = size; | ||
1335 | for (i = 0; i < rx_ring->count; i++) { | ||
1336 | buffer_info = &rx_ring->buffer_info[i]; | ||
1337 | buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i; | ||
1338 | buffer_info->length = bufsz; | ||
1339 | } | ||
1340 | return 0; | ||
1341 | } | ||
1342 | |||
1243 | /** | 1343 | /** |
1244 | * pch_gbe_alloc_tx_buffers - Allocate transmit buffers | 1344 | * pch_gbe_alloc_tx_buffers - Allocate transmit buffers |
1245 | * @adapter: Board private structure | 1345 | * @adapter: Board private structure |
@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1380 | unsigned int i; | 1480 | unsigned int i; |
1381 | unsigned int cleaned_count = 0; | 1481 | unsigned int cleaned_count = 0; |
1382 | bool cleaned = false; | 1482 | bool cleaned = false; |
1383 | struct sk_buff *skb, *new_skb; | 1483 | struct sk_buff *skb; |
1384 | u8 dma_status; | 1484 | u8 dma_status; |
1385 | u16 gbec_status; | 1485 | u16 gbec_status; |
1386 | u32 tcp_ip_status; | 1486 | u32 tcp_ip_status; |
@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1401 | rx_desc->gbec_status = DSC_INIT16; | 1501 | rx_desc->gbec_status = DSC_INIT16; |
1402 | buffer_info = &rx_ring->buffer_info[i]; | 1502 | buffer_info = &rx_ring->buffer_info[i]; |
1403 | skb = buffer_info->skb; | 1503 | skb = buffer_info->skb; |
1504 | buffer_info->skb = NULL; | ||
1404 | 1505 | ||
1405 | /* unmap dma */ | 1506 | /* unmap dma */ |
1406 | dma_unmap_single(&pdev->dev, buffer_info->dma, | 1507 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
1407 | buffer_info->length, DMA_FROM_DEVICE); | 1508 | buffer_info->length, DMA_FROM_DEVICE); |
1408 | buffer_info->mapped = false; | 1509 | buffer_info->mapped = false; |
1409 | /* Prefetch the packet */ | ||
1410 | prefetch(skb->data); | ||
1411 | 1510 | ||
1412 | pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " | 1511 | pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " |
1413 | "TCP:0x%08x] BufInf = 0x%p\n", | 1512 | "TCP:0x%08x] BufInf = 0x%p\n", |
@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1427 | pr_err("Receive CRC Error\n"); | 1526 | pr_err("Receive CRC Error\n"); |
1428 | } else { | 1527 | } else { |
1429 | /* get receive length */ | 1528 | /* get receive length */ |
1430 | /* length convert[-3] */ | 1529 | /* length convert[-3], length includes FCS length */ |
1431 | length = (rx_desc->rx_words_eob) - 3; | 1530 | length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN; |
1432 | 1531 | if (rx_desc->rx_words_eob & 0x02) | |
1433 | /* Decide the data conversion method */ | 1532 | length = length - 4; |
1434 | if (!(netdev->features & NETIF_F_RXCSUM)) { | 1533 | /* |
1435 | /* [Header:14][payload] */ | 1534 | * buffer_info->rx_buffer: [Header:14][payload] |
1436 | if (NET_IP_ALIGN) { | 1535 | * skb->data: [Reserve:2][Header:14][payload] |
1437 | /* Because alignment differs, | 1536 | */ |
1438 | * the new_skb is newly allocated, | 1537 | memcpy(skb->data, buffer_info->rx_buffer, length); |
1439 | * and data is copied to new_skb.*/ | 1538 | |
1440 | new_skb = netdev_alloc_skb(netdev, | ||
1441 | length + NET_IP_ALIGN); | ||
1442 | if (!new_skb) { | ||
1443 | /* dorrop error */ | ||
1444 | pr_err("New skb allocation " | ||
1445 | "Error\n"); | ||
1446 | goto dorrop; | ||
1447 | } | ||
1448 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1449 | memcpy(new_skb->data, skb->data, | ||
1450 | length); | ||
1451 | skb = new_skb; | ||
1452 | } else { | ||
1453 | /* DMA buffer is used as SKB as it is.*/ | ||
1454 | buffer_info->skb = NULL; | ||
1455 | } | ||
1456 | } else { | ||
1457 | /* [Header:14][padding:2][payload] */ | ||
1458 | /* The length includes padding length */ | ||
1459 | length = length - PCH_GBE_DMA_PADDING; | ||
1460 | if ((length < copybreak) || | ||
1461 | (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { | ||
1462 | /* Because alignment differs, | ||
1463 | * the new_skb is newly allocated, | ||
1464 | * and data is copied to new_skb. | ||
1465 | * Padding data is deleted | ||
1466 | * at the time of a copy.*/ | ||
1467 | new_skb = netdev_alloc_skb(netdev, | ||
1468 | length + NET_IP_ALIGN); | ||
1469 | if (!new_skb) { | ||
1470 | /* dorrop error */ | ||
1471 | pr_err("New skb allocation " | ||
1472 | "Error\n"); | ||
1473 | goto dorrop; | ||
1474 | } | ||
1475 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1476 | memcpy(new_skb->data, skb->data, | ||
1477 | ETH_HLEN); | ||
1478 | memcpy(&new_skb->data[ETH_HLEN], | ||
1479 | &skb->data[ETH_HLEN + | ||
1480 | PCH_GBE_DMA_PADDING], | ||
1481 | length - ETH_HLEN); | ||
1482 | skb = new_skb; | ||
1483 | } else { | ||
1484 | /* Padding data is deleted | ||
1485 | * by moving header data.*/ | ||
1486 | memmove(&skb->data[PCH_GBE_DMA_PADDING], | ||
1487 | &skb->data[0], ETH_HLEN); | ||
1488 | skb_reserve(skb, NET_IP_ALIGN); | ||
1489 | buffer_info->skb = NULL; | ||
1490 | } | ||
1491 | } | ||
1492 | /* The length includes FCS length */ | ||
1493 | length = length - ETH_FCS_LEN; | ||
1494 | /* update status of driver */ | 1539 | /* update status of driver */ |
1495 | adapter->stats.rx_bytes += length; | 1540 | adapter->stats.rx_bytes += length; |
1496 | adapter->stats.rx_packets++; | 1541 | adapter->stats.rx_packets++; |
@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1509 | pr_debug("Receive skb->ip_summed: %d length: %d\n", | 1554 | pr_debug("Receive skb->ip_summed: %d length: %d\n", |
1510 | skb->ip_summed, length); | 1555 | skb->ip_summed, length); |
1511 | } | 1556 | } |
1512 | dorrop: | ||
1513 | /* return some buffers to hardware, one at a time is too slow */ | 1557 | /* return some buffers to hardware, one at a time is too slow */ |
1514 | if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { | 1558 | if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { |
1515 | pch_gbe_alloc_rx_buffers(adapter, rx_ring, | 1559 | pch_gbe_alloc_rx_buffers(adapter, rx_ring, |
@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) | |||
1714 | pr_err("Error: can't bring device up\n"); | 1758 | pr_err("Error: can't bring device up\n"); |
1715 | return err; | 1759 | return err; |
1716 | } | 1760 | } |
1761 | err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); | ||
1762 | if (err) { | ||
1763 | pr_err("Error: can't bring device up\n"); | ||
1764 | return err; | ||
1765 | } | ||
1717 | pch_gbe_alloc_tx_buffers(adapter, tx_ring); | 1766 | pch_gbe_alloc_tx_buffers(adapter, tx_ring); |
1718 | pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); | 1767 | pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); |
1719 | adapter->tx_queue_len = netdev->tx_queue_len; | 1768 | adapter->tx_queue_len = netdev->tx_queue_len; |
1769 | pch_gbe_start_receive(&adapter->hw); | ||
1720 | 1770 | ||
1721 | mod_timer(&adapter->watchdog_timer, jiffies); | 1771 | mod_timer(&adapter->watchdog_timer, jiffies); |
1722 | 1772 | ||
@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) | |||
1734 | void pch_gbe_down(struct pch_gbe_adapter *adapter) | 1784 | void pch_gbe_down(struct pch_gbe_adapter *adapter) |
1735 | { | 1785 | { |
1736 | struct net_device *netdev = adapter->netdev; | 1786 | struct net_device *netdev = adapter->netdev; |
1787 | struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; | ||
1737 | 1788 | ||
1738 | /* signal that we're down so the interrupt handler does not | 1789 | /* signal that we're down so the interrupt handler does not |
1739 | * reschedule our watchdog timer */ | 1790 | * reschedule our watchdog timer */ |
@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter) | |||
1752 | pch_gbe_reset(adapter); | 1803 | pch_gbe_reset(adapter); |
1753 | pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); | 1804 | pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); |
1754 | pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); | 1805 | pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); |
1806 | |||
1807 | pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size, | ||
1808 | rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic); | ||
1809 | rx_ring->rx_buff_pool_logic = 0; | ||
1810 | rx_ring->rx_buff_pool_size = 0; | ||
1811 | rx_ring->rx_buff_pool = NULL; | ||
1755 | } | 1812 | } |
1756 | 1813 | ||
1757 | /** | 1814 | /** |
@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) | |||
2004 | { | 2061 | { |
2005 | struct pch_gbe_adapter *adapter = netdev_priv(netdev); | 2062 | struct pch_gbe_adapter *adapter = netdev_priv(netdev); |
2006 | int max_frame; | 2063 | int max_frame; |
2064 | unsigned long old_rx_buffer_len = adapter->rx_buffer_len; | ||
2065 | int err; | ||
2007 | 2066 | ||
2008 | max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | 2067 | max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; |
2009 | if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || | 2068 | if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || |
@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) | |||
2018 | else if (max_frame <= PCH_GBE_FRAME_SIZE_8192) | 2077 | else if (max_frame <= PCH_GBE_FRAME_SIZE_8192) |
2019 | adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192; | 2078 | adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192; |
2020 | else | 2079 | else |
2021 | adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE; | 2080 | adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE; |
2022 | netdev->mtu = new_mtu; | ||
2023 | adapter->hw.mac.max_frame_size = max_frame; | ||
2024 | 2081 | ||
2025 | if (netif_running(netdev)) | 2082 | if (netif_running(netdev)) { |
2026 | pch_gbe_reinit_locked(adapter); | 2083 | pch_gbe_down(adapter); |
2027 | else | 2084 | err = pch_gbe_up(adapter); |
2085 | if (err) { | ||
2086 | adapter->rx_buffer_len = old_rx_buffer_len; | ||
2087 | pch_gbe_up(adapter); | ||
2088 | return -ENOMEM; | ||
2089 | } else { | ||
2090 | netdev->mtu = new_mtu; | ||
2091 | adapter->hw.mac.max_frame_size = max_frame; | ||
2092 | } | ||
2093 | } else { | ||
2028 | pch_gbe_reset(adapter); | 2094 | pch_gbe_reset(adapter); |
2095 | netdev->mtu = new_mtu; | ||
2096 | adapter->hw.mac.max_frame_size = max_frame; | ||
2097 | } | ||
2029 | 2098 | ||
2030 | pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", | 2099 | pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", |
2031 | max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, | 2100 | max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, |
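The change_mtu hunk above no longer updates netdev->mtu unconditionally: the interface is taken down, brought back up with the new buffer length, and the new MTU is committed only if that bring-up succeeds. A condensed sketch of the pattern (function and field names are taken from the hunk, new_rx_buffer_len stands in for the size chosen by the frame-size ladder, error handling simplified):

        unsigned long old_rx_buffer_len = adapter->rx_buffer_len;

        adapter->rx_buffer_len = new_rx_buffer_len;

        if (netif_running(netdev)) {
                pch_gbe_down(adapter);
                if (pch_gbe_up(adapter)) {
                        /* could not come back up with the larger buffers:
                         * restore the old length and recover */
                        adapter->rx_buffer_len = old_rx_buffer_len;
                        pch_gbe_up(adapter);
                        return -ENOMEM;
                }
        } else {
                pch_gbe_reset(adapter);
        }

        /* commit only once the device is usable again */
        netdev->mtu = new_mtu;
        adapter->hw.mac.max_frame_size = max_frame;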
@@ -2103,6 +2172,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) | |||
2103 | int work_done = 0; | 2172 | int work_done = 0; |
2104 | bool poll_end_flag = false; | 2173 | bool poll_end_flag = false; |
2105 | bool cleaned = false; | 2174 | bool cleaned = false; |
2175 | u32 int_en; | ||
2106 | 2176 | ||
2107 | pr_debug("budget : %d\n", budget); | 2177 | pr_debug("budget : %d\n", budget); |
2108 | 2178 | ||
@@ -2110,8 +2180,15 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) | |||
2110 | if (!netif_carrier_ok(netdev)) { | 2180 | if (!netif_carrier_ok(netdev)) { |
2111 | poll_end_flag = true; | 2181 | poll_end_flag = true; |
2112 | } else { | 2182 | } else { |
2113 | cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); | ||
2114 | pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); | 2183 | pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); |
2184 | if (adapter->rx_stop_flag) { | ||
2185 | adapter->rx_stop_flag = false; | ||
2186 | pch_gbe_start_receive(&adapter->hw); | ||
2187 | int_en = ioread32(&adapter->hw.reg->INT_EN); | ||
2188 | iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), | ||
2189 | &adapter->hw.reg->INT_EN); | ||
2190 | } | ||
2191 | cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); | ||
2115 | 2192 | ||
2116 | if (cleaned) | 2193 | if (cleaned) |
2117 | work_done = budget; | 2194 | work_done = budget; |
@@ -2452,6 +2529,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { | |||
2452 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), | 2529 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), |
2453 | .class_mask = (0xFFFF00) | 2530 | .class_mask = (0xFFFF00) |
2454 | }, | 2531 | }, |
2532 | {.vendor = PCI_VENDOR_ID_ROHM, | ||
2533 | .device = PCI_DEVICE_ID_ROHM_ML7831_GBE, | ||
2534 | .subvendor = PCI_ANY_ID, | ||
2535 | .subdevice = PCI_ANY_ID, | ||
2536 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), | ||
2537 | .class_mask = (0xFFFF00) | ||
2538 | }, | ||
2455 | /* required last entry */ | 2539 | /* required last entry */ |
2456 | {0} | 2540 | {0} |
2457 | }; | 2541 | }; |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 835bbb534c5d..6eb9f4ea3bfd 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -407,6 +407,7 @@ enum rtl_register_content { | |||
407 | RxOK = 0x0001, | 407 | RxOK = 0x0001, |
408 | 408 | ||
409 | /* RxStatusDesc */ | 409 | /* RxStatusDesc */ |
410 | RxBOVF = (1 << 24), | ||
410 | RxFOVF = (1 << 23), | 411 | RxFOVF = (1 << 23), |
411 | RxRWT = (1 << 22), | 412 | RxRWT = (1 << 22), |
412 | RxRES = (1 << 21), | 413 | RxRES = (1 << 21), |
@@ -682,6 +683,7 @@ struct rtl8169_private { | |||
682 | struct mii_if_info mii; | 683 | struct mii_if_info mii; |
683 | struct rtl8169_counters counters; | 684 | struct rtl8169_counters counters; |
684 | u32 saved_wolopts; | 685 | u32 saved_wolopts; |
686 | u32 opts1_mask; | ||
685 | 687 | ||
686 | struct rtl_fw { | 688 | struct rtl_fw { |
687 | const struct firmware *fw; | 689 | const struct firmware *fw; |
@@ -710,6 +712,7 @@ MODULE_FIRMWARE(FIRMWARE_8168D_1); | |||
710 | MODULE_FIRMWARE(FIRMWARE_8168D_2); | 712 | MODULE_FIRMWARE(FIRMWARE_8168D_2); |
711 | MODULE_FIRMWARE(FIRMWARE_8168E_1); | 713 | MODULE_FIRMWARE(FIRMWARE_8168E_1); |
712 | MODULE_FIRMWARE(FIRMWARE_8168E_2); | 714 | MODULE_FIRMWARE(FIRMWARE_8168E_2); |
715 | MODULE_FIRMWARE(FIRMWARE_8168E_3); | ||
713 | MODULE_FIRMWARE(FIRMWARE_8105E_1); | 716 | MODULE_FIRMWARE(FIRMWARE_8105E_1); |
714 | 717 | ||
715 | static int rtl8169_open(struct net_device *dev); | 718 | static int rtl8169_open(struct net_device *dev); |
@@ -3077,6 +3080,14 @@ static void rtl8169_phy_reset(struct net_device *dev, | |||
3077 | netif_err(tp, link, dev, "PHY reset failed\n"); | 3080 | netif_err(tp, link, dev, "PHY reset failed\n"); |
3078 | } | 3081 | } |
3079 | 3082 | ||
3083 | static bool rtl_tbi_enabled(struct rtl8169_private *tp) | ||
3084 | { | ||
3085 | void __iomem *ioaddr = tp->mmio_addr; | ||
3086 | |||
3087 | return (tp->mac_version == RTL_GIGA_MAC_VER_01) && | ||
3088 | (RTL_R8(PHYstatus) & TBI_Enable); | ||
3089 | } | ||
3090 | |||
3080 | static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) | 3091 | static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) |
3081 | { | 3092 | { |
3082 | void __iomem *ioaddr = tp->mmio_addr; | 3093 | void __iomem *ioaddr = tp->mmio_addr; |
@@ -3109,7 +3120,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) | |||
3109 | ADVERTISED_1000baseT_Half | | 3120 | ADVERTISED_1000baseT_Half | |
3110 | ADVERTISED_1000baseT_Full : 0)); | 3121 | ADVERTISED_1000baseT_Full : 0)); |
3111 | 3122 | ||
3112 | if (RTL_R8(PHYstatus) & TBI_Enable) | 3123 | if (rtl_tbi_enabled(tp)) |
3113 | netif_info(tp, link, dev, "TBI auto-negotiating\n"); | 3124 | netif_info(tp, link, dev, "TBI auto-negotiating\n"); |
3114 | } | 3125 | } |
3115 | 3126 | ||
@@ -3319,9 +3330,16 @@ static void r810x_phy_power_up(struct rtl8169_private *tp) | |||
3319 | 3330 | ||
3320 | static void r810x_pll_power_down(struct rtl8169_private *tp) | 3331 | static void r810x_pll_power_down(struct rtl8169_private *tp) |
3321 | { | 3332 | { |
3333 | void __iomem *ioaddr = tp->mmio_addr; | ||
3334 | |||
3322 | if (__rtl8169_get_wol(tp) & WAKE_ANY) { | 3335 | if (__rtl8169_get_wol(tp) & WAKE_ANY) { |
3323 | rtl_writephy(tp, 0x1f, 0x0000); | 3336 | rtl_writephy(tp, 0x1f, 0x0000); |
3324 | rtl_writephy(tp, MII_BMCR, 0x0000); | 3337 | rtl_writephy(tp, MII_BMCR, 0x0000); |
3338 | |||
3339 | if (tp->mac_version == RTL_GIGA_MAC_VER_29 || | ||
3340 | tp->mac_version == RTL_GIGA_MAC_VER_30) | ||
3341 | RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | | ||
3342 | AcceptMulticast | AcceptMyPhys); | ||
3325 | return; | 3343 | return; |
3326 | } | 3344 | } |
3327 | 3345 | ||
@@ -3417,7 +3435,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) | |||
3417 | rtl_writephy(tp, MII_BMCR, 0x0000); | 3435 | rtl_writephy(tp, MII_BMCR, 0x0000); |
3418 | 3436 | ||
3419 | if (tp->mac_version == RTL_GIGA_MAC_VER_32 || | 3437 | if (tp->mac_version == RTL_GIGA_MAC_VER_32 || |
3420 | tp->mac_version == RTL_GIGA_MAC_VER_33) | 3438 | tp->mac_version == RTL_GIGA_MAC_VER_33 || |
3439 | tp->mac_version == RTL_GIGA_MAC_VER_34) | ||
3421 | RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | | 3440 | RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | |
3422 | AcceptMulticast | AcceptMyPhys); | 3441 | AcceptMulticast | AcceptMyPhys); |
3423 | return; | 3442 | return; |
@@ -3727,8 +3746,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3727 | tp->features |= rtl_try_msi(pdev, ioaddr, cfg); | 3746 | tp->features |= rtl_try_msi(pdev, ioaddr, cfg); |
3728 | RTL_W8(Cfg9346, Cfg9346_Lock); | 3747 | RTL_W8(Cfg9346, Cfg9346_Lock); |
3729 | 3748 | ||
3730 | if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) && | 3749 | if (rtl_tbi_enabled(tp)) { |
3731 | (RTL_R8(PHYstatus) & TBI_Enable)) { | ||
3732 | tp->set_speed = rtl8169_set_speed_tbi; | 3750 | tp->set_speed = rtl8169_set_speed_tbi; |
3733 | tp->get_settings = rtl8169_gset_tbi; | 3751 | tp->get_settings = rtl8169_gset_tbi; |
3734 | tp->phy_reset_enable = rtl8169_tbi_reset_enable; | 3752 | tp->phy_reset_enable = rtl8169_tbi_reset_enable; |
@@ -3777,6 +3795,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3777 | tp->intr_event = cfg->intr_event; | 3795 | tp->intr_event = cfg->intr_event; |
3778 | tp->napi_event = cfg->napi_event; | 3796 | tp->napi_event = cfg->napi_event; |
3779 | 3797 | ||
3798 | tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? | ||
3799 | ~(RxBOVF | RxFOVF) : ~0; | ||
3800 | |||
3780 | init_timer(&tp->timer); | 3801 | init_timer(&tp->timer); |
3781 | tp->timer.data = (unsigned long) dev; | 3802 | tp->timer.data = (unsigned long) dev; |
3782 | tp->timer.function = rtl8169_phy_timer; | 3803 | tp->timer.function = rtl8169_phy_timer; |
@@ -3988,6 +4009,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) | |||
3988 | while (RTL_R8(TxPoll) & NPQ) | 4009 | while (RTL_R8(TxPoll) & NPQ) |
3989 | udelay(20); | 4010 | udelay(20); |
3990 | } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) { | 4011 | } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) { |
4012 | RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); | ||
3991 | while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) | 4013 | while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) |
3992 | udelay(100); | 4014 | udelay(100); |
3993 | } else { | 4015 | } else { |
@@ -5314,7 +5336,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, | |||
5314 | u32 status; | 5336 | u32 status; |
5315 | 5337 | ||
5316 | rmb(); | 5338 | rmb(); |
5317 | status = le32_to_cpu(desc->opts1); | 5339 | status = le32_to_cpu(desc->opts1) & tp->opts1_mask; |
5318 | 5340 | ||
5319 | if (status & DescOwn) | 5341 | if (status & DescOwn) |
5320 | break; | 5342 | break; |
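The new opts1_mask strips RxBOVF/RxFOVF from the RX descriptor status on every chip except RTL_GIGA_MAC_VER_01, where those bits still carry overflow information; without the mask a set overflow bit could be misread as other status. A standalone demonstration of the masking (the two overflow bit values are copied from the enum above, DescOwn as bit 31 is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define RxBOVF  (1u << 24)
#define RxFOVF  (1u << 23)
#define DescOwn (1u << 31)      /* assumed: descriptor-ownership bit */

int main(void)
{
        uint32_t opts1_mask = ~(RxBOVF | RxFOVF);   /* non-VER_01 chips */
        uint32_t raw = DescOwn | RxFOVF;            /* example descriptor word */
        uint32_t status = raw & opts1_mask;

        printf("own=%d, overflow bit visible=%d\n",
               !!(status & DescOwn), !!(status & RxFOVF));
        return 0;
}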
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index bf2404ae3b87..4479a45f7329 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/phy.h> | 31 | #include <linux/phy.h> |
32 | #include <linux/cache.h> | 32 | #include <linux/cache.h> |
33 | #include <linux/io.h> | 33 | #include <linux/io.h> |
34 | #include <linux/interrupt.h> | ||
34 | #include <linux/pm_runtime.h> | 35 | #include <linux/pm_runtime.h> |
35 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
36 | #include <linux/ethtool.h> | 37 | #include <linux/ethtool.h> |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 76dcadfaaa43..de9afebe1830 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx) | |||
1050 | { | 1050 | { |
1051 | struct pci_dev *pci_dev = efx->pci_dev; | 1051 | struct pci_dev *pci_dev = efx->pci_dev; |
1052 | dma_addr_t dma_mask = efx->type->max_dma_mask; | 1052 | dma_addr_t dma_mask = efx->type->max_dma_mask; |
1053 | bool use_wc; | ||
1054 | int rc; | 1053 | int rc; |
1055 | 1054 | ||
1056 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); | 1055 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); |
@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx) | |||
1101 | rc = -EIO; | 1100 | rc = -EIO; |
1102 | goto fail3; | 1101 | goto fail3; |
1103 | } | 1102 | } |
1104 | 1103 | efx->membase = ioremap_nocache(efx->membase_phys, | |
1105 | /* bug22643: If SR-IOV is enabled then tx push over a write combined | 1104 | efx->type->mem_map_size); |
1106 | * mapping is unsafe. We need to disable write combining in this case. | ||
1107 | * MSI is unsupported when SR-IOV is enabled, and the firmware will | ||
1108 | * have removed the MSI capability. So write combining is safe if | ||
1109 | * there is an MSI capability. | ||
1110 | */ | ||
1111 | use_wc = (!EFX_WORKAROUND_22643(efx) || | ||
1112 | pci_find_capability(pci_dev, PCI_CAP_ID_MSI)); | ||
1113 | if (use_wc) | ||
1114 | efx->membase = ioremap_wc(efx->membase_phys, | ||
1115 | efx->type->mem_map_size); | ||
1116 | else | ||
1117 | efx->membase = ioremap_nocache(efx->membase_phys, | ||
1118 | efx->type->mem_map_size); | ||
1119 | if (!efx->membase) { | 1105 | if (!efx->membase) { |
1120 | netif_err(efx, probe, efx->net_dev, | 1106 | netif_err(efx, probe, efx->net_dev, |
1121 | "could not map memory BAR at %llx+%x\n", | 1107 | "could not map memory BAR at %llx+%x\n", |
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h index cc978803d484..751d1ec112cc 100644 --- a/drivers/net/ethernet/sfc/io.h +++ b/drivers/net/ethernet/sfc/io.h | |||
@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, | |||
103 | _efx_writed(efx, value->u32[2], reg + 8); | 103 | _efx_writed(efx, value->u32[2], reg + 8); |
104 | _efx_writed(efx, value->u32[3], reg + 12); | 104 | _efx_writed(efx, value->u32[3], reg + 12); |
105 | #endif | 105 | #endif |
106 | wmb(); | ||
107 | mmiowb(); | 106 | mmiowb(); |
108 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 107 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
109 | } | 108 | } |
@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, | |||
126 | __raw_writel((__force u32)value->u32[0], membase + addr); | 125 | __raw_writel((__force u32)value->u32[0], membase + addr); |
127 | __raw_writel((__force u32)value->u32[1], membase + addr + 4); | 126 | __raw_writel((__force u32)value->u32[1], membase + addr + 4); |
128 | #endif | 127 | #endif |
129 | wmb(); | ||
130 | mmiowb(); | 128 | mmiowb(); |
131 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 129 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
132 | } | 130 | } |
@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, | |||
141 | 139 | ||
142 | /* No lock required */ | 140 | /* No lock required */ |
143 | _efx_writed(efx, value->u32[0], reg); | 141 | _efx_writed(efx, value->u32[0], reg); |
144 | wmb(); | ||
145 | } | 142 | } |
146 | 143 | ||
147 | /* Read a 128-bit CSR, locking as appropriate. */ | 144 | /* Read a 128-bit CSR, locking as appropriate. */ |
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, | |||
152 | 149 | ||
153 | spin_lock_irqsave(&efx->biu_lock, flags); | 150 | spin_lock_irqsave(&efx->biu_lock, flags); |
154 | value->u32[0] = _efx_readd(efx, reg + 0); | 151 | value->u32[0] = _efx_readd(efx, reg + 0); |
155 | rmb(); | ||
156 | value->u32[1] = _efx_readd(efx, reg + 4); | 152 | value->u32[1] = _efx_readd(efx, reg + 4); |
157 | value->u32[2] = _efx_readd(efx, reg + 8); | 153 | value->u32[2] = _efx_readd(efx, reg + 8); |
158 | value->u32[3] = _efx_readd(efx, reg + 12); | 154 | value->u32[3] = _efx_readd(efx, reg + 12); |
@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, | |||
175 | value->u64[0] = (__force __le64)__raw_readq(membase + addr); | 171 | value->u64[0] = (__force __le64)__raw_readq(membase + addr); |
176 | #else | 172 | #else |
177 | value->u32[0] = (__force __le32)__raw_readl(membase + addr); | 173 | value->u32[0] = (__force __le32)__raw_readl(membase + addr); |
178 | rmb(); | ||
179 | value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); | 174 | value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); |
180 | #endif | 175 | #endif |
181 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 176 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, | |||
249 | _efx_writed(efx, value->u32[2], reg + 8); | 244 | _efx_writed(efx, value->u32[2], reg + 8); |
250 | _efx_writed(efx, value->u32[3], reg + 12); | 245 | _efx_writed(efx, value->u32[3], reg + 12); |
251 | #endif | 246 | #endif |
252 | wmb(); | ||
253 | } | 247 | } |
254 | #define efx_writeo_page(efx, value, reg, page) \ | 248 | #define efx_writeo_page(efx, value, reg, page) \ |
255 | _efx_writeo_page(efx, value, \ | 249 | _efx_writeo_page(efx, value, \ |
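The wmb()/rmb() calls removed above were only needed while the BAR could be mapped write-combined; with the plain ioremap_nocache() mapping restored in efx.c, readl()/writel()-style accessors are already ordered with respect to one another. A barrier is still required when a write to coherent DMA memory must land before a subsequent MMIO doorbell, as in this illustrative sketch (types and names are not the driver's):

struct demo_ring {
        __le32 *desc;                   /* coherent DMA memory */
        void __iomem *doorbell;         /* uncached MMIO register */
};

static void demo_post(struct demo_ring *ring, unsigned int index, u32 val)
{
        ring->desc[index] = cpu_to_le32(val);
        wmb();                          /* descriptor visible before doorbell */
        writel(index, ring->doorbell);
}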
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 3dd45ed61f0a..81a425397468 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c | |||
@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) | |||
50 | return &nic_data->mcdi; | 50 | return &nic_data->mcdi; |
51 | } | 51 | } |
52 | 52 | ||
53 | static inline void | ||
54 | efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) | ||
55 | { | ||
56 | struct siena_nic_data *nic_data = efx->nic_data; | ||
57 | value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); | ||
58 | } | ||
59 | |||
60 | static inline void | ||
61 | efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) | ||
62 | { | ||
63 | struct siena_nic_data *nic_data = efx->nic_data; | ||
64 | __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); | ||
65 | } | ||
66 | |||
67 | void efx_mcdi_init(struct efx_nic *efx) | 53 | void efx_mcdi_init(struct efx_nic *efx) |
68 | { | 54 | { |
69 | struct efx_mcdi_iface *mcdi; | 55 | struct efx_mcdi_iface *mcdi; |
@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | |||
84 | const u8 *inbuf, size_t inlen) | 70 | const u8 *inbuf, size_t inlen) |
85 | { | 71 | { |
86 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 72 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
87 | unsigned pdu = MCDI_PDU(efx); | 73 | unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); |
88 | unsigned doorbell = MCDI_DOORBELL(efx); | 74 | unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); |
89 | unsigned int i; | 75 | unsigned int i; |
90 | efx_dword_t hdr; | 76 | efx_dword_t hdr; |
91 | u32 xflags, seqno; | 77 | u32 xflags, seqno; |
@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | |||
106 | MCDI_HEADER_SEQ, seqno, | 92 | MCDI_HEADER_SEQ, seqno, |
107 | MCDI_HEADER_XFLAGS, xflags); | 93 | MCDI_HEADER_XFLAGS, xflags); |
108 | 94 | ||
109 | efx_mcdi_writed(efx, &hdr, pdu); | 95 | efx_writed(efx, &hdr, pdu); |
110 | 96 | ||
111 | for (i = 0; i < inlen; i += 4) | 97 | for (i = 0; i < inlen; i += 4) |
112 | efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), | 98 | _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); |
113 | pdu + 4 + i); | 99 | |
100 | /* Ensure the payload is written out before the header */ | ||
101 | wmb(); | ||
114 | 102 | ||
115 | /* ring the doorbell with a distinctive value */ | 103 | /* ring the doorbell with a distinctive value */ |
116 | EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); | 104 | _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); |
117 | efx_mcdi_writed(efx, &hdr, doorbell); | ||
118 | } | 105 | } |
119 | 106 | ||
120 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) | 107 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) |
121 | { | 108 | { |
122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 109 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
123 | unsigned int pdu = MCDI_PDU(efx); | 110 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); |
124 | int i; | 111 | int i; |
125 | 112 | ||
126 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | 113 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); |
127 | BUG_ON(outlen & 3 || outlen >= 0x100); | 114 | BUG_ON(outlen & 3 || outlen >= 0x100); |
128 | 115 | ||
129 | for (i = 0; i < outlen; i += 4) | 116 | for (i = 0; i < outlen; i += 4) |
130 | efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); | 117 | *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); |
131 | } | 118 | } |
132 | 119 | ||
133 | static int efx_mcdi_poll(struct efx_nic *efx) | 120 | static int efx_mcdi_poll(struct efx_nic *efx) |
@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
135 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
136 | unsigned int time, finish; | 123 | unsigned int time, finish; |
137 | unsigned int respseq, respcmd, error; | 124 | unsigned int respseq, respcmd, error; |
138 | unsigned int pdu = MCDI_PDU(efx); | 125 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); |
139 | unsigned int rc, spins; | 126 | unsigned int rc, spins; |
140 | efx_dword_t reg; | 127 | efx_dword_t reg; |
141 | 128 | ||
@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
161 | 148 | ||
162 | time = get_seconds(); | 149 | time = get_seconds(); |
163 | 150 | ||
164 | efx_mcdi_readd(efx, ®, pdu); | 151 | rmb(); |
152 | efx_readd(efx, ®, pdu); | ||
165 | 153 | ||
166 | /* All 1's indicates that shared memory is in reset (and is | 154 | /* All 1's indicates that shared memory is in reset (and is |
167 | * not a valid header). Wait for it to come out reset before | 155 | * not a valid header). Wait for it to come out reset before |
@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
188 | respseq, mcdi->seqno); | 176 | respseq, mcdi->seqno); |
189 | rc = EIO; | 177 | rc = EIO; |
190 | } else if (error) { | 178 | } else if (error) { |
191 | efx_mcdi_readd(efx, ®, pdu + 4); | 179 | efx_readd(efx, ®, pdu + 4); |
192 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { | 180 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { |
193 | #define TRANSLATE_ERROR(name) \ | 181 | #define TRANSLATE_ERROR(name) \ |
194 | case MC_CMD_ERR_ ## name: \ | 182 | case MC_CMD_ERR_ ## name: \ |
@@ -222,21 +210,21 @@ out: | |||
222 | /* Test and clear MC-rebooted flag for this port/function */ | 210 | /* Test and clear MC-rebooted flag for this port/function */ |
223 | int efx_mcdi_poll_reboot(struct efx_nic *efx) | 211 | int efx_mcdi_poll_reboot(struct efx_nic *efx) |
224 | { | 212 | { |
225 | unsigned int addr = MCDI_REBOOT_FLAG(efx); | 213 | unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); |
226 | efx_dword_t reg; | 214 | efx_dword_t reg; |
227 | uint32_t value; | 215 | uint32_t value; |
228 | 216 | ||
229 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | 217 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) |
230 | return false; | 218 | return false; |
231 | 219 | ||
232 | efx_mcdi_readd(efx, ®, addr); | 220 | efx_readd(efx, ®, addr); |
233 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); | 221 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); |
234 | 222 | ||
235 | if (value == 0) | 223 | if (value == 0) |
236 | return 0; | 224 | return 0; |
237 | 225 | ||
238 | EFX_ZERO_DWORD(reg); | 226 | EFX_ZERO_DWORD(reg); |
239 | efx_mcdi_writed(efx, ®, addr); | 227 | efx_writed(efx, ®, addr); |
240 | 228 | ||
241 | if (value == MC_STATUS_DWORD_ASSERT) | 229 | if (value == MC_STATUS_DWORD_ASSERT) |
242 | return -EINTR; | 230 | return -EINTR; |
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index bafa23a6874c..3edfbaf5f022 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c | |||
@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) | |||
1936 | 1936 | ||
1937 | size = min_t(size_t, table->step, 16); | 1937 | size = min_t(size_t, table->step, 16); |
1938 | 1938 | ||
1939 | if (table->offset >= efx->type->mem_map_size) { | ||
1940 | /* No longer mapped; return dummy data */ | ||
1941 | memcpy(buf, "\xde\xc0\xad\xde", 4); | ||
1942 | buf += table->rows * size; | ||
1943 | continue; | ||
1944 | } | ||
1945 | |||
1946 | for (i = 0; i < table->rows; i++) { | 1939 | for (i = 0; i < table->rows; i++) { |
1947 | switch (table->step) { | 1940 | switch (table->step) { |
1948 | case 4: /* 32-bit register or SRAM */ | 1941 | case 4: /* 32-bit register or SRAM */ |
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index b5b288628c6b..5fb24d3aa3ca 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h | |||
@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) | |||
143 | /** | 143 | /** |
144 | * struct siena_nic_data - Siena NIC state | 144 | * struct siena_nic_data - Siena NIC state |
145 | * @mcdi: Management-Controller-to-Driver Interface | 145 | * @mcdi: Management-Controller-to-Driver Interface |
146 | * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable. | ||
147 | * @wol_filter_id: Wake-on-LAN packet filter id | 146 | * @wol_filter_id: Wake-on-LAN packet filter id |
148 | */ | 147 | */ |
149 | struct siena_nic_data { | 148 | struct siena_nic_data { |
150 | struct efx_mcdi_iface mcdi; | 149 | struct efx_mcdi_iface mcdi; |
151 | void __iomem *mcdi_smem; | ||
152 | int wol_filter_id; | 150 | int wol_filter_id; |
153 | }; | 151 | }; |
154 | 152 | ||
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 4fdd148747b2..cc2549cb7076 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c | |||
@@ -252,26 +252,12 @@ static int siena_probe_nic(struct efx_nic *efx) | |||
252 | efx_reado(efx, ®, FR_AZ_CS_DEBUG); | 252 | efx_reado(efx, ®, FR_AZ_CS_DEBUG); |
253 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; | 253 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; |
254 | 254 | ||
255 | /* Initialise MCDI */ | ||
256 | nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + | ||
257 | FR_CZ_MC_TREG_SMEM, | ||
258 | FR_CZ_MC_TREG_SMEM_STEP * | ||
259 | FR_CZ_MC_TREG_SMEM_ROWS); | ||
260 | if (!nic_data->mcdi_smem) { | ||
261 | netif_err(efx, probe, efx->net_dev, | ||
262 | "could not map MCDI at %llx+%x\n", | ||
263 | (unsigned long long)efx->membase_phys + | ||
264 | FR_CZ_MC_TREG_SMEM, | ||
265 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); | ||
266 | rc = -ENOMEM; | ||
267 | goto fail1; | ||
268 | } | ||
269 | efx_mcdi_init(efx); | 255 | efx_mcdi_init(efx); |
270 | 256 | ||
271 | /* Recover from a failed assertion before probing */ | 257 | /* Recover from a failed assertion before probing */ |
272 | rc = efx_mcdi_handle_assertion(efx); | 258 | rc = efx_mcdi_handle_assertion(efx); |
273 | if (rc) | 259 | if (rc) |
274 | goto fail2; | 260 | goto fail1; |
275 | 261 | ||
276 | /* Let the BMC know that the driver is now in charge of link and | 262 | /* Let the BMC know that the driver is now in charge of link and |
277 | * filter settings. We must do this before we reset the NIC */ | 263 | * filter settings. We must do this before we reset the NIC */ |
@@ -326,7 +312,6 @@ fail4: | |||
326 | fail3: | 312 | fail3: |
327 | efx_mcdi_drv_attach(efx, false, NULL); | 313 | efx_mcdi_drv_attach(efx, false, NULL); |
328 | fail2: | 314 | fail2: |
329 | iounmap(nic_data->mcdi_smem); | ||
330 | fail1: | 315 | fail1: |
331 | kfree(efx->nic_data); | 316 | kfree(efx->nic_data); |
332 | return rc; | 317 | return rc; |
@@ -406,8 +391,6 @@ static int siena_init_nic(struct efx_nic *efx) | |||
406 | 391 | ||
407 | static void siena_remove_nic(struct efx_nic *efx) | 392 | static void siena_remove_nic(struct efx_nic *efx) |
408 | { | 393 | { |
409 | struct siena_nic_data *nic_data = efx->nic_data; | ||
410 | |||
411 | efx_nic_free_buffer(efx, &efx->irq_status); | 394 | efx_nic_free_buffer(efx, &efx->irq_status); |
412 | 395 | ||
413 | siena_reset_hw(efx, RESET_TYPE_ALL); | 396 | siena_reset_hw(efx, RESET_TYPE_ALL); |
@@ -417,8 +400,7 @@ static void siena_remove_nic(struct efx_nic *efx) | |||
417 | efx_mcdi_drv_attach(efx, false, NULL); | 400 | efx_mcdi_drv_attach(efx, false, NULL); |
418 | 401 | ||
419 | /* Tear down the private nic state */ | 402 | /* Tear down the private nic state */ |
420 | iounmap(nic_data->mcdi_smem); | 403 | kfree(efx->nic_data); |
421 | kfree(nic_data); | ||
422 | efx->nic_data = NULL; | 404 | efx->nic_data = NULL; |
423 | } | 405 | } |
424 | 406 | ||
@@ -658,7 +640,8 @@ const struct efx_nic_type siena_a0_nic_type = { | |||
658 | .default_mac_ops = &efx_mcdi_mac_operations, | 640 | .default_mac_ops = &efx_mcdi_mac_operations, |
659 | 641 | ||
660 | .revision = EFX_REV_SIENA_A0, | 642 | .revision = EFX_REV_SIENA_A0, |
661 | .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ | 643 | .mem_map_size = (FR_CZ_MC_TREG_SMEM + |
644 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), | ||
662 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, | 645 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, |
663 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, | 646 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, |
664 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, | 647 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, |
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h index 99ff11400cef..e4dd3a7f304b 100644 --- a/drivers/net/ethernet/sfc/workarounds.h +++ b/drivers/net/ethernet/sfc/workarounds.h | |||
@@ -38,8 +38,6 @@ | |||
38 | #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS | 38 | #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS |
39 | /* Legacy interrupt storm when interrupt fifo fills */ | 39 | /* Legacy interrupt storm when interrupt fifo fills */ |
40 | #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA | 40 | #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA |
41 | /* Write combining and sriov=enabled are incompatible */ | ||
42 | #define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA | ||
43 | 41 | ||
44 | /* Spurious parity errors in TSORT buffers */ | 42 | /* Spurious parity errors in TSORT buffers */ |
45 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A | 43 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A |
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index f07a72150c63..12068219059a 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c | |||
@@ -2452,14 +2452,13 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id) | |||
2452 | struct net_device *dev = dev_id; | 2452 | struct net_device *dev = dev_id; |
2453 | struct cas *cp = netdev_priv(dev); | 2453 | struct cas *cp = netdev_priv(dev); |
2454 | unsigned long flags; | 2454 | unsigned long flags; |
2455 | int ring; | 2455 | int ring = (irq == cp->pci_irq_INTC) ? 2 : 3; |
2456 | u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); | 2456 | u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); |
2457 | 2457 | ||
2458 | /* check for shared irq */ | 2458 | /* check for shared irq */ |
2459 | if (status == 0) | 2459 | if (status == 0) |
2460 | return IRQ_NONE; | 2460 | return IRQ_NONE; |
2461 | 2461 | ||
2462 | ring = (irq == cp->pci_irq_INTC) ? 2 : 3; | ||
2463 | spin_lock_irqsave(&cp->lock, flags); | 2462 | spin_lock_irqsave(&cp->lock, flags); |
2464 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ | 2463 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ |
2465 | #ifdef USE_NAPI | 2464 | #ifdef USE_NAPI |
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index dfc82720065a..ed2a3977c6e7 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c | |||
@@ -799,5 +799,11 @@ static void __exit cleanup_netconsole(void) | |||
799 | } | 799 | } |
800 | } | 800 | } |
801 | 801 | ||
802 | module_init(init_netconsole); | 802 | /* |
803 | * Use late_initcall to ensure netconsole is | ||
804 | * initialized after network device driver if built-in. | ||
805 | * | ||
806 | * late_initcall() and module_init() are identical if built as module. | ||
807 | */ | ||
808 | late_initcall(init_netconsole); | ||
803 | module_exit(cleanup_netconsole); | 809 | module_exit(cleanup_netconsole); |
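late_initcall() places the initialisation at a later initcall level than module_init() (which maps to device_initcall() when built in), so netconsole starts after the built-in network drivers it depends on; as the comment notes, the two are equivalent for a loadable module. A minimal self-contained example of the same idiom:

#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
        pr_info("demo: runs after device_initcall-level code when built in\n");
        return 0;
}
late_initcall(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");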
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 0620ba963508..04bb8fcc0cb5 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c | |||
@@ -25,8 +25,9 @@ | |||
25 | /* DP83865 phy identifier values */ | 25 | /* DP83865 phy identifier values */ |
26 | #define DP83865_PHY_ID 0x20005c7a | 26 | #define DP83865_PHY_ID 0x20005c7a |
27 | 27 | ||
28 | #define DP83865_INT_MASK_REG 0x15 | 28 | #define DP83865_INT_STATUS 0x14 |
29 | #define DP83865_INT_MASK_STATUS 0x14 | 29 | #define DP83865_INT_MASK 0x15 |
30 | #define DP83865_INT_CLEAR 0x17 | ||
30 | 31 | ||
31 | #define DP83865_INT_REMOTE_FAULT 0x0008 | 32 | #define DP83865_INT_REMOTE_FAULT 0x0008 |
32 | #define DP83865_INT_ANE_COMPLETED 0x0010 | 33 | #define DP83865_INT_ANE_COMPLETED 0x0010 |
@@ -68,21 +69,25 @@ static int ns_config_intr(struct phy_device *phydev) | |||
68 | int err; | 69 | int err; |
69 | 70 | ||
70 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) | 71 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) |
71 | err = phy_write(phydev, DP83865_INT_MASK_REG, | 72 | err = phy_write(phydev, DP83865_INT_MASK, |
72 | DP83865_INT_MASK_DEFAULT); | 73 | DP83865_INT_MASK_DEFAULT); |
73 | else | 74 | else |
74 | err = phy_write(phydev, DP83865_INT_MASK_REG, 0); | 75 | err = phy_write(phydev, DP83865_INT_MASK, 0); |
75 | 76 | ||
76 | return err; | 77 | return err; |
77 | } | 78 | } |
78 | 79 | ||
79 | static int ns_ack_interrupt(struct phy_device *phydev) | 80 | static int ns_ack_interrupt(struct phy_device *phydev) |
80 | { | 81 | { |
81 | int ret = phy_read(phydev, DP83865_INT_MASK_STATUS); | 82 | int ret = phy_read(phydev, DP83865_INT_STATUS); |
82 | if (ret < 0) | 83 | if (ret < 0) |
83 | return ret; | 84 | return ret; |
84 | 85 | ||
85 | return 0; | 86 | /* Clear the interrupt status bit by writing a “1” |
87 | * to the corresponding bit in INT_CLEAR (2:0 are reserved) */ | ||
88 | ret = phy_write(phydev, DP83865_INT_CLEAR, ret & ~0x7); | ||
89 | |||
90 | return ret; | ||
86 | } | 91 | } |
87 | 92 | ||
88 | static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) | 93 | static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) |
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 10e5d985afa3..edfa15d2e795 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
@@ -1465,7 +1465,12 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1465 | continue; | 1465 | continue; |
1466 | } | 1466 | } |
1467 | 1467 | ||
1468 | mtu = pch->chan->mtu - hdrlen; | 1468 | /* |
1469 | * hdrlen includes the 2-byte PPP protocol field, but the | ||
1470 | * MTU counts only the payload excluding the protocol field. | ||
1471 | * (RFC1661 Section 2) | ||
1472 | */ | ||
1473 | mtu = pch->chan->mtu - (hdrlen - 2); | ||
1469 | if (mtu < 4) | 1474 | if (mtu < 4) |
1470 | mtu = 4; | 1475 | mtu = 4; |
1471 | if (flen > mtu) | 1476 | if (flen > mtu) |
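The fragment-size fix above charges the 2-byte PPP protocol field only once: the channel MTU already excludes it, so subtracting the full hdrlen shrank every fragment by two bytes. A small arithmetic illustration (the MTU and header sizes here are illustrative, not taken from the driver):

#include <stdio.h>

int main(void)
{
        int chan_mtu = 1500;    /* channel MTU: payload without protocol field */
        int hdrlen = 2 + 4;     /* assumed: 2-byte protocol + 4-byte MP header */

        int old_limit = chan_mtu - hdrlen;          /* protocol counted twice */
        int new_limit = chan_mtu - (hdrlen - 2);    /* RFC 1661 accounting */

        printf("fragment payload limit: old=%d fixed=%d\n",
               old_limit, new_limit);
        return 0;
}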
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 86ac38c96bcf..3bb131137033 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c | |||
@@ -80,13 +80,13 @@ static int rionet_capable = 1; | |||
80 | */ | 80 | */ |
81 | static struct rio_dev **rionet_active; | 81 | static struct rio_dev **rionet_active; |
82 | 82 | ||
83 | #define is_rionet_capable(pef, src_ops, dst_ops) \ | 83 | #define is_rionet_capable(src_ops, dst_ops) \ |
84 | ((pef & RIO_PEF_INB_MBOX) && \ | 84 | ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ |
85 | (pef & RIO_PEF_INB_DOORBELL) && \ | 85 | (dst_ops & RIO_DST_OPS_DATA_MSG) && \ |
86 | (src_ops & RIO_SRC_OPS_DOORBELL) && \ | 86 | (src_ops & RIO_SRC_OPS_DOORBELL) && \ |
87 | (dst_ops & RIO_DST_OPS_DOORBELL)) | 87 | (dst_ops & RIO_DST_OPS_DOORBELL)) |
88 | #define dev_rionet_capable(dev) \ | 88 | #define dev_rionet_capable(dev) \ |
89 | is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops) | 89 | is_rionet_capable(dev->src_ops, dev->dst_ops) |
90 | 90 | ||
91 | #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) | 91 | #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) |
92 | #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) | 92 | #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) |
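The reworked macro keys the capability test on the messaging and doorbell operation bits alone, dropping the processing-element-feature checks. A standalone illustration of the check (the CAR bit values below are placeholders, not the real ones from <linux/rio_regs.h>; the macro body matches the hunk):

#include <stdio.h>

#define RIO_SRC_OPS_DATA_MSG    0x0001  /* placeholder values */
#define RIO_SRC_OPS_DOORBELL    0x0002
#define RIO_DST_OPS_DATA_MSG    0x0001
#define RIO_DST_OPS_DOORBELL    0x0002

#define is_rionet_capable(src_ops, dst_ops)     \
        ((src_ops & RIO_SRC_OPS_DATA_MSG) &&    \
         (dst_ops & RIO_DST_OPS_DATA_MSG) &&    \
         (src_ops & RIO_SRC_OPS_DOORBELL) &&    \
         (dst_ops & RIO_DST_OPS_DOORBELL))

int main(void)
{
        unsigned int src_ops = RIO_SRC_OPS_DATA_MSG | RIO_SRC_OPS_DOORBELL;
        unsigned int dst_ops = RIO_DST_OPS_DATA_MSG;    /* doorbell missing */

        printf("capable: %d\n", is_rionet_capable(src_ops, dst_ops));
        return 0;
}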
@@ -282,7 +282,6 @@ static int rionet_open(struct net_device *ndev) | |||
282 | { | 282 | { |
283 | int i, rc = 0; | 283 | int i, rc = 0; |
284 | struct rionet_peer *peer, *tmp; | 284 | struct rionet_peer *peer, *tmp; |
285 | u32 pwdcsr; | ||
286 | struct rionet_private *rnet = netdev_priv(ndev); | 285 | struct rionet_private *rnet = netdev_priv(ndev); |
287 | 286 | ||
288 | if (netif_msg_ifup(rnet)) | 287 | if (netif_msg_ifup(rnet)) |
@@ -332,13 +331,8 @@ static int rionet_open(struct net_device *ndev) | |||
332 | continue; | 331 | continue; |
333 | } | 332 | } |
334 | 333 | ||
335 | /* | 334 | /* Send a join message */ |
336 | * If device has initialized inbound doorbells, | 335 | rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); |
337 | * send a join message | ||
338 | */ | ||
339 | rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr); | ||
340 | if (pwdcsr & RIO_DOORBELL_AVAIL) | ||
341 | rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); | ||
342 | } | 336 | } |
343 | 337 | ||
344 | out: | 338 | out: |
@@ -492,7 +486,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) | |||
492 | static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) | 486 | static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) |
493 | { | 487 | { |
494 | int rc = -ENODEV; | 488 | int rc = -ENODEV; |
495 | u32 lpef, lsrc_ops, ldst_ops; | 489 | u32 lsrc_ops, ldst_ops; |
496 | struct rionet_peer *peer; | 490 | struct rionet_peer *peer; |
497 | struct net_device *ndev = NULL; | 491 | struct net_device *ndev = NULL; |
498 | 492 | ||
@@ -515,12 +509,11 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) | |||
515 | * on later probes | 509 | * on later probes |
516 | */ | 510 | */ |
517 | if (!rionet_check) { | 511 | if (!rionet_check) { |
518 | rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef); | ||
519 | rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, | 512 | rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, |
520 | &lsrc_ops); | 513 | &lsrc_ops); |
521 | rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, | 514 | rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, |
522 | &ldst_ops); | 515 | &ldst_ops); |
523 | if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) { | 516 | if (!is_rionet_capable(lsrc_ops, ldst_ops)) { |
524 | printk(KERN_ERR | 517 | printk(KERN_ERR |
525 | "%s: local device is not network capable\n", | 518 | "%s: local device is not network capable\n", |
526 | DRV_NAME); | 519 | DRV_NAME); |
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 15772b1b6a91..13c1f044b40d 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c | |||
@@ -59,6 +59,7 @@ | |||
59 | #define USB_PRODUCT_IPHONE_3G 0x1292 | 59 | #define USB_PRODUCT_IPHONE_3G 0x1292 |
60 | #define USB_PRODUCT_IPHONE_3GS 0x1294 | 60 | #define USB_PRODUCT_IPHONE_3GS 0x1294 |
61 | #define USB_PRODUCT_IPHONE_4 0x1297 | 61 | #define USB_PRODUCT_IPHONE_4 0x1297 |
62 | #define USB_PRODUCT_IPHONE_4_VZW 0x129c | ||
62 | 63 | ||
63 | #define IPHETH_USBINTF_CLASS 255 | 64 | #define IPHETH_USBINTF_CLASS 255 |
64 | #define IPHETH_USBINTF_SUBCLASS 253 | 65 | #define IPHETH_USBINTF_SUBCLASS 253 |
@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = { | |||
98 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, | 99 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, |
99 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | 100 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, |
100 | IPHETH_USBINTF_PROTO) }, | 101 | IPHETH_USBINTF_PROTO) }, |
102 | { USB_DEVICE_AND_INTERFACE_INFO( | ||
103 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, | ||
104 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | ||
105 | IPHETH_USBINTF_PROTO) }, | ||
101 | { } | 106 | { } |
102 | }; | 107 | }; |
103 | MODULE_DEVICE_TABLE(usb, ipheth_table); | 108 | MODULE_DEVICE_TABLE(usb, ipheth_table); |
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 2d4c0910295b..2d394af82171 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c | |||
@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah, | |||
41 | case ADC_DC_CAL: | 41 | case ADC_DC_CAL: |
42 | /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ | 42 | /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ |
43 | if (!IS_CHAN_B(chan) && | 43 | if (!IS_CHAN_B(chan) && |
44 | !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) | 44 | !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) && |
45 | IS_CHAN_HT20(chan))) | ||
45 | supported = true; | 46 | supported = true; |
46 | break; | 47 | break; |
47 | } | 48 | } |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index a73e50d80cbb..51398f0063e2 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | |||
@@ -50,7 +50,7 @@ static int ar9003_hw_power_interpolate(int32_t x, | |||
50 | static const struct ar9300_eeprom ar9300_default = { | 50 | static const struct ar9300_eeprom ar9300_default = { |
51 | .eepromVersion = 2, | 51 | .eepromVersion = 2, |
52 | .templateVersion = 2, | 52 | .templateVersion = 2, |
53 | .macAddr = {1, 2, 3, 4, 5, 6}, | 53 | .macAddr = {0, 2, 3, 4, 5, 6}, |
54 | .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 54 | .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
55 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, | 55 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, |
56 | .baseEepHeader = { | 56 | .baseEepHeader = { |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 95147948794d..4956d09cb589 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c | |||
@@ -678,7 +678,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, | |||
678 | REG_WRITE_ARRAY(&ah->iniModesAdditional, | 678 | REG_WRITE_ARRAY(&ah->iniModesAdditional, |
679 | modesIndex, regWrites); | 679 | modesIndex, regWrites); |
680 | 680 | ||
681 | if (AR_SREV_9300(ah)) | 681 | if (AR_SREV_9330(ah)) |
682 | REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites); | 682 | REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites); |
683 | 683 | ||
684 | if (AR_SREV_9340(ah) && !ah->is_clk_25mhz) | 684 | if (AR_SREV_9340(ah) && !ah->is_clk_25mhz) |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 7910165cf0e6..a16f53994a7e 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -2272,7 +2272,11 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) | |||
2272 | 2272 | ||
2273 | mutex_lock(&sc->mutex); | 2273 | mutex_lock(&sc->mutex); |
2274 | ah->coverage_class = coverage_class; | 2274 | ah->coverage_class = coverage_class; |
2275 | |||
2276 | ath9k_ps_wakeup(sc); | ||
2275 | ath9k_hw_init_global_settings(ah); | 2277 | ath9k_hw_init_global_settings(ah); |
2278 | ath9k_ps_restore(sc); | ||
2279 | |||
2276 | mutex_unlock(&sc->mutex); | 2280 | mutex_unlock(&sc->mutex); |
2277 | } | 2281 | } |
2278 | 2282 | ||
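The coverage-class path now wraps the register update in a power-save wakeup/restore pair, since ath9k_hw_init_global_settings() touches the hardware and the chip may be in network sleep. The resulting shape of the callback, condensed from the hunk above (unrelated details omitted):

        mutex_lock(&sc->mutex);
        ah->coverage_class = coverage_class;

        ath9k_ps_wakeup(sc);                    /* make sure the chip is awake */
        ath9k_hw_init_global_settings(ah);      /* programs timeout registers */
        ath9k_ps_restore(sc);                   /* drop the power-save reference */

        mutex_unlock(&sc->mutex);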
@@ -2288,6 +2292,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) | |||
2288 | mutex_lock(&sc->mutex); | 2292 | mutex_lock(&sc->mutex); |
2289 | cancel_delayed_work_sync(&sc->tx_complete_work); | 2293 | cancel_delayed_work_sync(&sc->tx_complete_work); |
2290 | 2294 | ||
2295 | if (ah->ah_flags & AH_UNPLUGGED) { | ||
2296 | ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n"); | ||
2297 | mutex_unlock(&sc->mutex); | ||
2298 | return; | ||
2299 | } | ||
2300 | |||
2291 | if (sc->sc_flags & SC_OP_INVALID) { | 2301 | if (sc->sc_flags & SC_OP_INVALID) { |
2292 | ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); | 2302 | ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); |
2293 | mutex_unlock(&sc->mutex); | 2303 | mutex_unlock(&sc->mutex); |
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 782b8f3ae58f..af351ecd87c4 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c | |||
@@ -1115,8 +1115,10 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
1115 | * the high throughput speed in 802.11n networks. | 1115 | * the high throughput speed in 802.11n networks. |
1116 | */ | 1116 | */ |
1117 | 1117 | ||
1118 | if (!is_main_vif(ar, vif)) | 1118 | if (!is_main_vif(ar, vif)) { |
1119 | mutex_lock(&ar->mutex); | ||
1119 | goto err_softw; | 1120 | goto err_softw; |
1121 | } | ||
1120 | 1122 | ||
1121 | /* | 1123 | /* |
1122 | * While the hardware supports *catch-all* key, for offloading | 1124 | * While the hardware supports *catch-all* key, for offloading |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 24077023d484..56fa3a3648c4 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -1637,7 +1637,8 @@ static void handle_irq_beacon(struct b43_wldev *dev) | |||
1637 | u32 cmd, beacon0_valid, beacon1_valid; | 1637 | u32 cmd, beacon0_valid, beacon1_valid; |
1638 | 1638 | ||
1639 | if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && | 1639 | if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && |
1640 | !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) | 1640 | !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) && |
1641 | !b43_is_mode(wl, NL80211_IFTYPE_ADHOC)) | ||
1641 | return; | 1642 | return; |
1642 | 1643 | ||
1643 | /* This is the bottom half of the asynchronous beacon update. */ | 1644 | /* This is the bottom half of the asynchronous beacon update. */ |
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 3774dd034746..ef9ad79d1bfd 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c | |||
@@ -1903,15 +1903,17 @@ static void ipw2100_down(struct ipw2100_priv *priv) | |||
1903 | static int ipw2100_net_init(struct net_device *dev) | 1903 | static int ipw2100_net_init(struct net_device *dev) |
1904 | { | 1904 | { |
1905 | struct ipw2100_priv *priv = libipw_priv(dev); | 1905 | struct ipw2100_priv *priv = libipw_priv(dev); |
1906 | |||
1907 | return ipw2100_up(priv, 1); | ||
1908 | } | ||
1909 | |||
1910 | static int ipw2100_wdev_init(struct net_device *dev) | ||
1911 | { | ||
1912 | struct ipw2100_priv *priv = libipw_priv(dev); | ||
1906 | const struct libipw_geo *geo = libipw_get_geo(priv->ieee); | 1913 | const struct libipw_geo *geo = libipw_get_geo(priv->ieee); |
1907 | struct wireless_dev *wdev = &priv->ieee->wdev; | 1914 | struct wireless_dev *wdev = &priv->ieee->wdev; |
1908 | int ret; | ||
1909 | int i; | 1915 | int i; |
1910 | 1916 | ||
1911 | ret = ipw2100_up(priv, 1); | ||
1912 | if (ret) | ||
1913 | return ret; | ||
1914 | |||
1915 | memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); | 1917 | memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); |
1916 | 1918 | ||
1917 | /* fill-out priv->ieee->bg_band */ | 1919 | /* fill-out priv->ieee->bg_band */ |
@@ -6350,9 +6352,13 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6350 | "Error calling register_netdev.\n"); | 6352 | "Error calling register_netdev.\n"); |
6351 | goto fail; | 6353 | goto fail; |
6352 | } | 6354 | } |
6355 | registered = 1; | ||
6356 | |||
6357 | err = ipw2100_wdev_init(dev); | ||
6358 | if (err) | ||
6359 | goto fail; | ||
6353 | 6360 | ||
6354 | mutex_lock(&priv->action_mutex); | 6361 | mutex_lock(&priv->action_mutex); |
6355 | registered = 1; | ||
6356 | 6362 | ||
6357 | IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); | 6363 | IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); |
6358 | 6364 | ||
@@ -6389,7 +6395,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6389 | 6395 | ||
6390 | fail_unlock: | 6396 | fail_unlock: |
6391 | mutex_unlock(&priv->action_mutex); | 6397 | mutex_unlock(&priv->action_mutex); |
6392 | 6398 | wiphy_unregister(priv->ieee->wdev.wiphy); | |
6399 | kfree(priv->ieee->bg_band.channels); | ||
6393 | fail: | 6400 | fail: |
6394 | if (dev) { | 6401 | if (dev) { |
6395 | if (registered) | 6402 | if (registered) |
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index f303df43ed3f..99a710dfe771 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c | |||
@@ -11426,16 +11426,23 @@ static void ipw_bg_down(struct work_struct *work) | |||
11426 | /* Called by register_netdev() */ | 11426 | /* Called by register_netdev() */ |
11427 | static int ipw_net_init(struct net_device *dev) | 11427 | static int ipw_net_init(struct net_device *dev) |
11428 | { | 11428 | { |
11429 | int rc = 0; | ||
11430 | struct ipw_priv *priv = libipw_priv(dev); | ||
11431 | |||
11432 | mutex_lock(&priv->mutex); | ||
11433 | if (ipw_up(priv)) | ||
11434 | rc = -EIO; | ||
11435 | mutex_unlock(&priv->mutex); | ||
11436 | |||
11437 | return rc; | ||
11438 | } | ||
11439 | |||
11440 | static int ipw_wdev_init(struct net_device *dev) | ||
11441 | { | ||
11429 | int i, rc = 0; | 11442 | int i, rc = 0; |
11430 | struct ipw_priv *priv = libipw_priv(dev); | 11443 | struct ipw_priv *priv = libipw_priv(dev); |
11431 | const struct libipw_geo *geo = libipw_get_geo(priv->ieee); | 11444 | const struct libipw_geo *geo = libipw_get_geo(priv->ieee); |
11432 | struct wireless_dev *wdev = &priv->ieee->wdev; | 11445 | struct wireless_dev *wdev = &priv->ieee->wdev; |
11433 | mutex_lock(&priv->mutex); | ||
11434 | |||
11435 | if (ipw_up(priv)) { | ||
11436 | rc = -EIO; | ||
11437 | goto out; | ||
11438 | } | ||
11439 | 11446 | ||
11440 | memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); | 11447 | memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); |
11441 | 11448 | ||
@@ -11520,13 +11527,9 @@ static int ipw_net_init(struct net_device *dev) | |||
11520 | set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); | 11527 | set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); |
11521 | 11528 | ||
11522 | /* With that information in place, we can now register the wiphy... */ | 11529 | /* With that information in place, we can now register the wiphy... */ |
11523 | if (wiphy_register(wdev->wiphy)) { | 11530 | if (wiphy_register(wdev->wiphy)) |
11524 | rc = -EIO; | 11531 | rc = -EIO; |
11525 | goto out; | ||
11526 | } | ||
11527 | |||
11528 | out: | 11532 | out: |
11529 | mutex_unlock(&priv->mutex); | ||
11530 | return rc; | 11533 | return rc; |
11531 | } | 11534 | } |
11532 | 11535 | ||
@@ -11833,14 +11836,22 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, | |||
11833 | goto out_remove_sysfs; | 11836 | goto out_remove_sysfs; |
11834 | } | 11837 | } |
11835 | 11838 | ||
11839 | err = ipw_wdev_init(net_dev); | ||
11840 | if (err) { | ||
11841 | IPW_ERROR("failed to register wireless device\n"); | ||
11842 | goto out_unregister_netdev; | ||
11843 | } | ||
11844 | |||
11836 | #ifdef CONFIG_IPW2200_PROMISCUOUS | 11845 | #ifdef CONFIG_IPW2200_PROMISCUOUS |
11837 | if (rtap_iface) { | 11846 | if (rtap_iface) { |
11838 | err = ipw_prom_alloc(priv); | 11847 | err = ipw_prom_alloc(priv); |
11839 | if (err) { | 11848 | if (err) { |
11840 | IPW_ERROR("Failed to register promiscuous network " | 11849 | IPW_ERROR("Failed to register promiscuous network " |
11841 | "device (error %d).\n", err); | 11850 | "device (error %d).\n", err); |
11842 | unregister_netdev(priv->net_dev); | 11851 | wiphy_unregister(priv->ieee->wdev.wiphy); |
11843 | goto out_remove_sysfs; | 11852 | kfree(priv->ieee->a_band.channels); |
11853 | kfree(priv->ieee->bg_band.channels); | ||
11854 | goto out_unregister_netdev; | ||
11844 | } | 11855 | } |
11845 | } | 11856 | } |
11846 | #endif | 11857 | #endif |
@@ -11852,6 +11863,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, | |||
11852 | 11863 | ||
11853 | return 0; | 11864 | return 0; |
11854 | 11865 | ||
11866 | out_unregister_netdev: | ||
11867 | unregister_netdev(priv->net_dev); | ||
11855 | out_remove_sysfs: | 11868 | out_remove_sysfs: |
11856 | sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); | 11869 | sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); |
11857 | out_release_irq: | 11870 | out_release_irq: |
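The probe path above now unwinds in strict reverse order: a failure after register_netdev() jumps to a label that tears down only what was already set up before it. A generic sketch of the goto-based unwind idiom the hunk follows (all names here are illustrative, not the driver's):

static int demo_probe(struct demo_dev *dev)
{
        int err;

        err = demo_register_netdev(dev);
        if (err)
                goto out_remove_sysfs;

        err = demo_wdev_init(dev);
        if (err)
                goto out_unregister_netdev;

        return 0;

out_unregister_netdev:
        demo_unregister_netdev(dev);    /* undo the most recent step first */
out_remove_sysfs:
        demo_remove_sysfs(dev);
        return err;
}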
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c index 0cc5177d738d..8faeaf2dddec 100644 --- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c +++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c | |||
@@ -821,12 +821,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, | |||
821 | 821 | ||
822 | out: | 822 | out: |
823 | 823 | ||
824 | rs_sta->last_txrate_idx = index; | 824 | if (sband->band == IEEE80211_BAND_5GHZ) { |
825 | if (sband->band == IEEE80211_BAND_5GHZ) | 825 | if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE)) |
826 | info->control.rates[0].idx = rs_sta->last_txrate_idx - | 826 | index = IWL_FIRST_OFDM_RATE; |
827 | IWL_FIRST_OFDM_RATE; | 827 | rs_sta->last_txrate_idx = index; |
828 | else | 828 | info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE; |
829 | } else { | ||
830 | rs_sta->last_txrate_idx = index; | ||
829 | info->control.rates[0].idx = rs_sta->last_txrate_idx; | 831 | info->control.rates[0].idx = rs_sta->last_txrate_idx; |
832 | } | ||
830 | 833 | ||
831 | IWL_DEBUG_RATE(priv, "leave: %d\n", index); | 834 | IWL_DEBUG_RATE(priv, "leave: %d\n", index); |
832 | } | 835 | } |
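On the 5 GHz band only OFDM rates are valid, so the selected index is clamped to IWL_FIRST_OFDM_RATE before the CCK offset is subtracted; otherwise a CCK index would produce a negative rate index for mac80211. A standalone illustration of the clamp (the constant's value is assumed for the example, not taken from the driver):

#include <stdio.h>

#define IWL_FIRST_OFDM_RATE 4   /* assumed value for illustration */

int main(void)
{
        int index = 2;          /* a CCK index that must not reach 5 GHz */

        if (index < IWL_FIRST_OFDM_RATE)
                index = IWL_FIRST_OFDM_RATE;    /* the WARN_ON_ONCE path above */

        printf("rate index reported on 5 GHz: %d\n",
               index - IWL_FIRST_OFDM_RATE);
        return 0;
}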
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c index ea31d7674df3..a7b891453869 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c | |||
@@ -168,7 +168,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv) | |||
168 | 168 | ||
169 | memset(&cmd, 0, sizeof(cmd)); | 169 | memset(&cmd, 0, sizeof(cmd)); |
170 | iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); | 170 | iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); |
171 | memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); | 171 | memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib)); |
172 | if (!(cmd.radio_sensor_offset)) | 172 | if (!(cmd.radio_sensor_offset)) |
173 | cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; | 173 | cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; |
174 | 174 | ||
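The one-character fix above is the classic sizeof-of-a-pointer bug: sizeof(offset_calib) is the size of the pointer itself, while sizeof(*offset_calib) is the size of the value it points to, which is what memcpy() should copy. A standalone demonstration:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
        uint16_t src = 0x2b16, dst = 0;
        const uint16_t *offset_calib = &src;

        printf("sizeof(ptr)=%zu sizeof(*ptr)=%zu\n",
               sizeof(offset_calib), sizeof(*offset_calib));

        /* the fixed form: copy the pointed-to 16-bit value, not 4/8 bytes */
        memcpy(&dst, offset_calib, sizeof(*offset_calib));
        printf("copied 0x%04x\n", dst);
        return 0;
}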
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 7f6c58ebbc44..6057e18f688c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -1780,7 +1780,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv, | |||
1780 | IEEE80211_HW_SPECTRUM_MGMT | | 1780 | IEEE80211_HW_SPECTRUM_MGMT | |
1781 | IEEE80211_HW_REPORTS_TX_ACK_STATUS; | 1781 | IEEE80211_HW_REPORTS_TX_ACK_STATUS; |
1782 | 1782 | ||
1783 | /* | ||
1784 | * Including the following line will crash some AP's. This | ||
1785 | * workaround removes the stimulus which causes the crash until | ||
1786 | * the AP software can be fixed. | ||
1783 | hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; | 1787 | hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; |
1788 | */ | ||
1784 | 1789 | ||
1785 | hw->flags |= IEEE80211_HW_SUPPORTS_PS | | 1790 | hw->flags |= IEEE80211_HW_SUPPORTS_PS | |
1786 | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; | 1791 | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c index ca686dbf5893..f6d823f012db 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c | |||
@@ -925,6 +925,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb) | |||
925 | cmd = txq->cmd[cmd_index]; | 925 | cmd = txq->cmd[cmd_index]; |
926 | meta = &txq->meta[cmd_index]; | 926 | meta = &txq->meta[cmd_index]; |
927 | 927 | ||
928 | txq->time_stamp = jiffies; | ||
929 | |||
928 | iwlagn_unmap_tfd(trans, meta, &txq->tfds[index], | 930 | iwlagn_unmap_tfd(trans, meta, &txq->tfds[index], |
929 | DMA_BIDIRECTIONAL); | 931 | DMA_BIDIRECTIONAL); |
930 | 932 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index a5ddb39ca4a0..31c98509f7e6 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
@@ -3769,14 +3769,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i) | |||
3769 | rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, ®); | 3769 | rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, ®); |
3770 | 3770 | ||
3771 | /* Apparently the data is read from end to start */ | 3771 | /* Apparently the data is read from end to start */ |
3772 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, | 3772 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, ®); |
3773 | (u32 *)&rt2x00dev->eeprom[i]); | 3773 | /* The returned value is in CPU order, but eeprom is le */ |
3774 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, | 3774 | rt2x00dev->eeprom[i] = cpu_to_le32(reg); |
3775 | (u32 *)&rt2x00dev->eeprom[i + 2]); | 3775 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, ®); |
3776 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, | 3776 | *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); |
3777 | (u32 *)&rt2x00dev->eeprom[i + 4]); | 3777 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, ®); |
3778 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, | 3778 | *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg); |
3779 | (u32 *)&rt2x00dev->eeprom[i + 6]); | 3779 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, ®); |
3780 | *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg); | ||
3780 | 3781 | ||
3781 | mutex_unlock(&rt2x00dev->csr_mutex); | 3782 | mutex_unlock(&rt2x00dev->csr_mutex); |
3782 | } | 3783 | } |
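
The efuse hunk above stops aliasing the little-endian EEPROM shadow as a u32 destination for a register value that is in CPU byte order; it now reads into a local u32 and stores it with cpu_to_le32(). The same idea in portable user-space C, using htole32() as the equivalent conversion (array layout is illustrative):

    #define _DEFAULT_SOURCE
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint16_t eeprom[4] = { 0 };     /* little-endian 16-bit words on "disk" */
        uint32_t reg = 0x11223344;      /* register value in CPU byte order */

        uint32_t le = htole32(reg);     /* cpu_to_le32() equivalent */
        memcpy(&eeprom[0], &le, sizeof(le));   /* fills eeprom[0..1] */

        /* Byte order in the shadow is now fixed regardless of host CPU. */
        const uint8_t *p = (const uint8_t *)eeprom;
        printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
        return 0;
    }
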
@@ -3942,19 +3943,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) | |||
3942 | return -ENODEV; | 3943 | return -ENODEV; |
3943 | } | 3944 | } |
3944 | 3945 | ||
3945 | if (!rt2x00_rf(rt2x00dev, RF2820) && | 3946 | switch (rt2x00dev->chip.rf) { |
3946 | !rt2x00_rf(rt2x00dev, RF2850) && | 3947 | case RF2820: |
3947 | !rt2x00_rf(rt2x00dev, RF2720) && | 3948 | case RF2850: |
3948 | !rt2x00_rf(rt2x00dev, RF2750) && | 3949 | case RF2720: |
3949 | !rt2x00_rf(rt2x00dev, RF3020) && | 3950 | case RF2750: |
3950 | !rt2x00_rf(rt2x00dev, RF2020) && | 3951 | case RF3020: |
3951 | !rt2x00_rf(rt2x00dev, RF3021) && | 3952 | case RF2020: |
3952 | !rt2x00_rf(rt2x00dev, RF3022) && | 3953 | case RF3021: |
3953 | !rt2x00_rf(rt2x00dev, RF3052) && | 3954 | case RF3022: |
3954 | !rt2x00_rf(rt2x00dev, RF3320) && | 3955 | case RF3052: |
3955 | !rt2x00_rf(rt2x00dev, RF5370) && | 3956 | case RF3320: |
3956 | !rt2x00_rf(rt2x00dev, RF5390)) { | 3957 | case RF5370: |
3957 | ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); | 3958 | case RF5390: |
3959 | break; | ||
3960 | default: | ||
3961 | ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n", | ||
3962 | rt2x00dev->chip.rf); | ||
3958 | return -ENODEV; | 3963 | return -ENODEV; |
3959 | } | 3964 | } |
3960 | 3965 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 677b5ababbdd..f1565792f270 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c | |||
@@ -464,6 +464,15 @@ static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg) | |||
464 | int wcid, ack, pid; | 464 | int wcid, ack, pid; |
465 | int tx_wcid, tx_ack, tx_pid; | 465 | int tx_wcid, tx_ack, tx_pid; |
466 | 466 | ||
467 | if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || | ||
468 | !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) { | ||
469 | WARNING(entry->queue->rt2x00dev, | ||
470 | "Data pending for entry %u in queue %u\n", | ||
471 | entry->entry_idx, entry->queue->qid); | ||
472 | cond_resched(); | ||
473 | return false; | ||
474 | } | ||
475 | |||
467 | wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); | 476 | wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); |
468 | ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); | 477 | ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); |
469 | pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); | 478 | pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); |
@@ -529,13 +538,12 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev) | |||
529 | entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); | 538 | entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); |
530 | if (rt2800usb_txdone_entry_check(entry, reg)) | 539 | if (rt2800usb_txdone_entry_check(entry, reg)) |
531 | break; | 540 | break; |
541 | entry = NULL; | ||
532 | } | 542 | } |
533 | 543 | ||
534 | if (!entry || rt2x00queue_empty(queue)) | 544 | if (entry) |
535 | break; | 545 | rt2800_txdone_entry(entry, reg, |
536 | 546 | rt2800usb_get_txwi(entry)); | |
537 | rt2800_txdone_entry(entry, reg, | ||
538 | rt2800usb_get_txwi(entry)); | ||
539 | } | 547 | } |
540 | } | 548 | } |
541 | 549 | ||
@@ -559,8 +567,10 @@ static void rt2800usb_work_txdone(struct work_struct *work) | |||
559 | while (!rt2x00queue_empty(queue)) { | 567 | while (!rt2x00queue_empty(queue)) { |
560 | entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); | 568 | entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); |
561 | 569 | ||
562 | if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) | 570 | if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || |
571 | !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) | ||
563 | break; | 572 | break; |
573 | |||
564 | if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) | 574 | if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) |
565 | rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); | 575 | rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); |
566 | else if (rt2x00queue_status_timeout(entry)) | 576 | else if (rt2x00queue_status_timeout(entry)) |
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index b6b4542c2460..1e31050dafc9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c | |||
@@ -262,23 +262,20 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb) | |||
262 | struct queue_entry *entry = (struct queue_entry *)urb->context; | 262 | struct queue_entry *entry = (struct queue_entry *)urb->context; |
263 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; | 263 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; |
264 | 264 | ||
265 | if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) | 265 | if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) |
266 | return; | 266 | return; |
267 | |||
268 | if (rt2x00dev->ops->lib->tx_dma_done) | ||
269 | rt2x00dev->ops->lib->tx_dma_done(entry); | ||
270 | |||
271 | /* | ||
272 | * Report the frame as DMA done | ||
273 | */ | ||
274 | rt2x00lib_dmadone(entry); | ||
275 | |||
276 | /* | 267 | /* |
277 | * Check if the frame was correctly uploaded | 268 | * Check if the frame was correctly uploaded |
278 | */ | 269 | */ |
279 | if (urb->status) | 270 | if (urb->status) |
280 | set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); | 271 | set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); |
272 | /* | ||
273 | * Report the frame as DMA done | ||
274 | */ | ||
275 | rt2x00lib_dmadone(entry); | ||
281 | 276 | ||
277 | if (rt2x00dev->ops->lib->tx_dma_done) | ||
278 | rt2x00dev->ops->lib->tx_dma_done(entry); | ||
282 | /* | 279 | /* |
283 | * Schedule the delayed work for reading the TX status | 280 | * Schedule the delayed work for reading the TX status |
284 | * from the device. | 281 | * from the device. |
@@ -874,18 +871,8 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state) | |||
874 | { | 871 | { |
875 | struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); | 872 | struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); |
876 | struct rt2x00_dev *rt2x00dev = hw->priv; | 873 | struct rt2x00_dev *rt2x00dev = hw->priv; |
877 | int retval; | ||
878 | |||
879 | retval = rt2x00lib_suspend(rt2x00dev, state); | ||
880 | if (retval) | ||
881 | return retval; | ||
882 | 874 | ||
883 | /* | 875 | return rt2x00lib_suspend(rt2x00dev, state); |
884 | * Decrease usbdev refcount. | ||
885 | */ | ||
886 | usb_put_dev(interface_to_usbdev(usb_intf)); | ||
887 | |||
888 | return 0; | ||
889 | } | 876 | } |
890 | EXPORT_SYMBOL_GPL(rt2x00usb_suspend); | 877 | EXPORT_SYMBOL_GPL(rt2x00usb_suspend); |
891 | 878 | ||
@@ -894,8 +881,6 @@ int rt2x00usb_resume(struct usb_interface *usb_intf) | |||
894 | struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); | 881 | struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); |
895 | struct rt2x00_dev *rt2x00dev = hw->priv; | 882 | struct rt2x00_dev *rt2x00dev = hw->priv; |
896 | 883 | ||
897 | usb_get_dev(interface_to_usbdev(usb_intf)); | ||
898 | |||
899 | return rt2x00lib_resume(rt2x00dev); | 884 | return rt2x00lib_resume(rt2x00dev); |
900 | } | 885 | } |
901 | EXPORT_SYMBOL_GPL(rt2x00usb_resume); | 886 | EXPORT_SYMBOL_GPL(rt2x00usb_resume); |
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index 1bdc1aa305c0..04c4e9eb6ee6 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c | |||
@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, | |||
610 | 610 | ||
611 | mac->link_state = MAC80211_NOLINK; | 611 | mac->link_state = MAC80211_NOLINK; |
612 | memset(mac->bssid, 0, 6); | 612 | memset(mac->bssid, 0, 6); |
613 | |||
614 | /* reset sec info */ | ||
615 | rtl_cam_reset_sec_info(hw); | ||
616 | |||
617 | rtl_cam_reset_all_entry(hw); | ||
613 | mac->vendor = PEER_UNKNOWN; | 618 | mac->vendor = PEER_UNKNOWN; |
614 | 619 | ||
615 | RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, | 620 | RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, |
@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
1063 | *or clear all entry here. | 1068 | *or clear all entry here. |
1064 | */ | 1069 | */ |
1065 | rtl_cam_delete_one_entry(hw, mac_addr, key_idx); | 1070 | rtl_cam_delete_one_entry(hw, mac_addr, key_idx); |
1071 | |||
1072 | rtl_cam_reset_sec_info(hw); | ||
1073 | |||
1066 | break; | 1074 | break; |
1067 | default: | 1075 | default: |
1068 | RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, | 1076 | RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index c4161148e0d8..bc33b147f44f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c | |||
@@ -548,15 +548,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, | |||
548 | (tcb_desc->rts_use_shortpreamble ? 1 : 0) | 548 | (tcb_desc->rts_use_shortpreamble ? 1 : 0) |
549 | : (tcb_desc->rts_use_shortgi ? 1 : 0))); | 549 | : (tcb_desc->rts_use_shortgi ? 1 : 0))); |
550 | if (mac->bw_40) { | 550 | if (mac->bw_40) { |
551 | if (tcb_desc->packet_bw) { | 551 | if (rate_flag & IEEE80211_TX_RC_DUP_DATA) { |
552 | SET_TX_DESC_DATA_BW(txdesc, 1); | 552 | SET_TX_DESC_DATA_BW(txdesc, 1); |
553 | SET_TX_DESC_DATA_SC(txdesc, 3); | 553 | SET_TX_DESC_DATA_SC(txdesc, 3); |
554 | } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){ | ||
555 | SET_TX_DESC_DATA_BW(txdesc, 1); | ||
556 | SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc); | ||
554 | } else { | 557 | } else { |
555 | SET_TX_DESC_DATA_BW(txdesc, 0); | 558 | SET_TX_DESC_DATA_BW(txdesc, 0); |
556 | if (rate_flag & IEEE80211_TX_RC_DUP_DATA) | 559 | SET_TX_DESC_DATA_SC(txdesc, 0); |
557 | SET_TX_DESC_DATA_SC(txdesc, | 560 | } |
558 | mac->cur_40_prime_sc); | ||
559 | } | ||
560 | } else { | 561 | } else { |
561 | SET_TX_DESC_DATA_BW(txdesc, 0); | 562 | SET_TX_DESC_DATA_BW(txdesc, 0); |
562 | SET_TX_DESC_DATA_SC(txdesc, 0); | 563 | SET_TX_DESC_DATA_SC(txdesc, 0); |
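
The bandwidth hunk above distinguishes three cases when the interface runs at 40 MHz: duplicate data occupies both 20 MHz halves (SC = 3), a true 40 MHz rate follows the current primary subchannel, and everything else falls back to a plain 20 MHz descriptor. The decision reduces to a small table; a sketch with invented flag values:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical flags mirroring the rate-control hints used in the hunk. */
    #define RC_DUP_DATA    0x1   /* frame duplicated on both 20 MHz halves */
    #define RC_40_MHZ      0x2   /* frame sent as a true 40 MHz transmission */

    /* Pick the (bandwidth, subchannel) pair the descriptor should carry. */
    static void pick_bw(bool bw_40, unsigned rate_flag, unsigned prime_sc,
                        unsigned *bw, unsigned *sc)
    {
        if (bw_40 && (rate_flag & RC_DUP_DATA)) {
            *bw = 1; *sc = 3;            /* duplicate: occupy both sidebands */
        } else if (bw_40 && (rate_flag & RC_40_MHZ)) {
            *bw = 1; *sc = prime_sc;     /* real 40 MHz: follow primary channel */
        } else {
            *bw = 0; *sc = 0;            /* plain 20 MHz transmission */
        }
    }

    int main(void)
    {
        unsigned bw, sc;
        pick_bw(true, RC_40_MHZ, 2, &bw, &sc);
        printf("bw=%u sc=%u\n", bw, sc);
        return 0;
    }
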
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c index e047594794aa..f2838ae07da5 100644 --- a/drivers/net/wireless/wl12xx/acx.c +++ b/drivers/net/wireless/wl12xx/acx.c | |||
@@ -78,8 +78,6 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth) | |||
78 | auth->sleep_auth = sleep_auth; | 78 | auth->sleep_auth = sleep_auth; |
79 | 79 | ||
80 | ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); | 80 | ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); |
81 | if (ret < 0) | ||
82 | return ret; | ||
83 | 81 | ||
84 | out: | 82 | out: |
85 | kfree(auth); | 83 | kfree(auth); |
@@ -576,10 +574,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl) | |||
576 | 574 | ||
577 | ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, | 575 | ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, |
578 | detection, sizeof(*detection)); | 576 | detection, sizeof(*detection)); |
579 | if (ret < 0) { | 577 | if (ret < 0) |
580 | wl1271_warning("failed to set cca threshold: %d", ret); | 578 | wl1271_warning("failed to set cca threshold: %d", ret); |
581 | return ret; | ||
582 | } | ||
583 | 579 | ||
584 | out: | 580 | out: |
585 | kfree(detection); | 581 | kfree(detection); |
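
Both acx.c hunks remove an early return between a failed command and the kfree() at the out: label, so the buffer is released on the error path as well. The shape of the fix as a small user-space sketch (the command sender is invented):

    #include <stdio.h>
    #include <stdlib.h>

    /* Invented command sender; returns negative on failure. */
    static int send_cmd(const void *buf, size_t len)
    {
        (void)buf;
        return len ? 0 : -1;
    }

    static int set_threshold(int threshold)
    {
        int ret = -1;
        int *detection = malloc(sizeof(*detection));

        if (!detection)
            goto out;

        *detection = threshold;
        ret = send_cmd(detection, sizeof(*detection));
        if (ret < 0)
            fprintf(stderr, "failed to set threshold: %d\n", ret);
        /* No early return: fall through so the buffer is always freed. */

    out:
        free(detection);        /* single cleanup point, like the out: label */
        return ret;
    }

    int main(void)
    {
        return set_threshold(42) < 0;
    }
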
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c index ac2e5661397c..516a8980723c 100644 --- a/drivers/net/wireless/wl12xx/sdio.c +++ b/drivers/net/wireless/wl12xx/sdio.c | |||
@@ -164,7 +164,7 @@ static int wl1271_sdio_power_on(struct wl1271 *wl) | |||
164 | /* If enabled, tell runtime PM not to power off the card */ | 164 | /* If enabled, tell runtime PM not to power off the card */ |
165 | if (pm_runtime_enabled(&func->dev)) { | 165 | if (pm_runtime_enabled(&func->dev)) { |
166 | ret = pm_runtime_get_sync(&func->dev); | 166 | ret = pm_runtime_get_sync(&func->dev); |
167 | if (ret) | 167 | if (ret < 0) |
168 | goto out; | 168 | goto out; |
169 | } else { | 169 | } else { |
170 | /* Runtime PM is disabled: power up the card manually */ | 170 | /* Runtime PM is disabled: power up the card manually */ |
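
The one-line sdio.c change matters because pm_runtime_get_sync() can legitimately return a positive value (for instance when the device was already resumed); treating any non-zero return as failure turned that success case into an error. A sketch of the convention, with an invented helper in place of the real PM call:

    #include <stdio.h>

    /* Invented helper following the "negative errno on failure, zero or
     * positive on success" convention. */
    static int fake_runtime_get_sync(int already_active)
    {
        return already_active ? 1 : 0;  /* 1: was already powered, still OK */
    }

    static int power_on(int already_active)
    {
        int ret = fake_runtime_get_sync(already_active);

        if (ret < 0)            /* only negative values are errors */
            return ret;

        return 0;               /* 0 and positive both mean success */
    }

    int main(void)
    {
        printf("%d %d\n", power_on(0), power_on(1));  /* both succeed */
        return 0;
    }
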
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c index 5d5e1ef87206..4ae8effaee22 100644 --- a/drivers/net/wireless/wl12xx/testmode.c +++ b/drivers/net/wireless/wl12xx/testmode.c | |||
@@ -36,7 +36,6 @@ enum wl1271_tm_commands { | |||
36 | WL1271_TM_CMD_TEST, | 36 | WL1271_TM_CMD_TEST, |
37 | WL1271_TM_CMD_INTERROGATE, | 37 | WL1271_TM_CMD_INTERROGATE, |
38 | WL1271_TM_CMD_CONFIGURE, | 38 | WL1271_TM_CMD_CONFIGURE, |
39 | WL1271_TM_CMD_NVS_PUSH, | ||
40 | WL1271_TM_CMD_SET_PLT_MODE, | 39 | WL1271_TM_CMD_SET_PLT_MODE, |
41 | WL1271_TM_CMD_RECOVER, | 40 | WL1271_TM_CMD_RECOVER, |
42 | 41 | ||
@@ -139,12 +138,15 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) | |||
139 | 138 | ||
140 | if (ret < 0) { | 139 | if (ret < 0) { |
141 | wl1271_warning("testmode cmd interrogate failed: %d", ret); | 140 | wl1271_warning("testmode cmd interrogate failed: %d", ret); |
141 | kfree(cmd); | ||
142 | return ret; | 142 | return ret; |
143 | } | 143 | } |
144 | 144 | ||
145 | skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); | 145 | skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); |
146 | if (!skb) | 146 | if (!skb) { |
147 | kfree(cmd); | ||
147 | return -ENOMEM; | 148 | return -ENOMEM; |
149 | } | ||
148 | 150 | ||
149 | NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); | 151 | NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); |
150 | 152 | ||
@@ -187,48 +189,6 @@ static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[]) | |||
187 | return 0; | 189 | return 0; |
188 | } | 190 | } |
189 | 191 | ||
190 | static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[]) | ||
191 | { | ||
192 | int ret = 0; | ||
193 | size_t len; | ||
194 | void *buf; | ||
195 | |||
196 | wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push"); | ||
197 | |||
198 | if (!tb[WL1271_TM_ATTR_DATA]) | ||
199 | return -EINVAL; | ||
200 | |||
201 | buf = nla_data(tb[WL1271_TM_ATTR_DATA]); | ||
202 | len = nla_len(tb[WL1271_TM_ATTR_DATA]); | ||
203 | |||
204 | mutex_lock(&wl->mutex); | ||
205 | |||
206 | kfree(wl->nvs); | ||
207 | |||
208 | if ((wl->chip.id == CHIP_ID_1283_PG20) && | ||
209 | (len != sizeof(struct wl128x_nvs_file))) | ||
210 | return -EINVAL; | ||
211 | else if (len != sizeof(struct wl1271_nvs_file)) | ||
212 | return -EINVAL; | ||
213 | |||
214 | wl->nvs = kzalloc(len, GFP_KERNEL); | ||
215 | if (!wl->nvs) { | ||
216 | wl1271_error("could not allocate memory for the nvs file"); | ||
217 | ret = -ENOMEM; | ||
218 | goto out; | ||
219 | } | ||
220 | |||
221 | memcpy(wl->nvs, buf, len); | ||
222 | wl->nvs_len = len; | ||
223 | |||
224 | wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs"); | ||
225 | |||
226 | out: | ||
227 | mutex_unlock(&wl->mutex); | ||
228 | |||
229 | return ret; | ||
230 | } | ||
231 | |||
232 | static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[]) | 192 | static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[]) |
233 | { | 193 | { |
234 | u32 val; | 194 | u32 val; |
@@ -285,8 +245,6 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len) | |||
285 | return wl1271_tm_cmd_interrogate(wl, tb); | 245 | return wl1271_tm_cmd_interrogate(wl, tb); |
286 | case WL1271_TM_CMD_CONFIGURE: | 246 | case WL1271_TM_CMD_CONFIGURE: |
287 | return wl1271_tm_cmd_configure(wl, tb); | 247 | return wl1271_tm_cmd_configure(wl, tb); |
288 | case WL1271_TM_CMD_NVS_PUSH: | ||
289 | return wl1271_tm_cmd_nvs_push(wl, tb); | ||
290 | case WL1271_TM_CMD_SET_PLT_MODE: | 248 | case WL1271_TM_CMD_SET_PLT_MODE: |
291 | return wl1271_tm_cmd_set_plt_mode(wl, tb); | 249 | return wl1271_tm_cmd_set_plt_mode(wl, tb); |
292 | case WL1271_TM_CMD_RECOVER: | 250 | case WL1271_TM_CMD_RECOVER: |
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c index 749fdf070319..3ffd9c1acc0a 100644 --- a/drivers/pci/hotplug/pcihp_slot.c +++ b/drivers/pci/hotplug/pcihp_slot.c | |||
@@ -158,47 +158,6 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) | |||
158 | */ | 158 | */ |
159 | } | 159 | } |
160 | 160 | ||
161 | /* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device */ | ||
162 | static int pci_set_payload(struct pci_dev *dev) | ||
163 | { | ||
164 | int pos, ppos; | ||
165 | u16 pctl, psz; | ||
166 | u16 dctl, dsz, dcap, dmax; | ||
167 | struct pci_dev *parent; | ||
168 | |||
169 | parent = dev->bus->self; | ||
170 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
171 | if (!pos) | ||
172 | return 0; | ||
173 | |||
174 | /* Read Device MaxPayload capability and setting */ | ||
175 | pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl); | ||
176 | pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap); | ||
177 | dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; | ||
178 | dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD); | ||
179 | |||
180 | /* Read Parent MaxPayload setting */ | ||
181 | ppos = pci_find_capability(parent, PCI_CAP_ID_EXP); | ||
182 | if (!ppos) | ||
183 | return 0; | ||
184 | pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl); | ||
185 | psz = (pctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; | ||
186 | |||
187 | /* If parent payload > device max payload -> error | ||
188 | * If parent payload > device payload -> set speed | ||
189 | * If parent payload <= device payload -> do nothing | ||
190 | */ | ||
191 | if (psz > dmax) | ||
192 | return -1; | ||
193 | else if (psz > dsz) { | ||
194 | dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz); | ||
195 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, | ||
196 | (dctl & ~PCI_EXP_DEVCTL_PAYLOAD) + | ||
197 | (psz << 5)); | ||
198 | } | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | void pci_configure_slot(struct pci_dev *dev) | 161 | void pci_configure_slot(struct pci_dev *dev) |
203 | { | 162 | { |
204 | struct pci_dev *cdev; | 163 | struct pci_dev *cdev; |
@@ -210,9 +169,9 @@ void pci_configure_slot(struct pci_dev *dev) | |||
210 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) | 169 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) |
211 | return; | 170 | return; |
212 | 171 | ||
213 | ret = pci_set_payload(dev); | 172 | if (dev->bus && dev->bus->self) |
214 | if (ret) | 173 | pcie_bus_configure_settings(dev->bus, |
215 | dev_warn(&dev->dev, "could not set device max payload\n"); | 174 | dev->bus->self->pcie_mpss); |
216 | 175 | ||
217 | memset(&hpp, 0, sizeof(hpp)); | 176 | memset(&hpp, 0, sizeof(hpp)); |
218 | ret = pci_get_hp_params(dev, &hpp); | 177 | ret = pci_get_hp_params(dev, &hpp); |
diff --git a/drivers/pci/of.c b/drivers/pci/of.c index c94d37ec55c8..f0929934bb7a 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c | |||
@@ -55,7 +55,7 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus) | |||
55 | */ | 55 | */ |
56 | if (bus->bridge->of_node) | 56 | if (bus->bridge->of_node) |
57 | return of_node_get(bus->bridge->of_node); | 57 | return of_node_get(bus->bridge->of_node); |
58 | if (bus->bridge->parent->of_node) | 58 | if (bus->bridge->parent && bus->bridge->parent->of_node) |
59 | return of_node_get(bus->bridge->parent->of_node); | 59 | return of_node_get(bus->bridge->parent->of_node); |
60 | return NULL; | 60 | return NULL; |
61 | } | 61 | } |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 08a95b369d85..4e84fd4a4312 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -77,6 +77,8 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; | |||
77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; | 77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; |
78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; | 78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; |
79 | 79 | ||
80 | enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; | ||
81 | |||
80 | /* | 82 | /* |
81 | * The default CLS is used if arch didn't set CLS explicitly and not | 83 | * The default CLS is used if arch didn't set CLS explicitly and not |
82 | * all pci devices agree on the same value. Arch can override either | 84 | * all pci devices agree on the same value. Arch can override either |
@@ -3223,6 +3225,67 @@ out: | |||
3223 | EXPORT_SYMBOL(pcie_set_readrq); | 3225 | EXPORT_SYMBOL(pcie_set_readrq); |
3224 | 3226 | ||
3225 | /** | 3227 | /** |
3228 | * pcie_get_mps - get PCI Express maximum payload size | ||
3229 | * @dev: PCI device to query | ||
3230 | * | ||
3231 | * Returns maximum payload size in bytes | ||
3232 | * or appropriate error value. | ||
3233 | */ | ||
3234 | int pcie_get_mps(struct pci_dev *dev) | ||
3235 | { | ||
3236 | int ret, cap; | ||
3237 | u16 ctl; | ||
3238 | |||
3239 | cap = pci_pcie_cap(dev); | ||
3240 | if (!cap) | ||
3241 | return -EINVAL; | ||
3242 | |||
3243 | ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); | ||
3244 | if (!ret) | ||
3245 | ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); | ||
3246 | |||
3247 | return ret; | ||
3248 | } | ||
3249 | |||
3250 | /** | ||
3251 | * pcie_set_mps - set PCI Express maximum payload size | ||
3252 | * @dev: PCI device to query | ||
3253 | * @mps: maximum payload size in bytes | ||
3254 | * valid values are 128, 256, 512, 1024, 2048, 4096 | ||
3255 | * | ||
3256 | * If possible sets maximum payload size | ||
3257 | */ | ||
3258 | int pcie_set_mps(struct pci_dev *dev, int mps) | ||
3259 | { | ||
3260 | int cap, err = -EINVAL; | ||
3261 | u16 ctl, v; | ||
3262 | |||
3263 | if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) | ||
3264 | goto out; | ||
3265 | |||
3266 | v = ffs(mps) - 8; | ||
3267 | if (v > dev->pcie_mpss) | ||
3268 | goto out; | ||
3269 | v <<= 5; | ||
3270 | |||
3271 | cap = pci_pcie_cap(dev); | ||
3272 | if (!cap) | ||
3273 | goto out; | ||
3274 | |||
3275 | err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); | ||
3276 | if (err) | ||
3277 | goto out; | ||
3278 | |||
3279 | if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) { | ||
3280 | ctl &= ~PCI_EXP_DEVCTL_PAYLOAD; | ||
3281 | ctl |= v; | ||
3282 | err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl); | ||
3283 | } | ||
3284 | out: | ||
3285 | return err; | ||
3286 | } | ||
3287 | |||
3288 | /** | ||
3226 | * pci_select_bars - Make BAR mask from the type of resource | 3289 | * pci_select_bars - Make BAR mask from the type of resource |
3227 | * @dev: the PCI device for which BAR mask is made | 3290 | * @dev: the PCI device for which BAR mask is made |
3228 | * @flags: resource type mask to be selected | 3291 | * @flags: resource type mask to be selected |
@@ -3505,6 +3568,10 @@ static int __init pci_setup(char *str) | |||
3505 | pci_hotplug_io_size = memparse(str + 9, &str); | 3568 | pci_hotplug_io_size = memparse(str + 9, &str); |
3506 | } else if (!strncmp(str, "hpmemsize=", 10)) { | 3569 | } else if (!strncmp(str, "hpmemsize=", 10)) { |
3507 | pci_hotplug_mem_size = memparse(str + 10, &str); | 3570 | pci_hotplug_mem_size = memparse(str + 10, &str); |
3571 | } else if (!strncmp(str, "pcie_bus_safe", 13)) { | ||
3572 | pcie_bus_config = PCIE_BUS_SAFE; | ||
3573 | } else if (!strncmp(str, "pcie_bus_perf", 13)) { | ||
3574 | pcie_bus_config = PCIE_BUS_PERFORMANCE; | ||
3508 | } else { | 3575 | } else { |
3509 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 3576 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
3510 | str); | 3577 | str); |
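
pcie_set_mps() above converts a byte count into the 3-bit Device Control payload field: sizes are powers of two from 128 to 4096 and encode as field = ffs(bytes) - 8, with bytes = 128 << field going the other way (the driver then shifts the field into bits 7:5 of DEVCTL). A worked check of that arithmetic; the same file also wires up the pci=pcie_bus_safe and pci=pcie_bus_perf boot options that select the policy used later in probe.c:

    #include <stdio.h>
    #include <strings.h>        /* ffs() */

    int main(void)
    {
        /* field = ffs(bytes) - 8  <=>  bytes = 128 << field, for 128..4096 */
        for (int mps = 128; mps <= 4096; mps <<= 1) {
            int field = ffs(mps) - 8;
            printf("mps %4d -> field %d -> 128 << %d = %4d\n",
                   mps, field, field, 128 << field);
        }
        return 0;
    }
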
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index c8cee764b0de..b74084e9ca12 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -283,6 +283,8 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) | |||
283 | 283 | ||
284 | #endif /* CONFIG_PCI_IOV */ | 284 | #endif /* CONFIG_PCI_IOV */ |
285 | 285 | ||
286 | extern unsigned long pci_cardbus_resource_alignment(struct resource *); | ||
287 | |||
286 | static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, | 288 | static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, |
287 | struct resource *res) | 289 | struct resource *res) |
288 | { | 290 | { |
@@ -292,6 +294,8 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, | |||
292 | if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) | 294 | if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) |
293 | return pci_sriov_resource_alignment(dev, resno); | 295 | return pci_sriov_resource_alignment(dev, resno); |
294 | #endif | 296 | #endif |
297 | if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS) | ||
298 | return pci_cardbus_resource_alignment(res); | ||
295 | return resource_alignment(res); | 299 | return resource_alignment(res); |
296 | } | 300 | } |
297 | 301 | ||
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 795c9026d55f..f3f94a5c068f 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -856,6 +856,8 @@ void set_pcie_port_type(struct pci_dev *pdev) | |||
856 | pdev->pcie_cap = pos; | 856 | pdev->pcie_cap = pos; |
857 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); | 857 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); |
858 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; | 858 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; |
859 | pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16); | ||
860 | pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; | ||
859 | } | 861 | } |
860 | 862 | ||
861 | void set_pcie_hotplug_bridge(struct pci_dev *pdev) | 863 | void set_pcie_hotplug_bridge(struct pci_dev *pdev) |
@@ -1326,6 +1328,151 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) | |||
1326 | return nr; | 1328 | return nr; |
1327 | } | 1329 | } |
1328 | 1330 | ||
1331 | static int pcie_find_smpss(struct pci_dev *dev, void *data) | ||
1332 | { | ||
1333 | u8 *smpss = data; | ||
1334 | |||
1335 | if (!pci_is_pcie(dev)) | ||
1336 | return 0; | ||
1337 | |||
1338 | /* For PCIE hotplug enabled slots not connected directly to a | ||
1339 | * PCI-E root port, there can be problems when hotplugging | ||
1340 | * devices. This is due to the possibility of hotplugging a | ||
1341 | * device into the fabric with a smaller MPS than the devices ||
1342 | * currently running have configured. Modifying the MPS on the | ||
1343 | * running devices could cause a fatal bus error due to an | ||
1344 | * incoming frame being larger than the newly configured MPS. | ||
1345 | * To work around this, the MPS for the entire fabric must be | ||
1346 | * set to the minimum size. Any devices hotplugged into this | ||
1347 | * fabric will have the minimum MPS set. If the PCI hotplug | ||
1348 | * slot is directly connected to the root port and there are no ||
1349 | * other devices on the fabric (which seems to be the most | ||
1350 | * common case), then this is not an issue and MPS discovery | ||
1351 | * will occur as normal. | ||
1352 | */ | ||
1353 | if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || | ||
1354 | (dev->bus->self && | ||
1355 | dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))) | ||
1356 | *smpss = 0; | ||
1357 | |||
1358 | if (*smpss > dev->pcie_mpss) | ||
1359 | *smpss = dev->pcie_mpss; | ||
1360 | |||
1361 | return 0; | ||
1362 | } | ||
1363 | |||
1364 | static void pcie_write_mps(struct pci_dev *dev, int mps) | ||
1365 | { | ||
1366 | int rc, dev_mpss; | ||
1367 | |||
1368 | dev_mpss = 128 << dev->pcie_mpss; | ||
1369 | |||
1370 | if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { | ||
1371 | if (dev->bus->self) { | ||
1372 | dev_dbg(&dev->bus->dev, "Bus MPSS %d\n", | ||
1373 | 128 << dev->bus->self->pcie_mpss); | ||
1374 | |||
1375 | /* For "MPS Force Max", the assumption is made that | ||
1376 | * downstream communication will never be larger than | ||
1377 | * the MRRS. So, the MPS only needs to be configured | ||
1378 | * for the upstream communication. This being the case, | ||
1379 | * walk from the top down and set the MPS of the child | ||
1380 | * to that of the parent bus. | ||
1381 | */ | ||
1382 | mps = 128 << dev->bus->self->pcie_mpss; | ||
1383 | if (mps > dev_mpss) | ||
1384 | dev_warn(&dev->dev, "MPS configured higher than" | ||
1385 | " maximum supported by the device. If" | ||
1386 | " a bus issue occurs, try running with" | ||
1387 | " pci=pcie_bus_safe.\n"); | ||
1388 | } | ||
1389 | |||
1390 | dev->pcie_mpss = ffs(mps) - 8; | ||
1391 | } | ||
1392 | |||
1393 | rc = pcie_set_mps(dev, mps); | ||
1394 | if (rc) | ||
1395 | dev_err(&dev->dev, "Failed attempting to set the MPS\n"); | ||
1396 | } | ||
1397 | |||
1398 | static void pcie_write_mrrs(struct pci_dev *dev, int mps) | ||
1399 | { | ||
1400 | int rc, mrrs, dev_mpss; | ||
1401 | |||
1402 | /* In the "safe" case, do not configure the MRRS. There appear to be | ||
1403 | * issues with setting MRRS to 0 on a number of devices. | ||
1404 | */ | ||
1405 | |||
1406 | if (pcie_bus_config != PCIE_BUS_PERFORMANCE) | ||
1407 | return; | ||
1408 | |||
1409 | dev_mpss = 128 << dev->pcie_mpss; | ||
1410 | |||
1411 | /* For Max performance, the MRRS must be set to the largest supported | ||
1412 | * value. However, it cannot be configured larger than the MPS the | ||
1413 | * device or the bus can support. This assumes that the largest MRRS | ||
1414 | * available on the device cannot be smaller than the device MPSS. | ||
1415 | */ | ||
1416 | mrrs = min(mps, dev_mpss); | ||
1417 | |||
1418 | /* MRRS is a R/W register. Invalid values can be written, but a | ||
1419 | * subsequent read will verify if the value is acceptable or not. | ||
1420 | * If the MRRS value provided is not acceptable (e.g., too large), | ||
1421 | * shrink the value until it is acceptable to the HW. | ||
1422 | */ | ||
1423 | while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { | ||
1424 | dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value" | ||
1425 | " to %d. If any issues are encountered, please try " | ||
1426 | "running with pci=pcie_bus_safe\n", mrrs); | ||
1427 | rc = pcie_set_readrq(dev, mrrs); | ||
1428 | if (rc) | ||
1429 | dev_err(&dev->dev, | ||
1430 | "Failed attempting to set the MRRS\n"); | ||
1431 | |||
1432 | mrrs /= 2; | ||
1433 | } | ||
1434 | } | ||
1435 | |||
1436 | static int pcie_bus_configure_set(struct pci_dev *dev, void *data) | ||
1437 | { | ||
1438 | int mps = 128 << *(u8 *)data; | ||
1439 | |||
1440 | if (!pci_is_pcie(dev)) | ||
1441 | return 0; | ||
1442 | |||
1443 | dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", | ||
1444 | pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); | ||
1445 | |||
1446 | pcie_write_mps(dev, mps); | ||
1447 | pcie_write_mrrs(dev, mps); | ||
1448 | |||
1449 | dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", | ||
1450 | pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); | ||
1451 | |||
1452 | return 0; | ||
1453 | } | ||
1454 | |||
1455 | /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down, ||
1456 | * parents then children fashion. If this changes, then this code will not | ||
1457 | * work as designed. | ||
1458 | */ | ||
1459 | void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) | ||
1460 | { | ||
1461 | u8 smpss = mpss; | ||
1462 | |||
1463 | if (!pci_is_pcie(bus->self)) | ||
1464 | return; | ||
1465 | |||
1466 | if (pcie_bus_config == PCIE_BUS_SAFE) { | ||
1467 | pcie_find_smpss(bus->self, &smpss); | ||
1468 | pci_walk_bus(bus, pcie_find_smpss, &smpss); | ||
1469 | } | ||
1470 | |||
1471 | pcie_bus_configure_set(bus->self, &smpss); | ||
1472 | pci_walk_bus(bus, pcie_bus_configure_set, &smpss); | ||
1473 | } | ||
1474 | EXPORT_SYMBOL_GPL(pcie_bus_configure_settings); | ||
1475 | |||
1329 | unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) | 1476 | unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) |
1330 | { | 1477 | { |
1331 | unsigned int devfn, pass, max = bus->secondary; | 1478 | unsigned int devfn, pass, max = bus->secondary; |
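
pcie_write_mrrs() above relies on the read-request size being a read/write field that the hardware may refuse: it writes the candidate MRRS, reads it back, and halves the request until the value sticks or drops below 128. The loop's behaviour, simulated against a hypothetical device that clamps writes to its own maximum:

    #include <stdio.h>

    /* Hypothetical device: clamps any request above its supported maximum. */
    static int device_readrq = 256;
    static const int device_max_readrq = 512;

    static void set_readrq(int rrq)
    {
        device_readrq = (rrq <= device_max_readrq) ? rrq : device_max_readrq;
    }

    static int get_readrq(void)
    {
        return device_readrq;
    }

    int main(void)
    {
        int mrrs = 4096;        /* largest value the policy would like */

        /* Same shape as the driver loop: try, read back, halve until the
         * hardware accepts the value or no legal size is left. */
        while (mrrs != get_readrq() && mrrs >= 128) {
            printf("trying MRRS %d\n", mrrs);
            set_readrq(mrrs);
            mrrs /= 2;
        }
        printf("settled on MRRS %d\n", get_readrq());
        return 0;
    }
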
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 8a1d3c7863a8..784da9d36029 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -34,6 +34,7 @@ struct resource_list_x { | |||
34 | resource_size_t start; | 34 | resource_size_t start; |
35 | resource_size_t end; | 35 | resource_size_t end; |
36 | resource_size_t add_size; | 36 | resource_size_t add_size; |
37 | resource_size_t min_align; | ||
37 | unsigned long flags; | 38 | unsigned long flags; |
38 | }; | 39 | }; |
39 | 40 | ||
@@ -65,7 +66,7 @@ void pci_realloc(void) | |||
65 | */ | 66 | */ |
66 | static void add_to_list(struct resource_list_x *head, | 67 | static void add_to_list(struct resource_list_x *head, |
67 | struct pci_dev *dev, struct resource *res, | 68 | struct pci_dev *dev, struct resource *res, |
68 | resource_size_t add_size) | 69 | resource_size_t add_size, resource_size_t min_align) |
69 | { | 70 | { |
70 | struct resource_list_x *list = head; | 71 | struct resource_list_x *list = head; |
71 | struct resource_list_x *ln = list->next; | 72 | struct resource_list_x *ln = list->next; |
@@ -84,13 +85,16 @@ static void add_to_list(struct resource_list_x *head, | |||
84 | tmp->end = res->end; | 85 | tmp->end = res->end; |
85 | tmp->flags = res->flags; | 86 | tmp->flags = res->flags; |
86 | tmp->add_size = add_size; | 87 | tmp->add_size = add_size; |
88 | tmp->min_align = min_align; | ||
87 | list->next = tmp; | 89 | list->next = tmp; |
88 | } | 90 | } |
89 | 91 | ||
90 | static void add_to_failed_list(struct resource_list_x *head, | 92 | static void add_to_failed_list(struct resource_list_x *head, |
91 | struct pci_dev *dev, struct resource *res) | 93 | struct pci_dev *dev, struct resource *res) |
92 | { | 94 | { |
93 | add_to_list(head, dev, res, 0); | 95 | add_to_list(head, dev, res, |
96 | 0 /* dont care */, | ||
97 | 0 /* dont care */); | ||
94 | } | 98 | } |
95 | 99 | ||
96 | static void __dev_sort_resources(struct pci_dev *dev, | 100 | static void __dev_sort_resources(struct pci_dev *dev, |
@@ -121,18 +125,18 @@ static inline void reset_resource(struct resource *res) | |||
121 | } | 125 | } |
122 | 126 | ||
123 | /** | 127 | /** |
124 | * adjust_resources_sorted() - satisfy any additional resource requests | 128 | * reassign_resources_sorted() - satisfy any additional resource requests |
125 | * | 129 | * |
126 | * @add_head : head of the list tracking requests requiring additional | 130 | * @realloc_head : head of the list tracking requests requiring additional |
127 | * resources | 131 | * resources |
128 | * @head : head of the list tracking requests with allocated | 132 | * @head : head of the list tracking requests with allocated |
129 | * resources | 133 | * resources |
130 | * | 134 | * |
131 | * Walk through each element of the add_head and try to procure | 135 | * Walk through each element of the realloc_head and try to procure |
132 | * additional resources for the element, provided the element | 136 | * additional resources for the element, provided the element |
133 | * is in the head list. | 137 | * is in the head list. |
134 | */ | 138 | */ |
135 | static void adjust_resources_sorted(struct resource_list_x *add_head, | 139 | static void reassign_resources_sorted(struct resource_list_x *realloc_head, |
136 | struct resource_list *head) | 140 | struct resource_list *head) |
137 | { | 141 | { |
138 | struct resource *res; | 142 | struct resource *res; |
@@ -141,8 +145,8 @@ static void adjust_resources_sorted(struct resource_list_x *add_head, | |||
141 | resource_size_t add_size; | 145 | resource_size_t add_size; |
142 | int idx; | 146 | int idx; |
143 | 147 | ||
144 | prev = add_head; | 148 | prev = realloc_head; |
145 | for (list = add_head->next; list;) { | 149 | for (list = realloc_head->next; list;) { |
146 | res = list->res; | 150 | res = list->res; |
147 | /* skip resource that has been reset */ | 151 | /* skip resource that has been reset */ |
148 | if (!res->flags) | 152 | if (!res->flags) |
@@ -159,13 +163,17 @@ static void adjust_resources_sorted(struct resource_list_x *add_head, | |||
159 | 163 | ||
160 | idx = res - &list->dev->resource[0]; | 164 | idx = res - &list->dev->resource[0]; |
161 | add_size=list->add_size; | 165 | add_size=list->add_size; |
162 | if (!resource_size(res) && add_size) { | 166 | if (!resource_size(res)) { |
163 | res->end = res->start + add_size - 1; | 167 | res->start = list->start; |
164 | if(pci_assign_resource(list->dev, idx)) | 168 | res->end = res->start + add_size - 1; |
169 | if(pci_assign_resource(list->dev, idx)) | ||
165 | reset_resource(res); | 170 | reset_resource(res); |
166 | } else if (add_size) { | 171 | } else { |
167 | adjust_resource(res, res->start, | 172 | resource_size_t align = list->min_align; |
168 | resource_size(res) + add_size); | 173 | res->flags |= list->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); |
174 | if (pci_reassign_resource(list->dev, idx, add_size, align)) | ||
175 | dev_printk(KERN_DEBUG, &list->dev->dev, "failed to add optional resources res=%pR\n", | ||
176 | res); | ||
169 | } | 177 | } |
170 | out: | 178 | out: |
171 | tmp = list; | 179 | tmp = list; |
@@ -210,16 +218,16 @@ static void assign_requested_resources_sorted(struct resource_list *head, | |||
210 | } | 218 | } |
211 | 219 | ||
212 | static void __assign_resources_sorted(struct resource_list *head, | 220 | static void __assign_resources_sorted(struct resource_list *head, |
213 | struct resource_list_x *add_head, | 221 | struct resource_list_x *realloc_head, |
214 | struct resource_list_x *fail_head) | 222 | struct resource_list_x *fail_head) |
215 | { | 223 | { |
216 | /* Satisfy the must-have resource requests */ | 224 | /* Satisfy the must-have resource requests */ |
217 | assign_requested_resources_sorted(head, fail_head); | 225 | assign_requested_resources_sorted(head, fail_head); |
218 | 226 | ||
219 | /* Try to satisfy any additional nice-to-have resource | 227 | /* Try to satisfy any additional optional resource |
220 | requests */ | 228 | requests */ |
221 | if (add_head) | 229 | if (realloc_head) |
222 | adjust_resources_sorted(add_head, head); | 230 | reassign_resources_sorted(realloc_head, head); |
223 | free_list(resource_list, head); | 231 | free_list(resource_list, head); |
224 | } | 232 | } |
225 | 233 | ||
@@ -235,7 +243,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev, | |||
235 | } | 243 | } |
236 | 244 | ||
237 | static void pbus_assign_resources_sorted(const struct pci_bus *bus, | 245 | static void pbus_assign_resources_sorted(const struct pci_bus *bus, |
238 | struct resource_list_x *add_head, | 246 | struct resource_list_x *realloc_head, |
239 | struct resource_list_x *fail_head) | 247 | struct resource_list_x *fail_head) |
240 | { | 248 | { |
241 | struct pci_dev *dev; | 249 | struct pci_dev *dev; |
@@ -245,7 +253,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus, | |||
245 | list_for_each_entry(dev, &bus->devices, bus_list) | 253 | list_for_each_entry(dev, &bus->devices, bus_list) |
246 | __dev_sort_resources(dev, &head); | 254 | __dev_sort_resources(dev, &head); |
247 | 255 | ||
248 | __assign_resources_sorted(&head, add_head, fail_head); | 256 | __assign_resources_sorted(&head, realloc_head, fail_head); |
249 | } | 257 | } |
250 | 258 | ||
251 | void pci_setup_cardbus(struct pci_bus *bus) | 259 | void pci_setup_cardbus(struct pci_bus *bus) |
@@ -540,13 +548,27 @@ static resource_size_t calculate_memsize(resource_size_t size, | |||
540 | return size; | 548 | return size; |
541 | } | 549 | } |
542 | 550 | ||
551 | static resource_size_t get_res_add_size(struct resource_list_x *realloc_head, | ||
552 | struct resource *res) | ||
553 | { | ||
554 | struct resource_list_x *list; | ||
555 | |||
556 | /* check if it is in realloc_head list */ | ||
557 | for (list = realloc_head->next; list && list->res != res; | ||
558 | list = list->next); | ||
559 | if (list) | ||
560 | return list->add_size; | ||
561 | |||
562 | return 0; | ||
563 | } | ||
564 | |||
543 | /** | 565 | /** |
544 | * pbus_size_io() - size the io window of a given bus | 566 | * pbus_size_io() - size the io window of a given bus |
545 | * | 567 | * |
546 | * @bus : the bus | 568 | * @bus : the bus |
547 | * @min_size : the minimum io window that must be allocated | 569 | * @min_size : the minimum io window that must be allocated |
548 | * @add_size : additional optional io window | 570 | * @add_size : additional optional io window |
549 | * @add_head : track the additional io window on this list | 571 | * @realloc_head : track the additional io window on this list |
550 | * | 572 | * |
551 | * Sizing the IO windows of the PCI-PCI bridge is trivial, | 573 | * Sizing the IO windows of the PCI-PCI bridge is trivial, |
552 | * since these windows have 4K granularity and the IO ranges | 574 | * since these windows have 4K granularity and the IO ranges |
@@ -554,11 +576,12 @@ static resource_size_t calculate_memsize(resource_size_t size, | |||
554 | * We must be careful with the ISA aliasing though. | 576 | * We must be careful with the ISA aliasing though. |
555 | */ | 577 | */ |
556 | static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | 578 | static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, |
557 | resource_size_t add_size, struct resource_list_x *add_head) | 579 | resource_size_t add_size, struct resource_list_x *realloc_head) |
558 | { | 580 | { |
559 | struct pci_dev *dev; | 581 | struct pci_dev *dev; |
560 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); | 582 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); |
561 | unsigned long size = 0, size0 = 0, size1 = 0; | 583 | unsigned long size = 0, size0 = 0, size1 = 0; |
584 | resource_size_t children_add_size = 0; | ||
562 | 585 | ||
563 | if (!b_res) | 586 | if (!b_res) |
564 | return; | 587 | return; |
@@ -579,11 +602,16 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
579 | size += r_size; | 602 | size += r_size; |
580 | else | 603 | else |
581 | size1 += r_size; | 604 | size1 += r_size; |
605 | |||
606 | if (realloc_head) | ||
607 | children_add_size += get_res_add_size(realloc_head, r); | ||
582 | } | 608 | } |
583 | } | 609 | } |
584 | size0 = calculate_iosize(size, min_size, size1, | 610 | size0 = calculate_iosize(size, min_size, size1, |
585 | resource_size(b_res), 4096); | 611 | resource_size(b_res), 4096); |
586 | size1 = (!add_head || (add_head && !add_size)) ? size0 : | 612 | if (children_add_size > add_size) |
613 | add_size = children_add_size; | ||
614 | size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 : | ||
587 | calculate_iosize(size, min_size+add_size, size1, | 615 | calculate_iosize(size, min_size+add_size, size1, |
588 | resource_size(b_res), 4096); | 616 | resource_size(b_res), 4096); |
589 | if (!size0 && !size1) { | 617 | if (!size0 && !size1) { |
@@ -598,8 +626,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
598 | b_res->start = 4096; | 626 | b_res->start = 4096; |
599 | b_res->end = b_res->start + size0 - 1; | 627 | b_res->end = b_res->start + size0 - 1; |
600 | b_res->flags |= IORESOURCE_STARTALIGN; | 628 | b_res->flags |= IORESOURCE_STARTALIGN; |
601 | if (size1 > size0 && add_head) | 629 | if (size1 > size0 && realloc_head) |
602 | add_to_list(add_head, bus->self, b_res, size1-size0); | 630 | add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096); |
603 | } | 631 | } |
604 | 632 | ||
605 | /** | 633 | /** |
@@ -608,7 +636,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
608 | * @bus : the bus | 636 | * @bus : the bus |
609 | * @min_size : the minimum memory window that must be allocated | 637 | * @min_size : the minimum memory window that must be allocated |
610 | * @add_size : additional optional memory window | 638 | * @add_size : additional optional memory window |
611 | * @add_head : track the additional memory window on this list | 639 | * @realloc_head : track the additional memory window on this list |
612 | * | 640 | * |
613 | * Calculate the size of the bus and minimal alignment which | 641 | * Calculate the size of the bus and minimal alignment which |
614 | * guarantees that all child resources fit in this size. | 642 | * guarantees that all child resources fit in this size. |
@@ -616,7 +644,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
616 | static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | 644 | static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, |
617 | unsigned long type, resource_size_t min_size, | 645 | unsigned long type, resource_size_t min_size, |
618 | resource_size_t add_size, | 646 | resource_size_t add_size, |
619 | struct resource_list_x *add_head) | 647 | struct resource_list_x *realloc_head) |
620 | { | 648 | { |
621 | struct pci_dev *dev; | 649 | struct pci_dev *dev; |
622 | resource_size_t min_align, align, size, size0, size1; | 650 | resource_size_t min_align, align, size, size0, size1; |
@@ -624,6 +652,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
624 | int order, max_order; | 652 | int order, max_order; |
625 | struct resource *b_res = find_free_bus_resource(bus, type); | 653 | struct resource *b_res = find_free_bus_resource(bus, type); |
626 | unsigned int mem64_mask = 0; | 654 | unsigned int mem64_mask = 0; |
655 | resource_size_t children_add_size = 0; | ||
627 | 656 | ||
628 | if (!b_res) | 657 | if (!b_res) |
629 | return 0; | 658 | return 0; |
@@ -645,6 +674,16 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
645 | if (r->parent || (r->flags & mask) != type) | 674 | if (r->parent || (r->flags & mask) != type) |
646 | continue; | 675 | continue; |
647 | r_size = resource_size(r); | 676 | r_size = resource_size(r); |
677 | #ifdef CONFIG_PCI_IOV | ||
678 | /* put SRIOV requested res to the optional list */ | ||
679 | if (realloc_head && i >= PCI_IOV_RESOURCES && | ||
680 | i <= PCI_IOV_RESOURCE_END) { | ||
681 | r->end = r->start - 1; | ||
682 | add_to_list(realloc_head, dev, r, r_size, 0/* don't care */); ||
683 | children_add_size += r_size; | ||
684 | continue; | ||
685 | } | ||
686 | #endif | ||
648 | /* For bridges size != alignment */ | 687 | /* For bridges size != alignment */ |
649 | align = pci_resource_alignment(dev, r); | 688 | align = pci_resource_alignment(dev, r); |
650 | order = __ffs(align) - 20; | 689 | order = __ffs(align) - 20; |
@@ -665,6 +704,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
665 | if (order > max_order) | 704 | if (order > max_order) |
666 | max_order = order; | 705 | max_order = order; |
667 | mem64_mask &= r->flags & IORESOURCE_MEM_64; | 706 | mem64_mask &= r->flags & IORESOURCE_MEM_64; |
707 | |||
708 | if (realloc_head) | ||
709 | children_add_size += get_res_add_size(realloc_head, r); | ||
668 | } | 710 | } |
669 | } | 711 | } |
670 | align = 0; | 712 | align = 0; |
@@ -681,7 +723,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
681 | align += aligns[order]; | 723 | align += aligns[order]; |
682 | } | 724 | } |
683 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); | 725 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); |
684 | size1 = (!add_head || (add_head && !add_size)) ? size0 : | 726 | if (children_add_size > add_size) |
727 | add_size = children_add_size; | ||
728 | size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 : | ||
685 | calculate_memsize(size, min_size+add_size, 0, | 729 | calculate_memsize(size, min_size+add_size, 0, |
686 | resource_size(b_res), min_align); | 730 | resource_size(b_res), min_align); |
687 | if (!size0 && !size1) { | 731 | if (!size0 && !size1) { |
@@ -695,12 +739,22 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
695 | b_res->start = min_align; | 739 | b_res->start = min_align; |
696 | b_res->end = size0 + min_align - 1; | 740 | b_res->end = size0 + min_align - 1; |
697 | b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; | 741 | b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; |
698 | if (size1 > size0 && add_head) | 742 | if (size1 > size0 && realloc_head) |
699 | add_to_list(add_head, bus->self, b_res, size1-size0); | 743 | add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align); |
700 | return 1; | 744 | return 1; |
701 | } | 745 | } |
702 | 746 | ||
703 | static void pci_bus_size_cardbus(struct pci_bus *bus) | 747 | unsigned long pci_cardbus_resource_alignment(struct resource *res) |
748 | { | ||
749 | if (res->flags & IORESOURCE_IO) | ||
750 | return pci_cardbus_io_size; | ||
751 | if (res->flags & IORESOURCE_MEM) | ||
752 | return pci_cardbus_mem_size; | ||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | static void pci_bus_size_cardbus(struct pci_bus *bus, | ||
757 | struct resource_list_x *realloc_head) | ||
704 | { | 758 | { |
705 | struct pci_dev *bridge = bus->self; | 759 | struct pci_dev *bridge = bus->self; |
706 | struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; | 760 | struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; |
@@ -711,12 +765,14 @@ static void pci_bus_size_cardbus(struct pci_bus *bus) | |||
711 | * a fixed amount of bus space for CardBus bridges. | 765 | * a fixed amount of bus space for CardBus bridges. |
712 | */ | 766 | */ |
713 | b_res[0].start = 0; | 767 | b_res[0].start = 0; |
714 | b_res[0].end = pci_cardbus_io_size - 1; | ||
715 | b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; | 768 | b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; |
769 | if (realloc_head) | ||
770 | add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, 0 /* dont care */); | ||
716 | 771 | ||
717 | b_res[1].start = 0; | 772 | b_res[1].start = 0; |
718 | b_res[1].end = pci_cardbus_io_size - 1; | ||
719 | b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; | 773 | b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; |
774 | if (realloc_head) | ||
775 | add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* dont care */); | ||
720 | 776 | ||
721 | /* | 777 | /* |
722 | * Check whether prefetchable memory is supported | 778 | * Check whether prefetchable memory is supported |
@@ -736,21 +792,31 @@ static void pci_bus_size_cardbus(struct pci_bus *bus) | |||
736 | */ | 792 | */ |
737 | if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { | 793 | if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { |
738 | b_res[2].start = 0; | 794 | b_res[2].start = 0; |
739 | b_res[2].end = pci_cardbus_mem_size - 1; | ||
740 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN; | 795 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN; |
796 | if (realloc_head) | ||
797 | add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* dont care */); | ||
741 | 798 | ||
742 | b_res[3].start = 0; | 799 | b_res[3].start = 0; |
743 | b_res[3].end = pci_cardbus_mem_size - 1; | ||
744 | b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; | 800 | b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; |
801 | if (realloc_head) | ||
802 | add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* dont care */); | ||
745 | } else { | 803 | } else { |
746 | b_res[3].start = 0; | 804 | b_res[3].start = 0; |
747 | b_res[3].end = pci_cardbus_mem_size * 2 - 1; | ||
748 | b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; | 805 | b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; |
806 | if (realloc_head) | ||
807 | add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* dont care */); | ||
749 | } | 808 | } |
809 | |||
810 | /* set the size of the resource to zero, so that the resource does not | ||
811 | * get assigned during required-resource allocation cycle but gets assigned | ||
812 | * during the optional-resource allocation cycle. | ||
813 | */ | ||
814 | b_res[0].start = b_res[1].start = b_res[2].start = b_res[3].start = 1; | ||
815 | b_res[0].end = b_res[1].end = b_res[2].end = b_res[3].end = 0; | ||
750 | } | 816 | } |
751 | 817 | ||
752 | void __ref __pci_bus_size_bridges(struct pci_bus *bus, | 818 | void __ref __pci_bus_size_bridges(struct pci_bus *bus, |
753 | struct resource_list_x *add_head) | 819 | struct resource_list_x *realloc_head) |
754 | { | 820 | { |
755 | struct pci_dev *dev; | 821 | struct pci_dev *dev; |
756 | unsigned long mask, prefmask; | 822 | unsigned long mask, prefmask; |
@@ -763,12 +829,12 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, | |||
763 | 829 | ||
764 | switch (dev->class >> 8) { | 830 | switch (dev->class >> 8) { |
765 | case PCI_CLASS_BRIDGE_CARDBUS: | 831 | case PCI_CLASS_BRIDGE_CARDBUS: |
766 | pci_bus_size_cardbus(b); | 832 | pci_bus_size_cardbus(b, realloc_head); |
767 | break; | 833 | break; |
768 | 834 | ||
769 | case PCI_CLASS_BRIDGE_PCI: | 835 | case PCI_CLASS_BRIDGE_PCI: |
770 | default: | 836 | default: |
771 | __pci_bus_size_bridges(b, add_head); | 837 | __pci_bus_size_bridges(b, realloc_head); |
772 | break; | 838 | break; |
773 | } | 839 | } |
774 | } | 840 | } |
@@ -792,7 +858,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, | |||
792 | * Follow thru | 858 | * Follow thru |
793 | */ | 859 | */ |
794 | default: | 860 | default: |
795 | pbus_size_io(bus, 0, additional_io_size, add_head); | 861 | pbus_size_io(bus, 0, additional_io_size, realloc_head); |
796 | /* If the bridge supports prefetchable range, size it | 862 | /* If the bridge supports prefetchable range, size it |
797 | separately. If it doesn't, or its prefetchable window | 863 | separately. If it doesn't, or its prefetchable window |
798 | has already been allocated by arch code, try | 864 | has already been allocated by arch code, try |
@@ -800,11 +866,11 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, | |||
800 | resources. */ | 866 | resources. */ |
801 | mask = IORESOURCE_MEM; | 867 | mask = IORESOURCE_MEM; |
802 | prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; | 868 | prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; |
803 | if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head)) | 869 | if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, realloc_head)) |
804 | mask = prefmask; /* Success, size non-prefetch only. */ | 870 | mask = prefmask; /* Success, size non-prefetch only. */ |
805 | else | 871 | else |
806 | additional_mem_size += additional_mem_size; | 872 | additional_mem_size += additional_mem_size; |
807 | pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head); | 873 | pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, realloc_head); |
808 | break; | 874 | break; |
809 | } | 875 | } |
810 | } | 876 | } |
@@ -816,20 +882,20 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) | |||
816 | EXPORT_SYMBOL(pci_bus_size_bridges); | 882 | EXPORT_SYMBOL(pci_bus_size_bridges); |
817 | 883 | ||
818 | static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, | 884 | static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, |
819 | struct resource_list_x *add_head, | 885 | struct resource_list_x *realloc_head, |
820 | struct resource_list_x *fail_head) | 886 | struct resource_list_x *fail_head) |
821 | { | 887 | { |
822 | struct pci_bus *b; | 888 | struct pci_bus *b; |
823 | struct pci_dev *dev; | 889 | struct pci_dev *dev; |
824 | 890 | ||
825 | pbus_assign_resources_sorted(bus, add_head, fail_head); | 891 | pbus_assign_resources_sorted(bus, realloc_head, fail_head); |
826 | 892 | ||
827 | list_for_each_entry(dev, &bus->devices, bus_list) { | 893 | list_for_each_entry(dev, &bus->devices, bus_list) { |
828 | b = dev->subordinate; | 894 | b = dev->subordinate; |
829 | if (!b) | 895 | if (!b) |
830 | continue; | 896 | continue; |
831 | 897 | ||
832 | __pci_bus_assign_resources(b, add_head, fail_head); | 898 | __pci_bus_assign_resources(b, realloc_head, fail_head); |
833 | 899 | ||
834 | switch (dev->class >> 8) { | 900 | switch (dev->class >> 8) { |
835 | case PCI_CLASS_BRIDGE_PCI: | 901 | case PCI_CLASS_BRIDGE_PCI: |
@@ -1039,7 +1105,7 @@ void __init | |||
1039 | pci_assign_unassigned_resources(void) | 1105 | pci_assign_unassigned_resources(void) |
1040 | { | 1106 | { |
1041 | struct pci_bus *bus; | 1107 | struct pci_bus *bus; |
1042 | struct resource_list_x add_list; /* list of resources that | 1108 | struct resource_list_x realloc_list; /* list of resources that |
1043 | want additional resources */ | 1109 | want additional resources */ |
1044 | int tried_times = 0; | 1110 | int tried_times = 0; |
1045 | enum release_type rel_type = leaf_only; | 1111 | enum release_type rel_type = leaf_only; |
@@ -1052,7 +1118,7 @@ pci_assign_unassigned_resources(void) | |||
1052 | 1118 | ||
1053 | 1119 | ||
1054 | head.next = NULL; | 1120 | head.next = NULL; |
1055 | add_list.next = NULL; | 1121 | realloc_list.next = NULL; |
1056 | 1122 | ||
1057 | pci_try_num = max_depth + 1; | 1123 | pci_try_num = max_depth + 1; |
1058 | printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", | 1124 | printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", |
@@ -1062,12 +1128,12 @@ again: | |||
1062 | /* Depth first, calculate sizes and alignments of all | 1128 | /* Depth first, calculate sizes and alignments of all |
1063 | subordinate buses. */ | 1129 | subordinate buses. */ |
1064 | list_for_each_entry(bus, &pci_root_buses, node) | 1130 | list_for_each_entry(bus, &pci_root_buses, node) |
1065 | __pci_bus_size_bridges(bus, &add_list); | 1131 | __pci_bus_size_bridges(bus, &realloc_list); |
1066 | 1132 | ||
1067 | /* Depth last, allocate resources and update the hardware. */ | 1133 | /* Depth last, allocate resources and update the hardware. */ |
1068 | list_for_each_entry(bus, &pci_root_buses, node) | 1134 | list_for_each_entry(bus, &pci_root_buses, node) |
1069 | __pci_bus_assign_resources(bus, &add_list, &head); | 1135 | __pci_bus_assign_resources(bus, &realloc_list, &head); |
1070 | BUG_ON(add_list.next); | 1136 | BUG_ON(realloc_list.next); |
1071 | tried_times++; | 1137 | tried_times++; |
1072 | 1138 | ||
1073 | /* any device complain? */ | 1139 | /* any device complain? */ |
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 319f359906e8..51a9095c7da4 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -128,16 +128,16 @@ void pci_disable_bridge_window(struct pci_dev *dev) | |||
128 | } | 128 | } |
129 | #endif /* CONFIG_PCI_QUIRKS */ | 129 | #endif /* CONFIG_PCI_QUIRKS */ |
130 | 130 | ||
131 | |||
132 | |||
131 | static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, | 133 | static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, |
132 | int resno) | 134 | int resno, resource_size_t size, resource_size_t align) |
133 | { | 135 | { |
134 | struct resource *res = dev->resource + resno; | 136 | struct resource *res = dev->resource + resno; |
135 | resource_size_t size, min, align; | 137 | resource_size_t min; |
136 | int ret; | 138 | int ret; |
137 | 139 | ||
138 | size = resource_size(res); | ||
139 | min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; | 140 | min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; |
140 | align = pci_resource_alignment(dev, res); | ||
141 | 141 | ||
142 | /* First, try exact prefetching match.. */ | 142 | /* First, try exact prefetching match.. */ |
143 | ret = pci_bus_alloc_resource(bus, res, size, align, min, | 143 | ret = pci_bus_alloc_resource(bus, res, size, align, min, |
@@ -154,56 +154,101 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, | |||
154 | ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, | 154 | ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, |
155 | pcibios_align_resource, dev); | 155 | pcibios_align_resource, dev); |
156 | } | 156 | } |
157 | return ret; | ||
158 | } | ||
157 | 159 | ||
158 | if (ret < 0 && dev->fw_addr[resno]) { | 160 | static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, |
159 | struct resource *root, *conflict; | 161 | int resno, resource_size_t size) |
160 | resource_size_t start, end; | 162 | { |
163 | struct resource *root, *conflict; | ||
164 | resource_size_t start, end; | ||
165 | int ret = 0; | ||
161 | 166 | ||
162 | /* | 167 | if (res->flags & IORESOURCE_IO) |
163 | * If we failed to assign anything, let's try the address | 168 | root = &ioport_resource; |
164 | * where firmware left it. That at least has a chance of | 169 | else |
165 | * working, which is better than just leaving it disabled. | 170 | root = &iomem_resource; |
166 | */ | 171 | |
172 | start = res->start; | ||
173 | end = res->end; | ||
174 | res->start = dev->fw_addr[resno]; | ||
175 | res->end = res->start + size - 1; | ||
176 | dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", | ||
177 | resno, res); | ||
178 | conflict = request_resource_conflict(root, res); | ||
179 | if (conflict) { | ||
180 | dev_info(&dev->dev, | ||
181 | "BAR %d: %pR conflicts with %s %pR\n", resno, | ||
182 | res, conflict->name, conflict); | ||
183 | res->start = start; | ||
184 | res->end = end; | ||
185 | ret = 1; | ||
186 | } | ||
187 | return ret; | ||
188 | } | ||
189 | |||
190 | static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align) | ||
191 | { | ||
192 | struct resource *res = dev->resource + resno; | ||
193 | struct pci_bus *bus; | ||
194 | int ret; | ||
195 | char *type; | ||
167 | 196 | ||
168 | if (res->flags & IORESOURCE_IO) | 197 | bus = dev->bus; |
169 | root = &ioport_resource; | 198 | while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) { |
199 | if (!bus->parent || !bus->self->transparent) | ||
200 | break; | ||
201 | bus = bus->parent; | ||
202 | } | ||
203 | |||
204 | if (ret) { | ||
205 | if (res->flags & IORESOURCE_MEM) | ||
206 | if (res->flags & IORESOURCE_PREFETCH) | ||
207 | type = "mem pref"; | ||
208 | else | ||
209 | type = "mem"; | ||
210 | else if (res->flags & IORESOURCE_IO) | ||
211 | type = "io"; | ||
170 | else | 212 | else |
171 | root = &iomem_resource; | 213 | type = "unknown"; |
172 | 214 | dev_info(&dev->dev, | |
173 | start = res->start; | 215 | "BAR %d: can't assign %s (size %#llx)\n", |
174 | end = res->end; | 216 | resno, type, (unsigned long long) resource_size(res)); |
175 | res->start = dev->fw_addr[resno]; | ||
176 | res->end = res->start + size - 1; | ||
177 | dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", | ||
178 | resno, res); | ||
179 | conflict = request_resource_conflict(root, res); | ||
180 | if (conflict) { | ||
181 | dev_info(&dev->dev, | ||
182 | "BAR %d: %pR conflicts with %s %pR\n", resno, | ||
183 | res, conflict->name, conflict); | ||
184 | res->start = start; | ||
185 | res->end = end; | ||
186 | } else | ||
187 | ret = 0; | ||
188 | } | 217 | } |
189 | 218 | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize, | ||
223 | resource_size_t min_align) | ||
224 | { | ||
225 | struct resource *res = dev->resource + resno; | ||
226 | resource_size_t new_size; | ||
227 | int ret; | ||
228 | |||
229 | if (!res->parent) { | ||
230 | dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR " | ||
231 | "\n", resno, res); | ||
232 | return -EINVAL; | ||
233 | } | ||
234 | |||
235 | new_size = resource_size(res) + addsize + min_align; | ||
236 | ret = _pci_assign_resource(dev, resno, new_size, min_align); | ||
190 | if (!ret) { | 237 | if (!ret) { |
191 | res->flags &= ~IORESOURCE_STARTALIGN; | 238 | res->flags &= ~IORESOURCE_STARTALIGN; |
192 | dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); | 239 | dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); |
193 | if (resno < PCI_BRIDGE_RESOURCES) | 240 | if (resno < PCI_BRIDGE_RESOURCES) |
194 | pci_update_resource(dev, resno); | 241 | pci_update_resource(dev, resno); |
195 | } | 242 | } |
196 | |||
197 | return ret; | 243 | return ret; |
198 | } | 244 | } |
199 | 245 | ||
200 | int pci_assign_resource(struct pci_dev *dev, int resno) | 246 | int pci_assign_resource(struct pci_dev *dev, int resno) |
201 | { | 247 | { |
202 | struct resource *res = dev->resource + resno; | 248 | struct resource *res = dev->resource + resno; |
203 | resource_size_t align; | 249 | resource_size_t align, size; |
204 | struct pci_bus *bus; | 250 | struct pci_bus *bus; |
205 | int ret; | 251 | int ret; |
206 | char *type; | ||
207 | 252 | ||
208 | align = pci_resource_alignment(dev, res); | 253 | align = pci_resource_alignment(dev, res); |
209 | if (!align) { | 254 | if (!align) { |
@@ -213,34 +258,27 @@ int pci_assign_resource(struct pci_dev *dev, int resno) | |||
213 | } | 258 | } |
214 | 259 | ||
215 | bus = dev->bus; | 260 | bus = dev->bus; |
216 | while ((ret = __pci_assign_resource(bus, dev, resno))) { | 261 | size = resource_size(res); |
217 | if (bus->parent && bus->self->transparent) | 262 | ret = _pci_assign_resource(dev, resno, size, align); |
218 | bus = bus->parent; | ||
219 | else | ||
220 | bus = NULL; | ||
221 | if (bus) | ||
222 | continue; | ||
223 | break; | ||
224 | } | ||
225 | 263 | ||
226 | if (ret) { | 264 | /* |
227 | if (res->flags & IORESOURCE_MEM) | 265 | * If we failed to assign anything, let's try the address |
228 | if (res->flags & IORESOURCE_PREFETCH) | 266 | * where firmware left it. That at least has a chance of |
229 | type = "mem pref"; | 267 | * working, which is better than just leaving it disabled. |
230 | else | 268 | */ |
231 | type = "mem"; | 269 | if (ret < 0 && dev->fw_addr[resno]) |
232 | else if (res->flags & IORESOURCE_IO) | 270 | ret = pci_revert_fw_address(res, dev, resno, size); |
233 | type = "io"; | ||
234 | else | ||
235 | type = "unknown"; | ||
236 | dev_info(&dev->dev, | ||
237 | "BAR %d: can't assign %s (size %#llx)\n", | ||
238 | resno, type, (unsigned long long) resource_size(res)); | ||
239 | } | ||
240 | 271 | ||
272 | if (!ret) { | ||
273 | res->flags &= ~IORESOURCE_STARTALIGN; | ||
274 | dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); | ||
275 | if (resno < PCI_BRIDGE_RESOURCES) | ||
276 | pci_update_resource(dev, resno); | ||
277 | } | ||
241 | return ret; | 278 | return ret; |
242 | } | 279 | } |
243 | 280 | ||
281 | |||
244 | /* Sort resources by alignment */ | 282 | /* Sort resources by alignment */ |
245 | void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) | 283 | void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) |
246 | { | 284 | { |
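
A quick sketch of the arithmetic behind the new pci_reassign_resource() path above (illustrative values only; the walk up transparent bridges in _pci_assign_resource() keeps the spirit of the old loop in pci_assign_resource()):

    /* The reassign path retries the BAR with a grown size (illustration only). */
    typedef unsigned long long resource_size_t;

    static resource_size_t reassign_size(resource_size_t cur_size,
                                         resource_size_t addsize,
                                         resource_size_t min_align)
    {
            /* e.g. cur_size 0x1000, addsize 0x1000, min_align 0x1000 -> 0x3000 */
            return cur_size + addsize + min_align;
    }
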
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c index 7106b49b26e4..ffc5033ea9c9 100644 --- a/drivers/power/max8997_charger.c +++ b/drivers/power/max8997_charger.c | |||
@@ -20,6 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/module.h> | ||
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
25 | #include <linux/power_supply.h> | 26 | #include <linux/power_supply.h> |
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c index cc21fa2120be..ef8efadb58cb 100644 --- a/drivers/power/max8998_charger.c +++ b/drivers/power/max8998_charger.c | |||
@@ -20,6 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/module.h> | ||
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
25 | #include <linux/power_supply.h> | 26 | #include <linux/power_supply.h> |
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c index a675e31b4f13..d32d0d70f9ba 100644 --- a/drivers/power/s3c_adc_battery.c +++ b/drivers/power/s3c_adc_battery.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/s3c_adc_battery.h> | 20 | #include <linux/s3c_adc_battery.h> |
21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/module.h> | ||
23 | 24 | ||
24 | #include <plat/adc.h> | 25 | #include <plat/adc.h> |
25 | 26 | ||
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index ee893581d4b7..ebe77dd87daf 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c | |||
@@ -505,8 +505,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, | |||
505 | rdev->dev.dma_mask = &rdev->dma_mask; | 505 | rdev->dev.dma_mask = &rdev->dma_mask; |
506 | rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | 506 | rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); |
507 | 507 | ||
508 | if ((rdev->pef & RIO_PEF_INB_DOORBELL) && | 508 | if (rdev->dst_ops & RIO_DST_OPS_DOORBELL) |
509 | (rdev->dst_ops & RIO_DST_OPS_DOORBELL)) | ||
510 | rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], | 509 | rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], |
511 | 0, 0xffff); | 510 | 0, 0xffff); |
512 | 511 | ||
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 3195dbd3ec34..44e91e598f8d 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c | |||
@@ -639,7 +639,7 @@ EXPORT_SYMBOL_GPL(rtc_irq_unregister); | |||
639 | static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled) | 639 | static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled) |
640 | { | 640 | { |
641 | /* | 641 | /* |
642 | * We unconditionally cancel the timer here, because otherwise | 642 | * We always cancel the timer here first, because otherwise |
643 | * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); | 643 | * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); |
644 | * when we manage to start the timer before the callback | 644 | * when we manage to start the timer before the callback |
645 | * returns HRTIMER_RESTART. | 645 | * returns HRTIMER_RESTART. |
@@ -708,7 +708,7 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq) | |||
708 | int err = 0; | 708 | int err = 0; |
709 | unsigned long flags; | 709 | unsigned long flags; |
710 | 710 | ||
711 | if (freq <= 0 || freq > 5000) | 711 | if (freq <= 0 || freq > RTC_MAX_FREQ) |
712 | return -EINVAL; | 712 | return -EINVAL; |
713 | retry: | 713 | retry: |
714 | spin_lock_irqsave(&rtc->irq_task_lock, flags); | 714 | spin_lock_irqsave(&rtc->irq_task_lock, flags); |
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c index 335551d333b2..14a42a1edc66 100644 --- a/drivers/rtc/rtc-ep93xx.c +++ b/drivers/rtc/rtc-ep93xx.c | |||
@@ -36,6 +36,7 @@ | |||
36 | */ | 36 | */ |
37 | struct ep93xx_rtc { | 37 | struct ep93xx_rtc { |
38 | void __iomem *mmio_base; | 38 | void __iomem *mmio_base; |
39 | struct rtc_device *rtc; | ||
39 | }; | 40 | }; |
40 | 41 | ||
41 | static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, | 42 | static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, |
@@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) | |||
130 | { | 131 | { |
131 | struct ep93xx_rtc *ep93xx_rtc; | 132 | struct ep93xx_rtc *ep93xx_rtc; |
132 | struct resource *res; | 133 | struct resource *res; |
133 | struct rtc_device *rtc; | ||
134 | int err; | 134 | int err; |
135 | 135 | ||
136 | ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL); | 136 | ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL); |
@@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) | |||
151 | return -ENXIO; | 151 | return -ENXIO; |
152 | 152 | ||
153 | pdev->dev.platform_data = ep93xx_rtc; | 153 | pdev->dev.platform_data = ep93xx_rtc; |
154 | platform_set_drvdata(pdev, rtc); | 154 | platform_set_drvdata(pdev, ep93xx_rtc); |
155 | 155 | ||
156 | rtc = rtc_device_register(pdev->name, | 156 | ep93xx_rtc->rtc = rtc_device_register(pdev->name, |
157 | &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); | 157 | &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); |
158 | if (IS_ERR(rtc)) { | 158 | if (IS_ERR(ep93xx_rtc->rtc)) { |
159 | err = PTR_ERR(rtc); | 159 | err = PTR_ERR(ep93xx_rtc->rtc); |
160 | goto exit; | 160 | goto exit; |
161 | } | 161 | } |
162 | 162 | ||
@@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) | |||
167 | return 0; | 167 | return 0; |
168 | 168 | ||
169 | fail: | 169 | fail: |
170 | rtc_device_unregister(rtc); | 170 | rtc_device_unregister(ep93xx_rtc->rtc); |
171 | exit: | 171 | exit: |
172 | platform_set_drvdata(pdev, NULL); | 172 | platform_set_drvdata(pdev, NULL); |
173 | pdev->dev.platform_data = NULL; | 173 | pdev->dev.platform_data = NULL; |
@@ -176,11 +176,11 @@ exit: | |||
176 | 176 | ||
177 | static int __exit ep93xx_rtc_remove(struct platform_device *pdev) | 177 | static int __exit ep93xx_rtc_remove(struct platform_device *pdev) |
178 | { | 178 | { |
179 | struct rtc_device *rtc = platform_get_drvdata(pdev); | 179 | struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev); |
180 | 180 | ||
181 | sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); | 181 | sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); |
182 | platform_set_drvdata(pdev, NULL); | 182 | platform_set_drvdata(pdev, NULL); |
183 | rtc_device_unregister(rtc); | 183 | rtc_device_unregister(ep93xx_rtc->rtc); |
184 | pdev->dev.platform_data = NULL; | 184 | pdev->dev.platform_data = NULL; |
185 | 185 | ||
186 | return 0; | 186 | return 0; |
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c index 2dd3c0163272..d93a9608b1f0 100644 --- a/drivers/rtc/rtc-imxdi.c +++ b/drivers/rtc/rtc-imxdi.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/module.h> | 35 | #include <linux/module.h> |
36 | #include <linux/platform_device.h> | 36 | #include <linux/platform_device.h> |
37 | #include <linux/rtc.h> | 37 | #include <linux/rtc.h> |
38 | #include <linux/sched.h> | ||
38 | #include <linux/workqueue.h> | 39 | #include <linux/workqueue.h> |
39 | 40 | ||
40 | /* DryIce Register Definitions */ | 41 | /* DryIce Register Definitions */ |
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c index 075f1708deae..c4cf05731118 100644 --- a/drivers/rtc/rtc-lib.c +++ b/drivers/rtc/rtc-lib.c | |||
@@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) | |||
85 | time -= tm->tm_hour * 3600; | 85 | time -= tm->tm_hour * 3600; |
86 | tm->tm_min = time / 60; | 86 | tm->tm_min = time / 60; |
87 | tm->tm_sec = time - tm->tm_min * 60; | 87 | tm->tm_sec = time - tm->tm_min * 60; |
88 | |||
89 | tm->tm_isdst = 0; | ||
88 | } | 90 | } |
89 | EXPORT_SYMBOL(rtc_time_to_tm); | 91 | EXPORT_SYMBOL(rtc_time_to_tm); |
90 | 92 | ||
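
A minimal usage sketch of why the rtc_time_to_tm() change matters, assuming a caller that looks at the whole struct (the epoch value is arbitrary):

    #include <linux/rtc.h>

    static void rtc_tm_example(void)
    {
            struct rtc_time tm;

            rtc_time_to_tm(1314662400UL, &tm);   /* 2011-08-30 00:00:00 UTC */
            /*
             * Before this change tm.tm_isdst was left uninitialized; now it is
             * deterministically 0, so printing or comparing the full struct
             * gives stable results.
             */
    }
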
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 9329dbb9ebab..7639ab906f02 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
@@ -51,6 +51,27 @@ static enum s3c_cpu_type s3c_rtc_cpu_type; | |||
51 | 51 | ||
52 | static DEFINE_SPINLOCK(s3c_rtc_pie_lock); | 52 | static DEFINE_SPINLOCK(s3c_rtc_pie_lock); |
53 | 53 | ||
54 | static void s3c_rtc_alarm_clk_enable(bool enable) | ||
55 | { | ||
56 | static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock); | ||
57 | static bool alarm_clk_enabled; | ||
58 | unsigned long irq_flags; | ||
59 | |||
60 | spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags); | ||
61 | if (enable) { | ||
62 | if (!alarm_clk_enabled) { | ||
63 | clk_enable(rtc_clk); | ||
64 | alarm_clk_enabled = true; | ||
65 | } | ||
66 | } else { | ||
67 | if (alarm_clk_enabled) { | ||
68 | clk_disable(rtc_clk); | ||
69 | alarm_clk_enabled = false; | ||
70 | } | ||
71 | } | ||
72 | spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags); | ||
73 | } | ||
74 | |||
54 | /* IRQ Handlers */ | 75 | /* IRQ Handlers */ |
55 | 76 | ||
56 | static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) | 77 | static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) |
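
The new helper above keeps the alarm's clock reference balanced no matter how often enable or disable is requested. A condensed userspace model of the guard (illustration only; the spinlock is omitted and the clk calls are stubbed out here):

    #include <stdbool.h>
    #include <stdio.h>

    static int clk_refcount;                         /* stand-in for the real clk framework */
    static void clk_get_ref(void) { clk_refcount++; }
    static void clk_put_ref(void) { clk_refcount--; }

    static void alarm_clk_enable(bool enable)
    {
            static bool alarm_clk_enabled;

            if (enable && !alarm_clk_enabled) {
                    clk_get_ref();
                    alarm_clk_enabled = true;
            } else if (!enable && alarm_clk_enabled) {
                    clk_put_ref();
                    alarm_clk_enabled = false;
            }
    }

    int main(void)
    {
            alarm_clk_enable(true);
            alarm_clk_enable(true);          /* no extra reference taken */
            alarm_clk_enable(false);
            printf("%d\n", clk_refcount);    /* 0: references stay balanced */
            return 0;
    }
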
@@ -64,6 +85,9 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) | |||
64 | writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP); | 85 | writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP); |
65 | 86 | ||
66 | clk_disable(rtc_clk); | 87 | clk_disable(rtc_clk); |
88 | |||
89 | s3c_rtc_alarm_clk_enable(false); | ||
90 | |||
67 | return IRQ_HANDLED; | 91 | return IRQ_HANDLED; |
68 | } | 92 | } |
69 | 93 | ||
@@ -97,6 +121,8 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) | |||
97 | writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); | 121 | writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); |
98 | clk_disable(rtc_clk); | 122 | clk_disable(rtc_clk); |
99 | 123 | ||
124 | s3c_rtc_alarm_clk_enable(enabled); | ||
125 | |||
100 | return 0; | 126 | return 0; |
101 | } | 127 | } |
102 | 128 | ||
@@ -152,10 +178,6 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) | |||
152 | goto retry_get_time; | 178 | goto retry_get_time; |
153 | } | 179 | } |
154 | 180 | ||
155 | pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n", | ||
156 | 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, | ||
157 | rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); | ||
158 | |||
159 | rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); | 181 | rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); |
160 | rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); | 182 | rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); |
161 | rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); | 183 | rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); |
@@ -164,6 +186,11 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) | |||
164 | rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); | 186 | rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); |
165 | 187 | ||
166 | rtc_tm->tm_year += 100; | 188 | rtc_tm->tm_year += 100; |
189 | |||
190 | pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n", | ||
191 | 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, | ||
192 | rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); | ||
193 | |||
167 | rtc_tm->tm_mon -= 1; | 194 | rtc_tm->tm_mon -= 1; |
168 | 195 | ||
169 | clk_disable(rtc_clk); | 196 | clk_disable(rtc_clk); |
@@ -269,10 +296,9 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
269 | clk_enable(rtc_clk); | 296 | clk_enable(rtc_clk); |
270 | pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n", | 297 | pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n", |
271 | alrm->enabled, | 298 | alrm->enabled, |
272 | 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, | 299 | 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, |
273 | tm->tm_hour, tm->tm_min, tm->tm_sec); | 300 | tm->tm_hour, tm->tm_min, tm->tm_sec); |
274 | 301 | ||
275 | |||
276 | alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; | 302 | alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; |
277 | writeb(0x00, base + S3C2410_RTCALM); | 303 | writeb(0x00, base + S3C2410_RTCALM); |
278 | 304 | ||
@@ -319,49 +345,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) | |||
319 | return 0; | 345 | return 0; |
320 | } | 346 | } |
321 | 347 | ||
322 | static int s3c_rtc_open(struct device *dev) | ||
323 | { | ||
324 | struct platform_device *pdev = to_platform_device(dev); | ||
325 | struct rtc_device *rtc_dev = platform_get_drvdata(pdev); | ||
326 | int ret; | ||
327 | |||
328 | ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq, | ||
329 | IRQF_DISABLED, "s3c2410-rtc alarm", rtc_dev); | ||
330 | |||
331 | if (ret) { | ||
332 | dev_err(dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret); | ||
333 | return ret; | ||
334 | } | ||
335 | |||
336 | ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq, | ||
337 | IRQF_DISABLED, "s3c2410-rtc tick", rtc_dev); | ||
338 | |||
339 | if (ret) { | ||
340 | dev_err(dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret); | ||
341 | goto tick_err; | ||
342 | } | ||
343 | |||
344 | return ret; | ||
345 | |||
346 | tick_err: | ||
347 | free_irq(s3c_rtc_alarmno, rtc_dev); | ||
348 | return ret; | ||
349 | } | ||
350 | |||
351 | static void s3c_rtc_release(struct device *dev) | ||
352 | { | ||
353 | struct platform_device *pdev = to_platform_device(dev); | ||
354 | struct rtc_device *rtc_dev = platform_get_drvdata(pdev); | ||
355 | |||
356 | /* do not clear AIE here, it may be needed for wake */ | ||
357 | |||
358 | free_irq(s3c_rtc_alarmno, rtc_dev); | ||
359 | free_irq(s3c_rtc_tickno, rtc_dev); | ||
360 | } | ||
361 | |||
362 | static const struct rtc_class_ops s3c_rtcops = { | 348 | static const struct rtc_class_ops s3c_rtcops = { |
363 | .open = s3c_rtc_open, | ||
364 | .release = s3c_rtc_release, | ||
365 | .read_time = s3c_rtc_gettime, | 349 | .read_time = s3c_rtc_gettime, |
366 | .set_time = s3c_rtc_settime, | 350 | .set_time = s3c_rtc_settime, |
367 | .read_alarm = s3c_rtc_getalarm, | 351 | .read_alarm = s3c_rtc_getalarm, |
@@ -425,6 +409,9 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev) | |||
425 | { | 409 | { |
426 | struct rtc_device *rtc = platform_get_drvdata(dev); | 410 | struct rtc_device *rtc = platform_get_drvdata(dev); |
427 | 411 | ||
412 | free_irq(s3c_rtc_alarmno, rtc); | ||
413 | free_irq(s3c_rtc_tickno, rtc); | ||
414 | |||
428 | platform_set_drvdata(dev, NULL); | 415 | platform_set_drvdata(dev, NULL); |
429 | rtc_device_unregister(rtc); | 416 | rtc_device_unregister(rtc); |
430 | 417 | ||
@@ -548,10 +535,32 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) | |||
548 | 535 | ||
549 | s3c_rtc_setfreq(&pdev->dev, 1); | 536 | s3c_rtc_setfreq(&pdev->dev, 1); |
550 | 537 | ||
538 | ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq, | ||
539 | IRQF_DISABLED, "s3c2410-rtc alarm", rtc); | ||
540 | if (ret) { | ||
541 | dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret); | ||
542 | goto err_alarm_irq; | ||
543 | } | ||
544 | |||
545 | ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq, | ||
546 | IRQF_DISABLED, "s3c2410-rtc tick", rtc); | ||
547 | if (ret) { | ||
548 | dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret); | ||
549 | free_irq(s3c_rtc_alarmno, rtc); | ||
550 | goto err_tick_irq; | ||
551 | } | ||
552 | |||
551 | clk_disable(rtc_clk); | 553 | clk_disable(rtc_clk); |
552 | 554 | ||
553 | return 0; | 555 | return 0; |
554 | 556 | ||
557 | err_tick_irq: | ||
558 | free_irq(s3c_rtc_alarmno, rtc); | ||
559 | |||
560 | err_alarm_irq: | ||
561 | platform_set_drvdata(pdev, NULL); | ||
562 | rtc_device_unregister(rtc); | ||
563 | |||
555 | err_nortc: | 564 | err_nortc: |
556 | s3c_rtc_enable(pdev, 0); | 565 | s3c_rtc_enable(pdev, 0); |
557 | clk_disable(rtc_clk); | 566 | clk_disable(rtc_clk); |
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index 9a81f778d6b2..20687d55e7a7 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c | |||
@@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc) | |||
362 | int res; | 362 | int res; |
363 | u8 rd_reg; | 363 | u8 rd_reg; |
364 | 364 | ||
365 | #ifdef CONFIG_LOCKDEP | ||
366 | /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which | ||
367 | * we don't want and can't tolerate. Although it might be | ||
368 | * friendlier not to borrow this thread context... | ||
369 | */ | ||
370 | local_irq_enable(); | ||
371 | #endif | ||
372 | |||
373 | res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); | 365 | res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); |
374 | if (res) | 366 | if (res) |
375 | goto out; | 367 | goto out; |
@@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = { | |||
428 | static int __devinit twl_rtc_probe(struct platform_device *pdev) | 420 | static int __devinit twl_rtc_probe(struct platform_device *pdev) |
429 | { | 421 | { |
430 | struct rtc_device *rtc; | 422 | struct rtc_device *rtc; |
431 | int ret = 0; | 423 | int ret = -EINVAL; |
432 | int irq = platform_get_irq(pdev, 0); | 424 | int irq = platform_get_irq(pdev, 0); |
433 | u8 rd_reg; | 425 | u8 rd_reg; |
434 | 426 | ||
435 | if (irq <= 0) | 427 | if (irq <= 0) |
436 | return -EINVAL; | 428 | goto out1; |
437 | |||
438 | rtc = rtc_device_register(pdev->name, | ||
439 | &pdev->dev, &twl_rtc_ops, THIS_MODULE); | ||
440 | if (IS_ERR(rtc)) { | ||
441 | ret = PTR_ERR(rtc); | ||
442 | dev_err(&pdev->dev, "can't register RTC device, err %ld\n", | ||
443 | PTR_ERR(rtc)); | ||
444 | goto out0; | ||
445 | |||
446 | } | ||
447 | |||
448 | platform_set_drvdata(pdev, rtc); | ||
449 | 429 | ||
450 | ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); | 430 | ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); |
451 | if (ret < 0) | 431 | if (ret < 0) |
@@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev) | |||
462 | if (ret < 0) | 442 | if (ret < 0) |
463 | goto out1; | 443 | goto out1; |
464 | 444 | ||
465 | ret = request_irq(irq, twl_rtc_interrupt, | ||
466 | IRQF_TRIGGER_RISING, | ||
467 | dev_name(&rtc->dev), rtc); | ||
468 | if (ret < 0) { | ||
469 | dev_err(&pdev->dev, "IRQ is not free.\n"); | ||
470 | goto out1; | ||
471 | } | ||
472 | |||
473 | if (twl_class_is_6030()) { | 445 | if (twl_class_is_6030()) { |
474 | twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, | 446 | twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, |
475 | REG_INT_MSK_LINE_A); | 447 | REG_INT_MSK_LINE_A); |
@@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev) | |||
480 | /* Check RTC module status, Enable if it is off */ | 452 | /* Check RTC module status, Enable if it is off */ |
481 | ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG); | 453 | ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG); |
482 | if (ret < 0) | 454 | if (ret < 0) |
483 | goto out2; | 455 | goto out1; |
484 | 456 | ||
485 | if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) { | 457 | if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) { |
486 | dev_info(&pdev->dev, "Enabling TWL-RTC.\n"); | 458 | dev_info(&pdev->dev, "Enabling TWL-RTC.\n"); |
487 | rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M; | 459 | rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M; |
488 | ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG); | 460 | ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG); |
489 | if (ret < 0) | 461 | if (ret < 0) |
490 | goto out2; | 462 | goto out1; |
491 | } | 463 | } |
492 | 464 | ||
493 | /* init cached IRQ enable bits */ | 465 | /* init cached IRQ enable bits */ |
494 | ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); | 466 | ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); |
495 | if (ret < 0) | 467 | if (ret < 0) |
468 | goto out1; | ||
469 | |||
470 | rtc = rtc_device_register(pdev->name, | ||
471 | &pdev->dev, &twl_rtc_ops, THIS_MODULE); | ||
472 | if (IS_ERR(rtc)) { | ||
473 | ret = PTR_ERR(rtc); | ||
474 | dev_err(&pdev->dev, "can't register RTC device, err %ld\n", | ||
475 | PTR_ERR(rtc)); | ||
476 | goto out1; | ||
477 | } | ||
478 | |||
479 | ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt, | ||
480 | IRQF_TRIGGER_RISING, | ||
481 | dev_name(&rtc->dev), rtc); | ||
482 | if (ret < 0) { | ||
483 | dev_err(&pdev->dev, "IRQ is not free.\n"); | ||
496 | goto out2; | 484 | goto out2; |
485 | } | ||
497 | 486 | ||
498 | return ret; | 487 | platform_set_drvdata(pdev, rtc); |
488 | return 0; | ||
499 | 489 | ||
500 | out2: | 490 | out2: |
501 | free_irq(irq, rtc); | ||
502 | out1: | ||
503 | rtc_device_unregister(rtc); | 491 | rtc_device_unregister(rtc); |
504 | out0: | 492 | out1: |
505 | return ret; | 493 | return ret; |
506 | } | 494 | } |
507 | 495 | ||
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index eb4e034378cd..f1a2016829fc 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c | |||
@@ -249,6 +249,7 @@ static int dasd_ioctl_reset_profile(struct dasd_block *block) | |||
249 | static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) | 249 | static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) |
250 | { | 250 | { |
251 | struct dasd_profile_info_t *data; | 251 | struct dasd_profile_info_t *data; |
252 | int rc = 0; | ||
252 | 253 | ||
253 | data = kmalloc(sizeof(*data), GFP_KERNEL); | 254 | data = kmalloc(sizeof(*data), GFP_KERNEL); |
254 | if (!data) | 255 | if (!data) |
@@ -279,11 +280,14 @@ static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) | |||
279 | spin_unlock_bh(&block->profile.lock); | 280 | spin_unlock_bh(&block->profile.lock); |
280 | } else { | 281 | } else { |
281 | spin_unlock_bh(&block->profile.lock); | 282 | spin_unlock_bh(&block->profile.lock); |
282 | return -EIO; | 283 | rc = -EIO; |
284 | goto out; | ||
283 | } | 285 | } |
284 | if (copy_to_user(argp, data, sizeof(*data))) | 286 | if (copy_to_user(argp, data, sizeof(*data))) |
285 | return -EFAULT; | 287 | rc = -EFAULT; |
286 | return 0; | 288 | out: |
289 | kfree(data); | ||
290 | return rc; | ||
287 | } | 291 | } |
288 | #else | 292 | #else |
289 | static int dasd_ioctl_reset_profile(struct dasd_block *block) | 293 | static int dasd_ioctl_reset_profile(struct dasd_block *block) |
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index be55fb2b1b1c..837e010299a8 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -383,8 +383,10 @@ static int sclp_attach_storage(u8 id) | |||
383 | switch (sccb->header.response_code) { | 383 | switch (sccb->header.response_code) { |
384 | case 0x0020: | 384 | case 0x0020: |
385 | set_bit(id, sclp_storage_ids); | 385 | set_bit(id, sclp_storage_ids); |
386 | for (i = 0; i < sccb->assigned; i++) | 386 | for (i = 0; i < sccb->assigned; i++) { |
387 | sclp_unassign_storage(sccb->entries[i] >> 16); | 387 | if (sccb->entries[i]) |
388 | sclp_unassign_storage(sccb->entries[i] >> 16); | ||
389 | } | ||
388 | break; | 390 | break; |
389 | default: | 391 | default: |
390 | rc = -EIO; | 392 | rc = -EIO; |
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 9ae80cd5953b..dba72a4e6a1c 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c | |||
@@ -563,7 +563,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, | |||
563 | nopout_wqe->itt = ((u16)task->itt | | 563 | nopout_wqe->itt = ((u16)task->itt | |
564 | (ISCSI_TASK_TYPE_MPATH << | 564 | (ISCSI_TASK_TYPE_MPATH << |
565 | ISCSI_TMF_REQUEST_TYPE_SHIFT)); | 565 | ISCSI_TMF_REQUEST_TYPE_SHIFT)); |
566 | nopout_wqe->ttt = nopout_hdr->ttt; | 566 | nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt); |
567 | nopout_wqe->flags = 0; | 567 | nopout_wqe->flags = 0; |
568 | if (!unsol) | 568 | if (!unsol) |
569 | nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; | 569 | nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; |
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 83aa3ac52c40..9d3d81778af1 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
@@ -432,6 +432,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) | |||
432 | u8 flogi_maddr[ETH_ALEN]; | 432 | u8 flogi_maddr[ETH_ALEN]; |
433 | const struct net_device_ops *ops; | 433 | const struct net_device_ops *ops; |
434 | 434 | ||
435 | rtnl_lock(); | ||
436 | |||
435 | /* | 437 | /* |
436 | * Don't listen for Ethernet packets anymore. | 438 | * Don't listen for Ethernet packets anymore. |
437 | * synchronize_net() ensures that the packet handlers are not running | 439 | * synchronize_net() ensures that the packet handlers are not running |
@@ -461,6 +463,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) | |||
461 | " specific feature for LLD.\n"); | 463 | " specific feature for LLD.\n"); |
462 | } | 464 | } |
463 | 465 | ||
466 | rtnl_unlock(); | ||
467 | |||
464 | /* Release the self-reference taken during fcoe_interface_create() */ | 468 | /* Release the self-reference taken during fcoe_interface_create() */ |
465 | fcoe_interface_put(fcoe); | 469 | fcoe_interface_put(fcoe); |
466 | } | 470 | } |
@@ -1951,11 +1955,8 @@ static void fcoe_destroy_work(struct work_struct *work) | |||
1951 | fcoe_if_destroy(port->lport); | 1955 | fcoe_if_destroy(port->lport); |
1952 | 1956 | ||
1953 | /* Do not tear down the fcoe interface for NPIV port */ | 1957 | /* Do not tear down the fcoe interface for NPIV port */ |
1954 | if (!npiv) { | 1958 | if (!npiv) |
1955 | rtnl_lock(); | ||
1956 | fcoe_interface_cleanup(fcoe); | 1959 | fcoe_interface_cleanup(fcoe); |
1957 | rtnl_unlock(); | ||
1958 | } | ||
1959 | 1960 | ||
1960 | mutex_unlock(&fcoe_config_mutex); | 1961 | mutex_unlock(&fcoe_config_mutex); |
1961 | } | 1962 | } |
@@ -2009,8 +2010,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) | |||
2009 | printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", | 2010 | printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", |
2010 | netdev->name); | 2011 | netdev->name); |
2011 | rc = -EIO; | 2012 | rc = -EIO; |
2013 | rtnl_unlock(); | ||
2012 | fcoe_interface_cleanup(fcoe); | 2014 | fcoe_interface_cleanup(fcoe); |
2013 | goto out_nodev; | 2015 | goto out_nortnl; |
2014 | } | 2016 | } |
2015 | 2017 | ||
2016 | /* Make this the "master" N_Port */ | 2018 | /* Make this the "master" N_Port */ |
@@ -2027,6 +2029,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) | |||
2027 | 2029 | ||
2028 | out_nodev: | 2030 | out_nodev: |
2029 | rtnl_unlock(); | 2031 | rtnl_unlock(); |
2032 | out_nortnl: | ||
2030 | mutex_unlock(&fcoe_config_mutex); | 2033 | mutex_unlock(&fcoe_config_mutex); |
2031 | return rc; | 2034 | return rc; |
2032 | } | 2035 | } |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index ec61bdb833ac..b200b736b000 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -676,6 +676,16 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, | |||
676 | BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); | 676 | BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); |
677 | removed[*nremoved] = h->dev[entry]; | 677 | removed[*nremoved] = h->dev[entry]; |
678 | (*nremoved)++; | 678 | (*nremoved)++; |
679 | |||
680 | /* | ||
681 | * New physical devices won't have target/lun assigned yet | ||
682 | * so we need to preserve the values in the slot we are replacing. | ||
683 | */ | ||
684 | if (new_entry->target == -1) { | ||
685 | new_entry->target = h->dev[entry]->target; | ||
686 | new_entry->lun = h->dev[entry]->lun; | ||
687 | } | ||
688 | |||
679 | h->dev[entry] = new_entry; | 689 | h->dev[entry] = new_entry; |
680 | added[*nadded] = new_entry; | 690 | added[*nadded] = new_entry; |
681 | (*nadded)++; | 691 | (*nadded)++; |
@@ -1548,10 +1558,17 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, | |||
1548 | } | 1558 | } |
1549 | 1559 | ||
1550 | static int hpsa_update_device_info(struct ctlr_info *h, | 1560 | static int hpsa_update_device_info(struct ctlr_info *h, |
1551 | unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) | 1561 | unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, |
1562 | unsigned char *is_OBDR_device) | ||
1552 | { | 1563 | { |
1553 | #define OBDR_TAPE_INQ_SIZE 49 | 1564 | |
1565 | #define OBDR_SIG_OFFSET 43 | ||
1566 | #define OBDR_TAPE_SIG "$DR-10" | ||
1567 | #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) | ||
1568 | #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) | ||
1569 | |||
1554 | unsigned char *inq_buff; | 1570 | unsigned char *inq_buff; |
1571 | unsigned char *obdr_sig; | ||
1555 | 1572 | ||
1556 | inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); | 1573 | inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); |
1557 | if (!inq_buff) | 1574 | if (!inq_buff) |
@@ -1583,6 +1600,16 @@ static int hpsa_update_device_info(struct ctlr_info *h, | |||
1583 | else | 1600 | else |
1584 | this_device->raid_level = RAID_UNKNOWN; | 1601 | this_device->raid_level = RAID_UNKNOWN; |
1585 | 1602 | ||
1603 | if (is_OBDR_device) { | ||
1604 | /* See if this is a One-Button-Disaster-Recovery device | ||
1605 | * by looking for "$DR-10" at offset 43 in inquiry data. | ||
1606 | */ | ||
1607 | obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; | ||
1608 | *is_OBDR_device = (this_device->devtype == TYPE_ROM && | ||
1609 | strncmp(obdr_sig, OBDR_TAPE_SIG, | ||
1610 | OBDR_SIG_LEN) == 0); | ||
1611 | } | ||
1612 | |||
1586 | kfree(inq_buff); | 1613 | kfree(inq_buff); |
1587 | return 0; | 1614 | return 0; |
1588 | 1615 | ||
@@ -1716,7 +1743,7 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h, | |||
1716 | return 0; | 1743 | return 0; |
1717 | } | 1744 | } |
1718 | 1745 | ||
1719 | if (hpsa_update_device_info(h, scsi3addr, this_device)) | 1746 | if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) |
1720 | return 0; | 1747 | return 0; |
1721 | (*nmsa2xxx_enclosures)++; | 1748 | (*nmsa2xxx_enclosures)++; |
1722 | hpsa_set_bus_target_lun(this_device, bus, target, 0); | 1749 | hpsa_set_bus_target_lun(this_device, bus, target, 0); |
@@ -1808,7 +1835,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1808 | */ | 1835 | */ |
1809 | struct ReportLUNdata *physdev_list = NULL; | 1836 | struct ReportLUNdata *physdev_list = NULL; |
1810 | struct ReportLUNdata *logdev_list = NULL; | 1837 | struct ReportLUNdata *logdev_list = NULL; |
1811 | unsigned char *inq_buff = NULL; | ||
1812 | u32 nphysicals = 0; | 1838 | u32 nphysicals = 0; |
1813 | u32 nlogicals = 0; | 1839 | u32 nlogicals = 0; |
1814 | u32 ndev_allocated = 0; | 1840 | u32 ndev_allocated = 0; |
@@ -1824,11 +1850,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1824 | GFP_KERNEL); | 1850 | GFP_KERNEL); |
1825 | physdev_list = kzalloc(reportlunsize, GFP_KERNEL); | 1851 | physdev_list = kzalloc(reportlunsize, GFP_KERNEL); |
1826 | logdev_list = kzalloc(reportlunsize, GFP_KERNEL); | 1852 | logdev_list = kzalloc(reportlunsize, GFP_KERNEL); |
1827 | inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); | ||
1828 | tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); | 1853 | tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); |
1829 | 1854 | ||
1830 | if (!currentsd || !physdev_list || !logdev_list || | 1855 | if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { |
1831 | !inq_buff || !tmpdevice) { | ||
1832 | dev_err(&h->pdev->dev, "out of memory\n"); | 1856 | dev_err(&h->pdev->dev, "out of memory\n"); |
1833 | goto out; | 1857 | goto out; |
1834 | } | 1858 | } |
@@ -1863,7 +1887,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1863 | /* adjust our table of devices */ | 1887 | /* adjust our table of devices */ |
1864 | nmsa2xxx_enclosures = 0; | 1888 | nmsa2xxx_enclosures = 0; |
1865 | for (i = 0; i < nphysicals + nlogicals + 1; i++) { | 1889 | for (i = 0; i < nphysicals + nlogicals + 1; i++) { |
1866 | u8 *lunaddrbytes; | 1890 | u8 *lunaddrbytes, is_OBDR = 0; |
1867 | 1891 | ||
1868 | /* Figure out where the LUN ID info is coming from */ | 1892 | /* Figure out where the LUN ID info is coming from */ |
1869 | lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, | 1893 | lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, |
@@ -1874,7 +1898,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1874 | continue; | 1898 | continue; |
1875 | 1899 | ||
1876 | /* Get device type, vendor, model, device id */ | 1900 | /* Get device type, vendor, model, device id */ |
1877 | if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) | 1901 | if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, |
1902 | &is_OBDR)) | ||
1878 | continue; /* skip it if we can't talk to it. */ | 1903 | continue; /* skip it if we can't talk to it. */ |
1879 | figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, | 1904 | figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, |
1880 | tmpdevice); | 1905 | tmpdevice); |
@@ -1898,7 +1923,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1898 | hpsa_set_bus_target_lun(this_device, bus, target, lun); | 1923 | hpsa_set_bus_target_lun(this_device, bus, target, lun); |
1899 | 1924 | ||
1900 | switch (this_device->devtype) { | 1925 | switch (this_device->devtype) { |
1901 | case TYPE_ROM: { | 1926 | case TYPE_ROM: |
1902 | /* We don't *really* support actual CD-ROM devices, | 1927 | /* We don't *really* support actual CD-ROM devices, |
1903 | * just "One Button Disaster Recovery" tape drive | 1928 | * just "One Button Disaster Recovery" tape drive |
1904 | * which temporarily pretends to be a CD-ROM drive. | 1929 | * which temporarily pretends to be a CD-ROM drive. |
@@ -1906,15 +1931,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1906 | * device by checking for "$DR-10" in bytes 43-48 of | 1931 | * device by checking for "$DR-10" in bytes 43-48 of |
1907 | * the inquiry data. | 1932 | * the inquiry data. |
1908 | */ | 1933 | */ |
1909 | char obdr_sig[7]; | 1934 | if (is_OBDR) |
1910 | #define OBDR_TAPE_SIG "$DR-10" | 1935 | ncurrent++; |
1911 | strncpy(obdr_sig, &inq_buff[43], 6); | ||
1912 | obdr_sig[6] = '\0'; | ||
1913 | if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) | ||
1914 | /* Not OBDR device, ignore it. */ | ||
1915 | break; | ||
1916 | } | ||
1917 | ncurrent++; | ||
1918 | break; | 1936 | break; |
1919 | case TYPE_DISK: | 1937 | case TYPE_DISK: |
1920 | if (i < nphysicals) | 1938 | if (i < nphysicals) |
@@ -1947,7 +1965,6 @@ out: | |||
1947 | for (i = 0; i < ndev_allocated; i++) | 1965 | for (i = 0; i < ndev_allocated; i++) |
1948 | kfree(currentsd[i]); | 1966 | kfree(currentsd[i]); |
1949 | kfree(currentsd); | 1967 | kfree(currentsd); |
1950 | kfree(inq_buff); | ||
1951 | kfree(physdev_list); | 1968 | kfree(physdev_list); |
1952 | kfree(logdev_list); | 1969 | kfree(logdev_list); |
1953 | } | 1970 | } |
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 26072f1e9852..6981b773a88d 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c | |||
@@ -531,6 +531,9 @@ static void sci_controller_process_completions(struct isci_host *ihost) | |||
531 | break; | 531 | break; |
532 | 532 | ||
533 | case SCU_COMPLETION_TYPE_EVENT: | 533 | case SCU_COMPLETION_TYPE_EVENT: |
534 | sci_controller_event_completion(ihost, ent); | ||
535 | break; | ||
536 | |||
534 | case SCU_COMPLETION_TYPE_NOTIFY: { | 537 | case SCU_COMPLETION_TYPE_NOTIFY: { |
535 | event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << | 538 | event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << |
536 | (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); | 539 | (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); |
@@ -1091,6 +1094,7 @@ static void isci_host_completion_routine(unsigned long data) | |||
1091 | struct isci_request *request; | 1094 | struct isci_request *request; |
1092 | struct isci_request *next_request; | 1095 | struct isci_request *next_request; |
1093 | struct sas_task *task; | 1096 | struct sas_task *task; |
1097 | u16 active; | ||
1094 | 1098 | ||
1095 | INIT_LIST_HEAD(&completed_request_list); | 1099 | INIT_LIST_HEAD(&completed_request_list); |
1096 | INIT_LIST_HEAD(&errored_request_list); | 1100 | INIT_LIST_HEAD(&errored_request_list); |
@@ -1181,6 +1185,13 @@ static void isci_host_completion_routine(unsigned long data) | |||
1181 | } | 1185 | } |
1182 | } | 1186 | } |
1183 | 1187 | ||
1188 | /* the coalescence timeout doubles at each encoding step, so | ||
1189 | * update it based on the ilog2 value of the outstanding requests | ||
1190 | */ | ||
1191 | active = isci_tci_active(ihost); | ||
1192 | writel(SMU_ICC_GEN_VAL(NUMBER, active) | | ||
1193 | SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)), | ||
1194 | &ihost->smu_registers->interrupt_coalesce_control); | ||
1184 | } | 1195 | } |
1185 | 1196 | ||
1186 | /** | 1197 | /** |
@@ -1471,7 +1482,7 @@ static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm) | |||
1471 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); | 1482 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1472 | 1483 | ||
1473 | /* set the default interrupt coalescence number and timeout value. */ | 1484 | /* set the default interrupt coalescence number and timeout value. */ |
1474 | sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); | 1485 | sci_controller_set_interrupt_coalescence(ihost, 0, 0); |
1475 | } | 1486 | } |
1476 | 1487 | ||
1477 | static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) | 1488 | static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) |
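
The dynamic interrupt-coalescing update in the hunk above grows the timer encoding by one for every doubling of outstanding commands. A rough standalone model follows; ISCI_COALESCE_BASE comes from this patch, while the helper itself is only an illustration and assumes at least one request is active:

    #include <linux/log2.h>

    #define ISCI_COALESCE_BASE 9    /* 9 == 3 to 5us interrupt delay per command */

    /*
     *   active =   1  ->  9 + 0 =  9
     *   active =  16  ->  9 + 4 = 13
     *   active = 256  ->  9 + 8 = 17
     */
    static unsigned int coalesce_timer_encoding(unsigned int active)
    {
            return ISCI_COALESCE_BASE + ilog2(active);      /* assumes active >= 1 */
    }
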
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 062101a39f79..9f33831a2f04 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h | |||
@@ -369,6 +369,9 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) | |||
369 | #define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1)) | 369 | #define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1)) |
370 | #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) | 370 | #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) |
371 | 371 | ||
372 | /* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */ | ||
373 | #define ISCI_COALESCE_BASE 9 | ||
374 | |||
372 | /* expander attached sata devices require 3 rnc slots */ | 375 | /* expander attached sata devices require 3 rnc slots */ |
373 | static inline int sci_remote_device_node_count(struct isci_remote_device *idev) | 376 | static inline int sci_remote_device_node_count(struct isci_remote_device *idev) |
374 | { | 377 | { |
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 61e0d09e2b57..29aa34efb0f5 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c | |||
@@ -59,10 +59,19 @@ | |||
59 | #include <linux/firmware.h> | 59 | #include <linux/firmware.h> |
60 | #include <linux/efi.h> | 60 | #include <linux/efi.h> |
61 | #include <asm/string.h> | 61 | #include <asm/string.h> |
62 | #include <scsi/scsi_host.h> | ||
62 | #include "isci.h" | 63 | #include "isci.h" |
63 | #include "task.h" | 64 | #include "task.h" |
64 | #include "probe_roms.h" | 65 | #include "probe_roms.h" |
65 | 66 | ||
67 | #define MAJ 1 | ||
68 | #define MIN 0 | ||
69 | #define BUILD 0 | ||
70 | #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ | ||
71 | __stringify(BUILD) | ||
72 | |||
73 | MODULE_VERSION(DRV_VERSION); | ||
74 | |||
66 | static struct scsi_transport_template *isci_transport_template; | 75 | static struct scsi_transport_template *isci_transport_template; |
67 | 76 | ||
68 | static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = { | 77 | static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = { |
@@ -113,6 +122,22 @@ unsigned char max_concurr_spinup = 1; | |||
113 | module_param(max_concurr_spinup, byte, 0); | 122 | module_param(max_concurr_spinup, byte, 0); |
114 | MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); | 123 | MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); |
115 | 124 | ||
125 | static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) | ||
126 | { | ||
127 | struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); | ||
128 | struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); | ||
129 | struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); | ||
130 | |||
131 | return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); | ||
132 | } | ||
133 | |||
134 | static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); | ||
135 | |||
136 | struct device_attribute *isci_host_attrs[] = { | ||
137 | &dev_attr_isci_id, | ||
138 | NULL | ||
139 | }; | ||
140 | |||
116 | static struct scsi_host_template isci_sht = { | 141 | static struct scsi_host_template isci_sht = { |
117 | 142 | ||
118 | .module = THIS_MODULE, | 143 | .module = THIS_MODULE, |
@@ -138,6 +163,7 @@ static struct scsi_host_template isci_sht = { | |||
138 | .slave_alloc = sas_slave_alloc, | 163 | .slave_alloc = sas_slave_alloc, |
139 | .target_destroy = sas_target_destroy, | 164 | .target_destroy = sas_target_destroy, |
140 | .ioctl = sas_ioctl, | 165 | .ioctl = sas_ioctl, |
166 | .shost_attrs = isci_host_attrs, | ||
141 | }; | 167 | }; |
142 | 168 | ||
143 | static struct sas_domain_function_template isci_transport_ops = { | 169 | static struct sas_domain_function_template isci_transport_ops = { |
@@ -232,17 +258,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host) | |||
232 | return 0; | 258 | return 0; |
233 | } | 259 | } |
234 | 260 | ||
235 | static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) | ||
236 | { | ||
237 | struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); | ||
238 | struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); | ||
239 | struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); | ||
240 | |||
241 | return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); | ||
242 | } | ||
243 | |||
244 | static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); | ||
245 | |||
246 | static void isci_unregister(struct isci_host *isci_host) | 261 | static void isci_unregister(struct isci_host *isci_host) |
247 | { | 262 | { |
248 | struct Scsi_Host *shost; | 263 | struct Scsi_Host *shost; |
@@ -251,7 +266,6 @@ static void isci_unregister(struct isci_host *isci_host) | |||
251 | return; | 266 | return; |
252 | 267 | ||
253 | shost = isci_host->shost; | 268 | shost = isci_host->shost; |
254 | device_remove_file(&shost->shost_dev, &dev_attr_isci_id); | ||
255 | 269 | ||
256 | sas_unregister_ha(&isci_host->sas_ha); | 270 | sas_unregister_ha(&isci_host->sas_ha); |
257 | 271 | ||
@@ -415,14 +429,8 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) | |||
415 | if (err) | 429 | if (err) |
416 | goto err_shost_remove; | 430 | goto err_shost_remove; |
417 | 431 | ||
418 | err = device_create_file(&shost->shost_dev, &dev_attr_isci_id); | ||
419 | if (err) | ||
420 | goto err_unregister_ha; | ||
421 | |||
422 | return isci_host; | 432 | return isci_host; |
423 | 433 | ||
424 | err_unregister_ha: | ||
425 | sas_unregister_ha(&(isci_host->sas_ha)); | ||
426 | err_shost_remove: | 434 | err_shost_remove: |
427 | scsi_remove_host(shost); | 435 | scsi_remove_host(shost); |
428 | err_shost: | 436 | err_shost: |
@@ -540,7 +548,8 @@ static __init int isci_init(void) | |||
540 | { | 548 | { |
541 | int err; | 549 | int err; |
542 | 550 | ||
543 | pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME); | 551 | pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n", |
552 | DRV_NAME, DRV_VERSION); | ||
544 | 553 | ||
545 | isci_transport_template = sas_domain_attach_transport(&isci_transport_ops); | 554 | isci_transport_template = sas_domain_attach_transport(&isci_transport_ops); |
546 | if (!isci_transport_template) | 555 | if (!isci_transport_template) |
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index 79313a7a2356..430fc8ff014a 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c | |||
@@ -104,6 +104,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, | |||
104 | u32 parity_count = 0; | 104 | u32 parity_count = 0; |
105 | u32 llctl, link_rate; | 105 | u32 llctl, link_rate; |
106 | u32 clksm_value = 0; | 106 | u32 clksm_value = 0; |
107 | u32 sp_timeouts = 0; | ||
107 | 108 | ||
108 | iphy->link_layer_registers = reg; | 109 | iphy->link_layer_registers = reg; |
109 | 110 | ||
@@ -211,6 +212,18 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, | |||
211 | llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); | 212 | llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); |
212 | writel(llctl, &iphy->link_layer_registers->link_layer_control); | 213 | writel(llctl, &iphy->link_layer_registers->link_layer_control); |
213 | 214 | ||
215 | sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts); | ||
216 | |||
217 | /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ | ||
218 | sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); | ||
219 | |||
220 | /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can | ||
221 | * lock with 3Gb drive when SCU max rate is set to 1.5Gb. | ||
222 | */ | ||
223 | sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); | ||
224 | |||
225 | writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts); | ||
226 | |||
214 | if (is_a2(ihost->pdev)) { | 227 | if (is_a2(ihost->pdev)) { |
215 | /* Program the max ARB time for the PHY to 700us so we inter-operate with | 228 | /* Program the max ARB time for the PHY to 700us so we inter-operate with |
216 | * the PMC expander which shuts down PHYs if the expander PHY generates too | 229 | * the PMC expander which shuts down PHYs if the expander PHY generates too |
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h index 9b266c7428e8..00afc738bbed 100644 --- a/drivers/scsi/isci/registers.h +++ b/drivers/scsi/isci/registers.h | |||
@@ -1299,6 +1299,18 @@ struct scu_transport_layer_registers { | |||
1299 | #define SCU_AFE_XCVRCR_OFFSET 0x00DC | 1299 | #define SCU_AFE_XCVRCR_OFFSET 0x00DC |
1300 | #define SCU_AFE_LUTCR_OFFSET 0x00E0 | 1300 | #define SCU_AFE_LUTCR_OFFSET 0x00E0 |
1301 | 1301 | ||
1302 | #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL) | ||
1303 | #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL) | ||
1304 | #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL) | ||
1305 | #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL) | ||
1306 | #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL) | ||
1307 | #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL) | ||
1308 | #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL) | ||
1309 | #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL) | ||
1310 | |||
1311 | #define SCU_SAS_PHYTOV_GEN_VAL(name, value) \ | ||
1312 | SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value) | ||
1313 | |||
1302 | #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0) | 1314 | #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0) |
1303 | #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003) | 1315 | #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003) |
1304 | #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0) | 1316 | #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0) |
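
The SCU_SAS_PHYTOV_GEN_VAL() macro added here is what lets the phy.c hunk above clear and rewrite just the RATE_CHANGE byte of the sas_phy_timeouts register. SCU_GEN_VALUE() itself is not shown in this hunk, so the sketch below assumes its usual shift-and-mask shape; everything outside the field-layout constants is illustrative only.

#include <stdint.h>

/* Field layout copied from the hunk above (RATE_CHANGE lives in bits 31:24). */
#define RATE_CHANGE_SHIFT 24
#define RATE_CHANGE_MASK  0xFF000000u

/* Assumed shape of SCU_GEN_VALUE(): shift the value into place, then mask it. */
#define PHYTOV_GEN_VAL(name, value) \
        (((uint32_t)(value) << name##_SHIFT) & name##_MASK)

/* Read-modify-write of one byte-wide field, mirroring the RATE_CHANGE update. */
static uint32_t set_rate_change_timeout(uint32_t sp_timeouts, uint8_t timeout)
{
        sp_timeouts &= ~PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);   /* clear old value */
        sp_timeouts |= PHYTOV_GEN_VAL(RATE_CHANGE, timeout); /* e.g. 0x3B (59us) */
        return sp_timeouts;
}
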
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index a46e07ac789f..b5d3a8c4d329 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c | |||
@@ -732,12 +732,20 @@ sci_io_request_terminate(struct isci_request *ireq) | |||
732 | sci_change_state(&ireq->sm, SCI_REQ_ABORTING); | 732 | sci_change_state(&ireq->sm, SCI_REQ_ABORTING); |
733 | return SCI_SUCCESS; | 733 | return SCI_SUCCESS; |
734 | case SCI_REQ_TASK_WAIT_TC_RESP: | 734 | case SCI_REQ_TASK_WAIT_TC_RESP: |
735 | /* The task frame was already confirmed to have been | ||
736 | * sent by the SCU HW. Since the state machine is | ||
737 | * now only waiting for the task response itself, | ||
738 | * abort the request and complete it immediately, | ||
739 | * without waiting for the task response. | ||
740 | */ | ||
735 | sci_change_state(&ireq->sm, SCI_REQ_ABORTING); | 741 | sci_change_state(&ireq->sm, SCI_REQ_ABORTING); |
736 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 742 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
737 | return SCI_SUCCESS; | 743 | return SCI_SUCCESS; |
738 | case SCI_REQ_ABORTING: | 744 | case SCI_REQ_ABORTING: |
739 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 745 | /* If a request has a termination requested twice, return |
740 | return SCI_SUCCESS; | 746 | * a failure indication, since HW confirmation of the first |
747 | * abort is still outstanding. | ||
748 | */ | ||
741 | case SCI_REQ_COMPLETED: | 749 | case SCI_REQ_COMPLETED: |
742 | default: | 750 | default: |
743 | dev_warn(&ireq->owning_controller->pdev->dev, | 751 | dev_warn(&ireq->owning_controller->pdev->dev, |
@@ -2399,22 +2407,19 @@ static void isci_task_save_for_upper_layer_completion( | |||
2399 | } | 2407 | } |
2400 | } | 2408 | } |
2401 | 2409 | ||
2402 | static void isci_request_process_stp_response(struct sas_task *task, | 2410 | static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) |
2403 | void *response_buffer) | ||
2404 | { | 2411 | { |
2405 | struct dev_to_host_fis *d2h_reg_fis = response_buffer; | ||
2406 | struct task_status_struct *ts = &task->task_status; | 2412 | struct task_status_struct *ts = &task->task_status; |
2407 | struct ata_task_resp *resp = (void *)&ts->buf[0]; | 2413 | struct ata_task_resp *resp = (void *)&ts->buf[0]; |
2408 | 2414 | ||
2409 | resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6)); | 2415 | resp->frame_len = sizeof(*fis); |
2410 | memcpy(&resp->ending_fis[0], response_buffer + 16, 24); | 2416 | memcpy(resp->ending_fis, fis, sizeof(*fis)); |
2411 | ts->buf_valid_size = sizeof(*resp); | 2417 | ts->buf_valid_size = sizeof(*resp); |
2412 | 2418 | ||
2413 | /** | 2419 | /* If the device fault bit is set in the status register, then |
2414 | * If the device fault bit is set in the status register, then | ||
2415 | * set the sense data and return. | 2420 | * set the sense data and return. |
2416 | */ | 2421 | */ |
2417 | if (d2h_reg_fis->status & ATA_DF) | 2422 | if (fis->status & ATA_DF) |
2418 | ts->stat = SAS_PROTO_RESPONSE; | 2423 | ts->stat = SAS_PROTO_RESPONSE; |
2419 | else | 2424 | else |
2420 | ts->stat = SAM_STAT_GOOD; | 2425 | ts->stat = SAM_STAT_GOOD; |
@@ -2428,7 +2433,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
2428 | { | 2433 | { |
2429 | struct sas_task *task = isci_request_access_task(request); | 2434 | struct sas_task *task = isci_request_access_task(request); |
2430 | struct ssp_response_iu *resp_iu; | 2435 | struct ssp_response_iu *resp_iu; |
2431 | void *resp_buf; | ||
2432 | unsigned long task_flags; | 2436 | unsigned long task_flags; |
2433 | struct isci_remote_device *idev = isci_lookup_device(task->dev); | 2437 | struct isci_remote_device *idev = isci_lookup_device(task->dev); |
2434 | enum service_response response = SAS_TASK_UNDELIVERED; | 2438 | enum service_response response = SAS_TASK_UNDELIVERED; |
@@ -2565,9 +2569,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
2565 | task); | 2569 | task); |
2566 | 2570 | ||
2567 | if (sas_protocol_ata(task->task_proto)) { | 2571 | if (sas_protocol_ata(task->task_proto)) { |
2568 | resp_buf = &request->stp.rsp; | 2572 | isci_process_stp_response(task, &request->stp.rsp); |
2569 | isci_request_process_stp_response(task, | ||
2570 | resp_buf); | ||
2571 | } else if (SAS_PROTOCOL_SSP == task->task_proto) { | 2573 | } else if (SAS_PROTOCOL_SSP == task->task_proto) { |
2572 | 2574 | ||
2573 | /* crack the iu response buffer. */ | 2575 | /* crack the iu response buffer. */ |
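
With the change above, isci_process_stp_response() copies the whole D2H register FIS into the ata_task_resp buffer rather than picking bytes out of the raw frame. A hedged sketch of how an upper layer would read that status back out of the task; struct names follow libsas/libata, but treat the snippet as illustrative rather than the actual libsas consumer.

#include <linux/kernel.h>
#include <scsi/libsas.h>

/* Reads the D2H FIS that the completion path stored in task->task_status. */
static void dump_d2h_status(struct sas_task *task)
{
        struct task_status_struct *ts = &task->task_status;
        struct ata_task_resp *resp = (struct ata_task_resp *)&ts->buf[0];
        struct dev_to_host_fis *fis = (struct dev_to_host_fis *)resp->ending_fis;

        if (ts->buf_valid_size >= sizeof(*resp))
                pr_info("D2H FIS: status 0x%x error 0x%x\n",
                        fis->status, fis->error);
}
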
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index e9e1e2abacb9..16f88ab939c8 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c | |||
@@ -72,7 +72,7 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost) | |||
72 | */ | 72 | */ |
73 | buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; | 73 | buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; |
74 | header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); | 74 | header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); |
75 | size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t); | 75 | size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]); |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * The Unsolicited Frame buffers are set at the start of the UF | 78 | * The Unsolicited Frame buffers are set at the start of the UF |
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index 31cb9506f52d..75d896686f5a 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h | |||
@@ -214,7 +214,7 @@ struct sci_uf_address_table_array { | |||
214 | * starting address of the UF address table. | 214 | * starting address of the UF address table. |
215 | * 64-bit pointers are required by the hardware. | 215 | * 64-bit pointers are required by the hardware. |
216 | */ | 216 | */ |
217 | dma_addr_t *array; | 217 | u64 *array; |
218 | 218 | ||
219 | /** | 219 | /** |
220 | * This field specifies the physical address location for the UF | 220 | * This field specifies the physical address location for the UF |
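
The switch from dma_addr_t to u64 matters because the hardware-visible address table always holds 64-bit entries, while sizeof(dma_addr_t) is only 4 on 32-bit kernels without 64-bit DMA addressing; sizing the allocation by the array element type, as the .c hunk above now does, keeps the two in step. A small illustration of the sizing arithmetic, with an assumed frame count:

#include <stdint.h>
#include <stddef.h>

#define MAX_UNSOLICITED_FRAMES 128      /* assumed count, for illustration */

struct uf_address_table {
        uint64_t *array;        /* hardware requires 64-bit entries */
};

/* 128 * sizeof(uint64_t) = 1024 bytes on every configuration, whereas
 * 128 * sizeof(dma_addr_t) would shrink to 512 bytes on a 32-bit build.
 */
static size_t uf_table_bytes(const struct uf_address_table *t)
{
        return MAX_UNSOLICITED_FRAMES * sizeof(t->array[0]);
}
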
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 01ff082dc34c..d261e982a2fa 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c | |||
@@ -494,6 +494,9 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, | |||
494 | */ | 494 | */ |
495 | error = lport->tt.frame_send(lport, fp); | 495 | error = lport->tt.frame_send(lport, fp); |
496 | 496 | ||
497 | if (fh->fh_type == FC_TYPE_BLS) | ||
498 | return error; | ||
499 | |||
497 | /* | 500 | /* |
498 | * Update the exchange and sequence flags, | 501 | * Update the exchange and sequence flags, |
499 | * assuming all frames for the sequence have been sent. | 502 | * assuming all frames for the sequence have been sent. |
@@ -575,42 +578,35 @@ static void fc_seq_set_resp(struct fc_seq *sp, | |||
575 | } | 578 | } |
576 | 579 | ||
577 | /** | 580 | /** |
578 | * fc_seq_exch_abort() - Abort an exchange and sequence | 581 | * fc_exch_abort_locked() - Abort an exchange |
579 | * @req_sp: The sequence to be aborted | 582 | * @ep: The exchange to be aborted |
580 | * @timer_msec: The period of time to wait before aborting | 583 | * @timer_msec: The period of time to wait before aborting |
581 | * | 584 | * |
582 | * Generally called because of a timeout or an abort from the upper layer. | 585 | * Locking notes: Called with exch lock held |
586 | * | ||
587 | * Return value: 0 on success else error code | ||
583 | */ | 588 | */ |
584 | static int fc_seq_exch_abort(const struct fc_seq *req_sp, | 589 | static int fc_exch_abort_locked(struct fc_exch *ep, |
585 | unsigned int timer_msec) | 590 | unsigned int timer_msec) |
586 | { | 591 | { |
587 | struct fc_seq *sp; | 592 | struct fc_seq *sp; |
588 | struct fc_exch *ep; | ||
589 | struct fc_frame *fp; | 593 | struct fc_frame *fp; |
590 | int error; | 594 | int error; |
591 | 595 | ||
592 | ep = fc_seq_exch(req_sp); | ||
593 | |||
594 | spin_lock_bh(&ep->ex_lock); | ||
595 | if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || | 596 | if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || |
596 | ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) { | 597 | ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) |
597 | spin_unlock_bh(&ep->ex_lock); | ||
598 | return -ENXIO; | 598 | return -ENXIO; |
599 | } | ||
600 | 599 | ||
601 | /* | 600 | /* |
602 | * Send the abort on a new sequence if possible. | 601 | * Send the abort on a new sequence if possible. |
603 | */ | 602 | */ |
604 | sp = fc_seq_start_next_locked(&ep->seq); | 603 | sp = fc_seq_start_next_locked(&ep->seq); |
605 | if (!sp) { | 604 | if (!sp) |
606 | spin_unlock_bh(&ep->ex_lock); | ||
607 | return -ENOMEM; | 605 | return -ENOMEM; |
608 | } | ||
609 | 606 | ||
610 | ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL; | 607 | ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL; |
611 | if (timer_msec) | 608 | if (timer_msec) |
612 | fc_exch_timer_set_locked(ep, timer_msec); | 609 | fc_exch_timer_set_locked(ep, timer_msec); |
613 | spin_unlock_bh(&ep->ex_lock); | ||
614 | 610 | ||
615 | /* | 611 | /* |
616 | * If not logged into the fabric, don't send ABTS but leave | 612 | * If not logged into the fabric, don't send ABTS but leave |
@@ -633,6 +629,28 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp, | |||
633 | } | 629 | } |
634 | 630 | ||
635 | /** | 631 | /** |
632 | * fc_seq_exch_abort() - Abort an exchange and sequence | ||
633 | * @req_sp: The sequence to be aborted | ||
634 | * @timer_msec: The period of time to wait before aborting | ||
635 | * | ||
636 | * Generally called because of a timeout or an abort from the upper layer. | ||
637 | * | ||
638 | * Return value: 0 on success else error code | ||
639 | */ | ||
640 | static int fc_seq_exch_abort(const struct fc_seq *req_sp, | ||
641 | unsigned int timer_msec) | ||
642 | { | ||
643 | struct fc_exch *ep; | ||
644 | int error; | ||
645 | |||
646 | ep = fc_seq_exch(req_sp); | ||
647 | spin_lock_bh(&ep->ex_lock); | ||
648 | error = fc_exch_abort_locked(ep, timer_msec); | ||
649 | spin_unlock_bh(&ep->ex_lock); | ||
650 | return error; | ||
651 | } | ||
652 | |||
653 | /** | ||
636 | * fc_exch_timeout() - Handle exchange timer expiration | 654 | * fc_exch_timeout() - Handle exchange timer expiration |
637 | * @work: The work_struct identifying the exchange that timed out | 655 | * @work: The work_struct identifying the exchange that timed out |
638 | */ | 656 | */ |
@@ -1715,6 +1733,7 @@ static void fc_exch_reset(struct fc_exch *ep) | |||
1715 | int rc = 1; | 1733 | int rc = 1; |
1716 | 1734 | ||
1717 | spin_lock_bh(&ep->ex_lock); | 1735 | spin_lock_bh(&ep->ex_lock); |
1736 | fc_exch_abort_locked(ep, 0); | ||
1718 | ep->state |= FC_EX_RST_CLEANUP; | 1737 | ep->state |= FC_EX_RST_CLEANUP; |
1719 | if (cancel_delayed_work(&ep->timeout_work)) | 1738 | if (cancel_delayed_work(&ep->timeout_work)) |
1720 | atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ | 1739 | atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ |
@@ -1962,6 +1981,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, | |||
1962 | struct fc_exch *ep; | 1981 | struct fc_exch *ep; |
1963 | struct fc_seq *sp = NULL; | 1982 | struct fc_seq *sp = NULL; |
1964 | struct fc_frame_header *fh; | 1983 | struct fc_frame_header *fh; |
1984 | struct fc_fcp_pkt *fsp = NULL; | ||
1965 | int rc = 1; | 1985 | int rc = 1; |
1966 | 1986 | ||
1967 | ep = fc_exch_alloc(lport, fp); | 1987 | ep = fc_exch_alloc(lport, fp); |
@@ -1984,8 +2004,10 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, | |||
1984 | fc_exch_setup_hdr(ep, fp, ep->f_ctl); | 2004 | fc_exch_setup_hdr(ep, fp, ep->f_ctl); |
1985 | sp->cnt++; | 2005 | sp->cnt++; |
1986 | 2006 | ||
1987 | if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) | 2007 | if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) { |
2008 | fsp = fr_fsp(fp); | ||
1988 | fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); | 2009 | fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); |
2010 | } | ||
1989 | 2011 | ||
1990 | if (unlikely(lport->tt.frame_send(lport, fp))) | 2012 | if (unlikely(lport->tt.frame_send(lport, fp))) |
1991 | goto err; | 2013 | goto err; |
@@ -1999,7 +2021,8 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, | |||
1999 | spin_unlock_bh(&ep->ex_lock); | 2021 | spin_unlock_bh(&ep->ex_lock); |
2000 | return sp; | 2022 | return sp; |
2001 | err: | 2023 | err: |
2002 | fc_fcp_ddp_done(fr_fsp(fp)); | 2024 | if (fsp) |
2025 | fc_fcp_ddp_done(fsp); | ||
2003 | rc = fc_exch_done_locked(ep); | 2026 | rc = fc_exch_done_locked(ep); |
2004 | spin_unlock_bh(&ep->ex_lock); | 2027 | spin_unlock_bh(&ep->ex_lock); |
2005 | if (!rc) | 2028 | if (!rc) |
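
The refactor above is the common locked/unlocked split: fc_exch_abort_locked() assumes ex_lock is already held, so both fc_seq_exch_abort() and fc_exch_reset() can reuse it under their own locking. A generic sketch of the pattern; the names here are illustrative, not the libfc ones.

#include <linux/spinlock.h>
#include <linux/errno.h>

struct widget {
        spinlock_t lock;
        int state;
};

/* Caller must hold w->lock; does the real work. */
static int widget_reset_locked(struct widget *w)
{
        if (w->state < 0)
                return -ENXIO;
        w->state = 0;
        return 0;
}

/* Public wrapper: takes the lock, delegates, releases the lock. */
static int widget_reset(struct widget *w)
{
        int err;

        spin_lock_bh(&w->lock);
        err = widget_reset_locked(w);
        spin_unlock_bh(&w->lock);
        return err;
}
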
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index afb63c843144..4c41ee816f0b 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -2019,6 +2019,11 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) | |||
2019 | struct fc_fcp_internal *si; | 2019 | struct fc_fcp_internal *si; |
2020 | int rc = FAILED; | 2020 | int rc = FAILED; |
2021 | unsigned long flags; | 2021 | unsigned long flags; |
2022 | int rval; | ||
2023 | |||
2024 | rval = fc_block_scsi_eh(sc_cmd); | ||
2025 | if (rval) | ||
2026 | return rval; | ||
2022 | 2027 | ||
2023 | lport = shost_priv(sc_cmd->device->host); | 2028 | lport = shost_priv(sc_cmd->device->host); |
2024 | if (lport->state != LPORT_ST_READY) | 2029 | if (lport->state != LPORT_ST_READY) |
@@ -2068,9 +2073,9 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) | |||
2068 | int rc = FAILED; | 2073 | int rc = FAILED; |
2069 | int rval; | 2074 | int rval; |
2070 | 2075 | ||
2071 | rval = fc_remote_port_chkready(rport); | 2076 | rval = fc_block_scsi_eh(sc_cmd); |
2072 | if (rval) | 2077 | if (rval) |
2073 | goto out; | 2078 | return rval; |
2074 | 2079 | ||
2075 | lport = shost_priv(sc_cmd->device->host); | 2080 | lport = shost_priv(sc_cmd->device->host); |
2076 | 2081 | ||
@@ -2116,6 +2121,8 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) | |||
2116 | 2121 | ||
2117 | FC_SCSI_DBG(lport, "Resetting host\n"); | 2122 | FC_SCSI_DBG(lport, "Resetting host\n"); |
2118 | 2123 | ||
2124 | fc_block_scsi_eh(sc_cmd); | ||
2125 | |||
2119 | lport->tt.lport_reset(lport); | 2126 | lport->tt.lport_reset(lport); |
2120 | wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; | 2127 | wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; |
2121 | while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, | 2128 | while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, |
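
Each error-handler entry point now starts by letting the FC transport settle the rport state before touching the command. A minimal sketch of that entry pattern, with the handler body elided; a non-zero return from fc_block_scsi_eh() (e.g. FAST_IO_FAIL) is handed straight back to the midlayer.

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_fc.h>

static int example_eh_abort(struct scsi_cmnd *sc_cmd)
{
        int rval = fc_block_scsi_eh(sc_cmd);

        if (rval)
                return rval;    /* rport never came back: report that result */

        /* ... normal abort path for a usable rport ... */
        return SUCCESS;
}
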
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index e55ed9cf23fb..628f347404f9 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
@@ -88,6 +88,7 @@ | |||
88 | */ | 88 | */ |
89 | 89 | ||
90 | #include <linux/timer.h> | 90 | #include <linux/timer.h> |
91 | #include <linux/delay.h> | ||
91 | #include <linux/slab.h> | 92 | #include <linux/slab.h> |
92 | #include <asm/unaligned.h> | 93 | #include <asm/unaligned.h> |
93 | 94 | ||
@@ -1029,8 +1030,16 @@ static void fc_lport_enter_reset(struct fc_lport *lport) | |||
1029 | FCH_EVT_LIPRESET, 0); | 1030 | FCH_EVT_LIPRESET, 0); |
1030 | fc_vports_linkchange(lport); | 1031 | fc_vports_linkchange(lport); |
1031 | fc_lport_reset_locked(lport); | 1032 | fc_lport_reset_locked(lport); |
1032 | if (lport->link_up) | 1033 | if (lport->link_up) { |
1034 | /* | ||
1035 | * Wait up to the resource allocation timeout before | ||
1036 | * doing re-login, since incomplete FIP exchanges | ||
1037 | * from the last session may collide with exchanges | ||
1038 | * in the new session. | ||
1039 | */ | ||
1040 | msleep(lport->r_a_tov); | ||
1033 | fc_lport_enter_flogi(lport); | 1041 | fc_lport_enter_flogi(lport); |
1042 | } | ||
1034 | } | 1043 | } |
1035 | 1044 | ||
1036 | /** | 1045 | /** |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 7836eb01c7fc..a31e05f3bfd4 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -1786,13 +1786,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) | |||
1786 | fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); | 1786 | fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); |
1787 | } | 1787 | } |
1788 | 1788 | ||
1789 | if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { | 1789 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { |
1790 | if (ha->fw_attributes & BIT_4) { | 1790 | if (ha->fw_attributes & BIT_4) { |
1791 | int prot = 0; | ||
1791 | vha->flags.difdix_supported = 1; | 1792 | vha->flags.difdix_supported = 1; |
1792 | ql_dbg(ql_dbg_user, vha, 0x7082, | 1793 | ql_dbg(ql_dbg_user, vha, 0x7082, |
1793 | "Registered for DIF/DIX type 1 and 3 protection.\n"); | 1794 | "Registered for DIF/DIX type 1 and 3 protection.\n"); |
1795 | if (ql2xenabledif == 1) | ||
1796 | prot = SHOST_DIX_TYPE0_PROTECTION; | ||
1794 | scsi_host_set_prot(vha->host, | 1797 | scsi_host_set_prot(vha->host, |
1795 | SHOST_DIF_TYPE1_PROTECTION | 1798 | prot | SHOST_DIF_TYPE1_PROTECTION |
1796 | | SHOST_DIF_TYPE2_PROTECTION | 1799 | | SHOST_DIF_TYPE2_PROTECTION |
1797 | | SHOST_DIF_TYPE3_PROTECTION | 1800 | | SHOST_DIF_TYPE3_PROTECTION |
1798 | | SHOST_DIX_TYPE1_PROTECTION | 1801 | | SHOST_DIX_TYPE1_PROTECTION |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 2155071f3100..d79cd8a5f831 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
@@ -8,24 +8,24 @@ | |||
8 | /* | 8 | /* |
9 | * Table for showing the current message id in use for particular level | 9 | * Table for showing the current message id in use for particular level |
10 | * Change this table for addition of log/debug messages. | 10 | * Change this table for addition of log/debug messages. |
11 | * ----------------------------------------------------- | 11 | * ---------------------------------------------------------------------- |
12 | * | Level | Last Value Used | | 12 | * | Level | Last Value Used | Holes | |
13 | * ----------------------------------------------------- | 13 | * ---------------------------------------------------------------------- |
14 | * | Module Init and Probe | 0x0116 | | 14 | * | Module Init and Probe | 0x0116 | | |
15 | * | Mailbox commands | 0x111e | | 15 | * | Mailbox commands | 0x1126 | | |
16 | * | Device Discovery | 0x2083 | | 16 | * | Device Discovery | 0x2083 | | |
17 | * | Queue Command and IO tracing | 0x302e | | 17 | * | Queue Command and IO tracing | 0x302e | 0x3008 | |
18 | * | DPC Thread | 0x401c | | 18 | * | DPC Thread | 0x401c | | |
19 | * | Async Events | 0x5059 | | 19 | * | Async Events | 0x5059 | | |
20 | * | Timer Routines | 0x600d | | 20 | * | Timer Routines | 0x600d | | |
21 | * | User Space Interactions | 0x709c | | 21 | * | User Space Interactions | 0x709d | | |
22 | * | Task Management | 0x8043 | | 22 | * | Task Management | 0x8041 | | |
23 | * | AER/EEH | 0x900f | | 23 | * | AER/EEH | 0x900f | | |
24 | * | Virtual Port | 0xa007 | | 24 | * | Virtual Port | 0xa007 | | |
25 | * | ISP82XX Specific | 0xb027 | | 25 | * | ISP82XX Specific | 0xb04f | | |
26 | * | MultiQ | 0xc00b | | 26 | * | MultiQ | 0xc00b | | |
27 | * | Misc | 0xd00b | | 27 | * | Misc | 0xd00b | | |
28 | * ----------------------------------------------------- | 28 | * ---------------------------------------------------------------------- |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include "qla_def.h" | 31 | #include "qla_def.h" |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index cc5a79259d33..a03eaf40f377 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -2529,6 +2529,7 @@ struct qla_hw_data { | |||
2529 | #define DT_ISP8021 BIT_14 | 2529 | #define DT_ISP8021 BIT_14 |
2530 | #define DT_ISP_LAST (DT_ISP8021 << 1) | 2530 | #define DT_ISP_LAST (DT_ISP8021 << 1) |
2531 | 2531 | ||
2532 | #define DT_T10_PI BIT_25 | ||
2532 | #define DT_IIDMA BIT_26 | 2533 | #define DT_IIDMA BIT_26 |
2533 | #define DT_FWI2 BIT_27 | 2534 | #define DT_FWI2 BIT_27 |
2534 | #define DT_ZIO_SUPPORTED BIT_28 | 2535 | #define DT_ZIO_SUPPORTED BIT_28 |
@@ -2572,6 +2573,7 @@ struct qla_hw_data { | |||
2572 | #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha)) | 2573 | #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha)) |
2573 | #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) | 2574 | #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) |
2574 | 2575 | ||
2576 | #define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI) | ||
2575 | #define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) | 2577 | #define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) |
2576 | #define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) | 2578 | #define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) |
2577 | #define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED) | 2579 | #define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED) |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 691783abfb69..aa69486dc064 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h | |||
@@ -537,6 +537,11 @@ struct sts_entry_24xx { | |||
537 | /* | 537 | /* |
538 | * If DIF Error is set in comp_status, these additional fields are | 538 | * If DIF Error is set in comp_status, these additional fields are |
539 | * defined: | 539 | * defined: |
540 | * | ||
541 | * !!! NOTE: Firmware sends expected/actual DIF data in big endian | ||
542 | * format; but all of the "data" field gets swab32-d in the beginning | ||
543 | * of qla2x00_status_entry(). | ||
544 | * | ||
540 | * &data[10] : uint8_t report_runt_bg[2]; - computed guard | 545 | * &data[10] : uint8_t report_runt_bg[2]; - computed guard |
541 | * &data[12] : uint8_t actual_dif[8]; - DIF Data received | 546 | * &data[12] : uint8_t actual_dif[8]; - DIF Data received |
542 | * &data[20] : uint8_t expected_dif[8]; - DIF Data computed | 547 | * &data[20] : uint8_t expected_dif[8]; - DIF Data computed |
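
The note added to qla_fw.h ties in with the qla_isr.c change further down: the firmware writes the 8-byte DIF tuple big-endian as guard(2), app(2), ref(4), but qla2x00_status_entry() swab32()s the whole data area, which is why the handler then finds the app tag at offset 0, the guard at offset 2 and the ref tag at offset 4, all little-endian. A self-contained sketch of that byte shuffling (illustrative only; swab32() is re-implemented locally so the snippet stands alone):

#include <stdint.h>
#include <string.h>

static uint32_t swab32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) | (x << 24);
}

/* buf arrives big-endian as guard(2) app(2) ref(4); after the per-word swap
 * it reads back little-endian as app(2) guard(2) ref(4).
 */
static void unpack_dif_tuple(uint8_t buf[8], uint16_t *guard,
                             uint16_t *app, uint32_t *ref)
{
        uint32_t w[2];

        memcpy(w, buf, sizeof(w));
        w[0] = swab32(w[0]);
        w[1] = swab32(w[1]);
        memcpy(buf, w, sizeof(w));

        *app   = (uint16_t)(buf[0] | (buf[1] << 8));
        *guard = (uint16_t)(buf[2] | (buf[3] << 8));
        *ref   = (uint32_t)buf[4] | ((uint32_t)buf[5] << 8) |
                 ((uint32_t)buf[6] << 16) | ((uint32_t)buf[7] << 24);
}
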
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index def694271bf7..37da04d3db26 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -3838,15 +3838,12 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) | |||
3838 | req = vha->req; | 3838 | req = vha->req; |
3839 | rsp = req->rsp; | 3839 | rsp = req->rsp; |
3840 | 3840 | ||
3841 | atomic_set(&vha->loop_state, LOOP_UPDATE); | ||
3842 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); | 3841 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); |
3843 | if (vha->flags.online) { | 3842 | if (vha->flags.online) { |
3844 | if (!(rval = qla2x00_fw_ready(vha))) { | 3843 | if (!(rval = qla2x00_fw_ready(vha))) { |
3845 | /* Wait at most MAX_TARGET RSCNs for a stable link. */ | 3844 | /* Wait at most MAX_TARGET RSCNs for a stable link. */ |
3846 | wait_time = 256; | 3845 | wait_time = 256; |
3847 | do { | 3846 | do { |
3848 | atomic_set(&vha->loop_state, LOOP_UPDATE); | ||
3849 | |||
3850 | /* Issue a marker after FW becomes ready. */ | 3847 | /* Issue a marker after FW becomes ready. */ |
3851 | qla2x00_marker(vha, req, rsp, 0, 0, | 3848 | qla2x00_marker(vha, req, rsp, 0, 0, |
3852 | MK_SYNC_ALL); | 3849 | MK_SYNC_ALL); |
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index d2e904bc21c0..9902834e0b74 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h | |||
@@ -102,3 +102,32 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state) | |||
102 | fcport->d_id.b.al_pa); | 102 | fcport->d_id.b.al_pa); |
103 | } | 103 | } |
104 | } | 104 | } |
105 | |||
106 | static inline int | ||
107 | qla2x00_hba_err_chk_enabled(srb_t *sp) | ||
108 | { | ||
109 | /* | ||
110 | * Uncomment when corresponding SCSI changes are done. | ||
111 | * | ||
112 | if (!sp->cmd->prot_chk) | ||
113 | return 0; | ||
114 | * | ||
115 | */ | ||
116 | |||
117 | switch (scsi_get_prot_op(sp->cmd)) { | ||
118 | case SCSI_PROT_READ_STRIP: | ||
119 | case SCSI_PROT_WRITE_INSERT: | ||
120 | if (ql2xenablehba_err_chk >= 1) | ||
121 | return 1; | ||
122 | break; | ||
123 | case SCSI_PROT_READ_PASS: | ||
124 | case SCSI_PROT_WRITE_PASS: | ||
125 | if (ql2xenablehba_err_chk >= 2) | ||
126 | return 1; | ||
127 | break; | ||
128 | case SCSI_PROT_READ_INSERT: | ||
129 | case SCSI_PROT_WRITE_STRIP: | ||
130 | return 1; | ||
131 | } | ||
132 | return 0; | ||
133 | } | ||
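
qla2x00_hba_err_chk_enabled() is the policy knob the rest of the series keys off: strip/insert operations where only the HBA can do the verification are always checked, pass-through operations only at the higher module-parameter level. A compact, self-contained restatement of that policy; the enum names are illustrative, not the SCSI-layer constants.

#include <stdbool.h>

enum prot_op {
        OP_READ_STRIP, OP_WRITE_INSERT,
        OP_READ_PASS, OP_WRITE_PASS,
        OP_READ_INSERT, OP_WRITE_STRIP,
        OP_NORMAL,
};

/* level mirrors ql2xenablehba_err_chk: 0 = off, 1 = partial, 2 = all types. */
static bool hba_err_chk(enum prot_op op, int level)
{
        switch (op) {
        case OP_READ_STRIP:
        case OP_WRITE_INSERT:
                return level >= 1;      /* enabled at level 1 and above */
        case OP_READ_PASS:
        case OP_WRITE_PASS:
                return level >= 2;      /* only when everything is checked */
        case OP_READ_INSERT:
        case OP_WRITE_STRIP:
                return true;            /* always checked in this policy */
        default:
                return false;
        }
}
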
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 49d6906af886..dbec89622a0f 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
@@ -709,20 +709,28 @@ struct fw_dif_context { | |||
709 | * | 709 | * |
710 | */ | 710 | */ |
711 | static inline void | 711 | static inline void |
712 | qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, | 712 | qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, |
713 | unsigned int protcnt) | 713 | unsigned int protcnt) |
714 | { | 714 | { |
715 | struct sd_dif_tuple *spt; | 715 | struct scsi_cmnd *cmd = sp->cmd; |
716 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); | 716 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); |
717 | unsigned char op = scsi_get_prot_op(cmd); | ||
718 | 717 | ||
719 | switch (scsi_get_prot_type(cmd)) { | 718 | switch (scsi_get_prot_type(cmd)) { |
720 | /* For TYPE 0 protection: no checking */ | ||
721 | case SCSI_PROT_DIF_TYPE0: | 719 | case SCSI_PROT_DIF_TYPE0: |
722 | pkt->ref_tag_mask[0] = 0x00; | 720 | /* |
723 | pkt->ref_tag_mask[1] = 0x00; | 721 | * No check for ql2xenablehba_err_chk, as it would be an |
724 | pkt->ref_tag_mask[2] = 0x00; | 722 | * I/O error if hba tag generation is not done. |
725 | pkt->ref_tag_mask[3] = 0x00; | 723 | */ |
724 | pkt->ref_tag = cpu_to_le32((uint32_t) | ||
725 | (0xffffffff & scsi_get_lba(cmd))); | ||
726 | |||
727 | if (!qla2x00_hba_err_chk_enabled(sp)) | ||
728 | break; | ||
729 | |||
730 | pkt->ref_tag_mask[0] = 0xff; | ||
731 | pkt->ref_tag_mask[1] = 0xff; | ||
732 | pkt->ref_tag_mask[2] = 0xff; | ||
733 | pkt->ref_tag_mask[3] = 0xff; | ||
726 | break; | 734 | break; |
727 | 735 | ||
728 | /* | 736 | /* |
@@ -730,20 +738,16 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, | |||
730 | * match LBA in CDB + N | 738 | * match LBA in CDB + N |
731 | */ | 739 | */ |
732 | case SCSI_PROT_DIF_TYPE2: | 740 | case SCSI_PROT_DIF_TYPE2: |
733 | if (!ql2xenablehba_err_chk) | 741 | pkt->app_tag = __constant_cpu_to_le16(0); |
734 | break; | 742 | pkt->app_tag_mask[0] = 0x0; |
735 | 743 | pkt->app_tag_mask[1] = 0x0; | |
736 | if (scsi_prot_sg_count(cmd)) { | ||
737 | spt = page_address(sg_page(scsi_prot_sglist(cmd))) + | ||
738 | scsi_prot_sglist(cmd)[0].offset; | ||
739 | pkt->app_tag = swab32(spt->app_tag); | ||
740 | pkt->app_tag_mask[0] = 0xff; | ||
741 | pkt->app_tag_mask[1] = 0xff; | ||
742 | } | ||
743 | 744 | ||
744 | pkt->ref_tag = cpu_to_le32((uint32_t) | 745 | pkt->ref_tag = cpu_to_le32((uint32_t) |
745 | (0xffffffff & scsi_get_lba(cmd))); | 746 | (0xffffffff & scsi_get_lba(cmd))); |
746 | 747 | ||
748 | if (!qla2x00_hba_err_chk_enabled(sp)) | ||
749 | break; | ||
750 | |||
747 | /* enable ALL bytes of the ref tag */ | 751 | /* enable ALL bytes of the ref tag */ |
748 | pkt->ref_tag_mask[0] = 0xff; | 752 | pkt->ref_tag_mask[0] = 0xff; |
749 | pkt->ref_tag_mask[1] = 0xff; | 753 | pkt->ref_tag_mask[1] = 0xff; |
@@ -763,26 +767,15 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, | |||
763 | * 16 bit app tag. | 767 | * 16 bit app tag. |
764 | */ | 768 | */ |
765 | case SCSI_PROT_DIF_TYPE1: | 769 | case SCSI_PROT_DIF_TYPE1: |
766 | if (!ql2xenablehba_err_chk) | 770 | pkt->ref_tag = cpu_to_le32((uint32_t) |
771 | (0xffffffff & scsi_get_lba(cmd))); | ||
772 | pkt->app_tag = __constant_cpu_to_le16(0); | ||
773 | pkt->app_tag_mask[0] = 0x0; | ||
774 | pkt->app_tag_mask[1] = 0x0; | ||
775 | |||
776 | if (!qla2x00_hba_err_chk_enabled(sp)) | ||
767 | break; | 777 | break; |
768 | 778 | ||
769 | if (protcnt && (op == SCSI_PROT_WRITE_STRIP || | ||
770 | op == SCSI_PROT_WRITE_PASS)) { | ||
771 | spt = page_address(sg_page(scsi_prot_sglist(cmd))) + | ||
772 | scsi_prot_sglist(cmd)[0].offset; | ||
773 | ql_dbg(ql_dbg_io, vha, 0x3008, | ||
774 | "LBA from user %p, lba = 0x%x for cmd=%p.\n", | ||
775 | spt, (int)spt->ref_tag, cmd); | ||
776 | pkt->ref_tag = swab32(spt->ref_tag); | ||
777 | pkt->app_tag_mask[0] = 0x0; | ||
778 | pkt->app_tag_mask[1] = 0x0; | ||
779 | } else { | ||
780 | pkt->ref_tag = cpu_to_le32((uint32_t) | ||
781 | (0xffffffff & scsi_get_lba(cmd))); | ||
782 | pkt->app_tag = __constant_cpu_to_le16(0); | ||
783 | pkt->app_tag_mask[0] = 0x0; | ||
784 | pkt->app_tag_mask[1] = 0x0; | ||
785 | } | ||
786 | /* enable ALL bytes of the ref tag */ | 779 | /* enable ALL bytes of the ref tag */ |
787 | pkt->ref_tag_mask[0] = 0xff; | 780 | pkt->ref_tag_mask[0] = 0xff; |
788 | pkt->ref_tag_mask[1] = 0xff; | 781 | pkt->ref_tag_mask[1] = 0xff; |
@@ -798,8 +791,162 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, | |||
798 | scsi_get_prot_type(cmd), cmd); | 791 | scsi_get_prot_type(cmd), cmd); |
799 | } | 792 | } |
800 | 793 | ||
794 | struct qla2_sgx { | ||
795 | dma_addr_t dma_addr; /* OUT */ | ||
796 | uint32_t dma_len; /* OUT */ | ||
797 | |||
798 | uint32_t tot_bytes; /* IN */ | ||
799 | struct scatterlist *cur_sg; /* IN */ | ||
800 | |||
801 | /* for bookkeeping, bzero on initial invocation */ | ||
802 | uint32_t bytes_consumed; | ||
803 | uint32_t num_bytes; | ||
804 | uint32_t tot_partial; | ||
805 | |||
806 | /* for debugging */ | ||
807 | uint32_t num_sg; | ||
808 | srb_t *sp; | ||
809 | }; | ||
801 | 810 | ||
802 | static int | 811 | static int |
812 | qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, | ||
813 | uint32_t *partial) | ||
814 | { | ||
815 | struct scatterlist *sg; | ||
816 | uint32_t cumulative_partial, sg_len; | ||
817 | dma_addr_t sg_dma_addr; | ||
818 | |||
819 | if (sgx->num_bytes == sgx->tot_bytes) | ||
820 | return 0; | ||
821 | |||
822 | sg = sgx->cur_sg; | ||
823 | cumulative_partial = sgx->tot_partial; | ||
824 | |||
825 | sg_dma_addr = sg_dma_address(sg); | ||
826 | sg_len = sg_dma_len(sg); | ||
827 | |||
828 | sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; | ||
829 | |||
830 | if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { | ||
831 | sgx->dma_len = (blk_sz - cumulative_partial); | ||
832 | sgx->tot_partial = 0; | ||
833 | sgx->num_bytes += blk_sz; | ||
834 | *partial = 0; | ||
835 | } else { | ||
836 | sgx->dma_len = sg_len - sgx->bytes_consumed; | ||
837 | sgx->tot_partial += sgx->dma_len; | ||
838 | *partial = 1; | ||
839 | } | ||
840 | |||
841 | sgx->bytes_consumed += sgx->dma_len; | ||
842 | |||
843 | if (sg_len == sgx->bytes_consumed) { | ||
844 | sg = sg_next(sg); | ||
845 | sgx->num_sg++; | ||
846 | sgx->cur_sg = sg; | ||
847 | sgx->bytes_consumed = 0; | ||
848 | } | ||
849 | |||
850 | return 1; | ||
851 | } | ||
852 | |||
853 | static int | ||
854 | qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, | ||
855 | uint32_t *dsd, uint16_t tot_dsds) | ||
856 | { | ||
857 | void *next_dsd; | ||
858 | uint8_t avail_dsds = 0; | ||
859 | uint32_t dsd_list_len; | ||
860 | struct dsd_dma *dsd_ptr; | ||
861 | struct scatterlist *sg_prot; | ||
862 | uint32_t *cur_dsd = dsd; | ||
863 | uint16_t used_dsds = tot_dsds; | ||
864 | |||
865 | uint32_t prot_int; | ||
866 | uint32_t partial; | ||
867 | struct qla2_sgx sgx; | ||
868 | dma_addr_t sle_dma; | ||
869 | uint32_t sle_dma_len, tot_prot_dma_len = 0; | ||
870 | struct scsi_cmnd *cmd = sp->cmd; | ||
871 | |||
872 | prot_int = cmd->device->sector_size; | ||
873 | |||
874 | memset(&sgx, 0, sizeof(struct qla2_sgx)); | ||
875 | sgx.tot_bytes = scsi_bufflen(sp->cmd); | ||
876 | sgx.cur_sg = scsi_sglist(sp->cmd); | ||
877 | sgx.sp = sp; | ||
878 | |||
879 | sg_prot = scsi_prot_sglist(sp->cmd); | ||
880 | |||
881 | while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { | ||
882 | |||
883 | sle_dma = sgx.dma_addr; | ||
884 | sle_dma_len = sgx.dma_len; | ||
885 | alloc_and_fill: | ||
886 | /* Allocate additional continuation packets? */ | ||
887 | if (avail_dsds == 0) { | ||
888 | avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? | ||
889 | QLA_DSDS_PER_IOCB : used_dsds; | ||
890 | dsd_list_len = (avail_dsds + 1) * 12; | ||
891 | used_dsds -= avail_dsds; | ||
892 | |||
893 | /* allocate tracking DS */ | ||
894 | dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); | ||
895 | if (!dsd_ptr) | ||
896 | return 1; | ||
897 | |||
898 | /* allocate new list */ | ||
899 | dsd_ptr->dsd_addr = next_dsd = | ||
900 | dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, | ||
901 | &dsd_ptr->dsd_list_dma); | ||
902 | |||
903 | if (!next_dsd) { | ||
904 | /* | ||
905 | * Need to clean up only this dsd_ptr; the rest | ||
906 | * will be done by sp_free_dma() | ||
907 | */ | ||
908 | kfree(dsd_ptr); | ||
909 | return 1; | ||
910 | } | ||
911 | |||
912 | list_add_tail(&dsd_ptr->list, | ||
913 | &((struct crc_context *)sp->ctx)->dsd_list); | ||
914 | |||
915 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | ||
916 | |||
917 | /* add new list to cmd iocb or last list */ | ||
918 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | ||
919 | *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); | ||
920 | *cur_dsd++ = dsd_list_len; | ||
921 | cur_dsd = (uint32_t *)next_dsd; | ||
922 | } | ||
923 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); | ||
924 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); | ||
925 | *cur_dsd++ = cpu_to_le32(sle_dma_len); | ||
926 | avail_dsds--; | ||
927 | |||
928 | if (partial == 0) { | ||
929 | /* Got a full protection interval */ | ||
930 | sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; | ||
931 | sle_dma_len = 8; | ||
932 | |||
933 | tot_prot_dma_len += sle_dma_len; | ||
934 | if (tot_prot_dma_len == sg_dma_len(sg_prot)) { | ||
935 | tot_prot_dma_len = 0; | ||
936 | sg_prot = sg_next(sg_prot); | ||
937 | } | ||
938 | |||
939 | partial = 1; /* So as to not re-enter this block */ | ||
940 | goto alloc_and_fill; | ||
941 | } | ||
942 | } | ||
943 | /* Null termination */ | ||
944 | *cur_dsd++ = 0; | ||
945 | *cur_dsd++ = 0; | ||
946 | *cur_dsd++ = 0; | ||
947 | return 0; | ||
948 | } | ||
949 | static int | ||
803 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | 950 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, |
804 | uint16_t tot_dsds) | 951 | uint16_t tot_dsds) |
805 | { | 952 | { |
@@ -981,7 +1128,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
981 | struct scsi_cmnd *cmd; | 1128 | struct scsi_cmnd *cmd; |
982 | struct scatterlist *cur_seg; | 1129 | struct scatterlist *cur_seg; |
983 | int sgc; | 1130 | int sgc; |
984 | uint32_t total_bytes; | 1131 | uint32_t total_bytes = 0; |
985 | uint32_t data_bytes; | 1132 | uint32_t data_bytes; |
986 | uint32_t dif_bytes; | 1133 | uint32_t dif_bytes; |
987 | uint8_t bundling = 1; | 1134 | uint8_t bundling = 1; |
@@ -1023,8 +1170,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1023 | __constant_cpu_to_le16(CF_READ_DATA); | 1170 | __constant_cpu_to_le16(CF_READ_DATA); |
1024 | } | 1171 | } |
1025 | 1172 | ||
1026 | tot_prot_dsds = scsi_prot_sg_count(cmd); | 1173 | if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) || |
1027 | if (!tot_prot_dsds) | 1174 | (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) || |
1175 | (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) || | ||
1176 | (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT)) | ||
1028 | bundling = 0; | 1177 | bundling = 0; |
1029 | 1178 | ||
1030 | /* Allocate CRC context from global pool */ | 1179 | /* Allocate CRC context from global pool */ |
@@ -1047,7 +1196,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1047 | 1196 | ||
1048 | INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); | 1197 | INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); |
1049 | 1198 | ||
1050 | qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *) | 1199 | qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *) |
1051 | &crc_ctx_pkt->ref_tag, tot_prot_dsds); | 1200 | &crc_ctx_pkt->ref_tag, tot_prot_dsds); |
1052 | 1201 | ||
1053 | cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); | 1202 | cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); |
@@ -1076,7 +1225,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1076 | fcp_cmnd->additional_cdb_len |= 2; | 1225 | fcp_cmnd->additional_cdb_len |= 2; |
1077 | 1226 | ||
1078 | int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); | 1227 | int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); |
1079 | host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun)); | ||
1080 | memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); | 1228 | memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); |
1081 | cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); | 1229 | cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); |
1082 | cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( | 1230 | cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( |
@@ -1107,15 +1255,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1107 | cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ | 1255 | cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ |
1108 | 1256 | ||
1109 | /* Compute dif len and adjust data len to include protection */ | 1257 | /* Compute dif len and adjust data len to include protection */ |
1110 | total_bytes = data_bytes; | ||
1111 | dif_bytes = 0; | 1258 | dif_bytes = 0; |
1112 | blk_size = cmd->device->sector_size; | 1259 | blk_size = cmd->device->sector_size; |
1113 | if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { | 1260 | dif_bytes = (data_bytes / blk_size) * 8; |
1114 | dif_bytes = (data_bytes / blk_size) * 8; | 1261 | |
1115 | total_bytes += dif_bytes; | 1262 | switch (scsi_get_prot_op(sp->cmd)) { |
1263 | case SCSI_PROT_READ_INSERT: | ||
1264 | case SCSI_PROT_WRITE_STRIP: | ||
1265 | total_bytes = data_bytes; | ||
1266 | data_bytes += dif_bytes; | ||
1267 | break; | ||
1268 | |||
1269 | case SCSI_PROT_READ_STRIP: | ||
1270 | case SCSI_PROT_WRITE_INSERT: | ||
1271 | case SCSI_PROT_READ_PASS: | ||
1272 | case SCSI_PROT_WRITE_PASS: | ||
1273 | total_bytes = data_bytes + dif_bytes; | ||
1274 | break; | ||
1275 | default: | ||
1276 | BUG(); | ||
1116 | } | 1277 | } |
1117 | 1278 | ||
1118 | if (!ql2xenablehba_err_chk) | 1279 | if (!qla2x00_hba_err_chk_enabled(sp)) |
1119 | fw_prot_opts |= 0x10; /* Disable Guard tag checking */ | 1280 | fw_prot_opts |= 0x10; /* Disable Guard tag checking */ |
1120 | 1281 | ||
1121 | if (!bundling) { | 1282 | if (!bundling) { |
@@ -1151,7 +1312,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1151 | 1312 | ||
1152 | cmd_pkt->control_flags |= | 1313 | cmd_pkt->control_flags |= |
1153 | __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); | 1314 | __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); |
1154 | if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, | 1315 | |
1316 | if (!bundling && tot_prot_dsds) { | ||
1317 | if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, | ||
1318 | cur_dsd, tot_dsds)) | ||
1319 | goto crc_queuing_error; | ||
1320 | } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, | ||
1155 | (tot_dsds - tot_prot_dsds))) | 1321 | (tot_dsds - tot_prot_dsds))) |
1156 | goto crc_queuing_error; | 1322 | goto crc_queuing_error; |
1157 | 1323 | ||
@@ -1414,6 +1580,22 @@ qla24xx_dif_start_scsi(srb_t *sp) | |||
1414 | goto queuing_error; | 1580 | goto queuing_error; |
1415 | else | 1581 | else |
1416 | sp->flags |= SRB_DMA_VALID; | 1582 | sp->flags |= SRB_DMA_VALID; |
1583 | |||
1584 | if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || | ||
1585 | (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { | ||
1586 | struct qla2_sgx sgx; | ||
1587 | uint32_t partial; | ||
1588 | |||
1589 | memset(&sgx, 0, sizeof(struct qla2_sgx)); | ||
1590 | sgx.tot_bytes = scsi_bufflen(cmd); | ||
1591 | sgx.cur_sg = scsi_sglist(cmd); | ||
1592 | sgx.sp = sp; | ||
1593 | |||
1594 | nseg = 0; | ||
1595 | while (qla24xx_get_one_block_sg( | ||
1596 | cmd->device->sector_size, &sgx, &partial)) | ||
1597 | nseg++; | ||
1598 | } | ||
1417 | } else | 1599 | } else |
1418 | nseg = 0; | 1600 | nseg = 0; |
1419 | 1601 | ||
@@ -1428,6 +1610,11 @@ qla24xx_dif_start_scsi(srb_t *sp) | |||
1428 | goto queuing_error; | 1610 | goto queuing_error; |
1429 | else | 1611 | else |
1430 | sp->flags |= SRB_CRC_PROT_DMA_VALID; | 1612 | sp->flags |= SRB_CRC_PROT_DMA_VALID; |
1613 | |||
1614 | if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || | ||
1615 | (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { | ||
1616 | nseg = scsi_bufflen(cmd) / cmd->device->sector_size; | ||
1617 | } | ||
1431 | } else { | 1618 | } else { |
1432 | nseg = 0; | 1619 | nseg = 0; |
1433 | } | 1620 | } |
@@ -1454,6 +1641,7 @@ qla24xx_dif_start_scsi(srb_t *sp) | |||
1454 | /* Build header part of command packet (excluding the OPCODE). */ | 1641 | /* Build header part of command packet (excluding the OPCODE). */ |
1455 | req->current_outstanding_cmd = handle; | 1642 | req->current_outstanding_cmd = handle; |
1456 | req->outstanding_cmds[handle] = sp; | 1643 | req->outstanding_cmds[handle] = sp; |
1644 | sp->handle = handle; | ||
1457 | sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; | 1645 | sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; |
1458 | req->cnt -= req_cnt; | 1646 | req->cnt -= req_cnt; |
1459 | 1647 | ||
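
qla24xx_get_one_block_sg() slices the data scatterlist into sector-sized pieces, flagging *partial until a full protection interval has been consumed; qla24xx_dif_start_scsi() above runs the same iterator once purely to count how many DSD entries that slicing will need. The counting idea, restated with plain arrays instead of a DMA scatterlist (illustrative only):

#include <stdint.h>
#include <stddef.h>

static unsigned int count_block_slices(const uint32_t *seg_len, size_t nseg,
                                       uint32_t blk_sz)
{
        unsigned int slices = 0;
        uint32_t partial = 0;   /* bytes carried into the current block */
        size_t i;

        for (i = 0; i < nseg; i++) {
                uint32_t left = seg_len[i];

                while (left) {
                        /* a slice ends at a block boundary or a segment end,
                         * whichever comes first
                         */
                        uint32_t take = blk_sz - partial;

                        if (take > left)
                                take = left;
                        partial = (partial + take) % blk_sz;
                        left -= take;
                        slices++;       /* one DSD per slice emitted */
                }
        }
        return slices;
}

With 512-byte blocks, segment lengths of 700 and 324 bytes produce three slices of 512, 188 and 324 bytes, which is why the DSD count can exceed the raw segment count.
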
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index b16b7725dee0..646fc5263d50 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -719,7 +719,6 @@ skip_rio: | |||
719 | vha->flags.rscn_queue_overflow = 1; | 719 | vha->flags.rscn_queue_overflow = 1; |
720 | } | 720 | } |
721 | 721 | ||
722 | atomic_set(&vha->loop_state, LOOP_UPDATE); | ||
723 | atomic_set(&vha->loop_down_timer, 0); | 722 | atomic_set(&vha->loop_down_timer, 0); |
724 | vha->flags.management_server_logged_in = 0; | 723 | vha->flags.management_server_logged_in = 0; |
725 | 724 | ||
@@ -1435,25 +1434,27 @@ struct scsi_dif_tuple { | |||
1435 | * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST | 1434 | * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST |
1436 | * to indicate to the kernel that the HBA detected error. | 1435 | * to indicate to the kernel that the HBA detected error. |
1437 | */ | 1436 | */ |
1438 | static inline void | 1437 | static inline int |
1439 | qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) | 1438 | qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) |
1440 | { | 1439 | { |
1441 | struct scsi_qla_host *vha = sp->fcport->vha; | 1440 | struct scsi_qla_host *vha = sp->fcport->vha; |
1442 | struct scsi_cmnd *cmd = sp->cmd; | 1441 | struct scsi_cmnd *cmd = sp->cmd; |
1443 | struct scsi_dif_tuple *ep = | 1442 | uint8_t *ap = &sts24->data[12]; |
1444 | (struct scsi_dif_tuple *)&sts24->data[20]; | 1443 | uint8_t *ep = &sts24->data[20]; |
1445 | struct scsi_dif_tuple *ap = | ||
1446 | (struct scsi_dif_tuple *)&sts24->data[12]; | ||
1447 | uint32_t e_ref_tag, a_ref_tag; | 1444 | uint32_t e_ref_tag, a_ref_tag; |
1448 | uint16_t e_app_tag, a_app_tag; | 1445 | uint16_t e_app_tag, a_app_tag; |
1449 | uint16_t e_guard, a_guard; | 1446 | uint16_t e_guard, a_guard; |
1450 | 1447 | ||
1451 | e_ref_tag = be32_to_cpu(ep->ref_tag); | 1448 | /* |
1452 | a_ref_tag = be32_to_cpu(ap->ref_tag); | 1449 | * swab32 of the "data" field in the beginning of qla2x00_status_entry() |
1453 | e_app_tag = be16_to_cpu(ep->app_tag); | 1450 | * would make guard field appear at offset 2 |
1454 | a_app_tag = be16_to_cpu(ap->app_tag); | 1451 | */ |
1455 | e_guard = be16_to_cpu(ep->guard); | 1452 | a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); |
1456 | a_guard = be16_to_cpu(ap->guard); | 1453 | a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); |
1454 | a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); | ||
1455 | e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); | ||
1456 | e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); | ||
1457 | e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); | ||
1457 | 1458 | ||
1458 | ql_dbg(ql_dbg_io, vha, 0x3023, | 1459 | ql_dbg(ql_dbg_io, vha, 0x3023, |
1459 | "iocb(s) %p Returned STATUS.\n", sts24); | 1460 | "iocb(s) %p Returned STATUS.\n", sts24); |
@@ -1465,6 +1466,63 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) | |||
1465 | cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, | 1466 | cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, |
1466 | a_app_tag, e_app_tag, a_guard, e_guard); | 1467 | a_app_tag, e_app_tag, a_guard, e_guard); |
1467 | 1468 | ||
1469 | /* | ||
1470 | * Ignore sector if: | ||
1471 | * For type 3: ref & app tag is all 'f's | ||
1472 | * For type 0,1,2: app tag is all 'f's | ||
1473 | */ | ||
1474 | if ((a_app_tag == 0xffff) && | ||
1475 | ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || | ||
1476 | (a_ref_tag == 0xffffffff))) { | ||
1477 | uint32_t blocks_done, resid; | ||
1478 | sector_t lba_s = scsi_get_lba(cmd); | ||
1479 | |||
1480 | /* 2TB boundary case covered automatically with this */ | ||
1481 | blocks_done = e_ref_tag - (uint32_t)lba_s + 1; | ||
1482 | |||
1483 | resid = scsi_bufflen(cmd) - (blocks_done * | ||
1484 | cmd->device->sector_size); | ||
1485 | |||
1486 | scsi_set_resid(cmd, resid); | ||
1487 | cmd->result = DID_OK << 16; | ||
1488 | |||
1489 | /* Update protection tag */ | ||
1490 | if (scsi_prot_sg_count(cmd)) { | ||
1491 | uint32_t i, j = 0, k = 0, num_ent; | ||
1492 | struct scatterlist *sg; | ||
1493 | struct sd_dif_tuple *spt; | ||
1494 | |||
1495 | /* Patch the corresponding protection tags */ | ||
1496 | scsi_for_each_prot_sg(cmd, sg, | ||
1497 | scsi_prot_sg_count(cmd), i) { | ||
1498 | num_ent = sg_dma_len(sg) / 8; | ||
1499 | if (k + num_ent < blocks_done) { | ||
1500 | k += num_ent; | ||
1501 | continue; | ||
1502 | } | ||
1503 | j = blocks_done - k - 1; | ||
1504 | k = blocks_done; | ||
1505 | break; | ||
1506 | } | ||
1507 | |||
1508 | if (k != blocks_done) { | ||
1509 | qla_printk(KERN_WARNING, sp->fcport->vha->hw, | ||
1510 | "unexpected tag values tag:lba=%x:%lx)\n", | ||
1511 | e_ref_tag, lba_s); | ||
1512 | return 1; | ||
1513 | } | ||
1514 | |||
1515 | spt = page_address(sg_page(sg)) + sg->offset; | ||
1516 | spt += j; | ||
1517 | |||
1518 | spt->app_tag = 0xffff; | ||
1519 | if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) | ||
1520 | spt->ref_tag = 0xffffffff; | ||
1521 | } | ||
1522 | |||
1523 | return 0; | ||
1524 | } | ||
1525 | |||
1468 | /* check guard */ | 1526 | /* check guard */ |
1469 | if (e_guard != a_guard) { | 1527 | if (e_guard != a_guard) { |
1470 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | 1528 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, |
@@ -1472,28 +1530,30 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) | |||
1472 | set_driver_byte(cmd, DRIVER_SENSE); | 1530 | set_driver_byte(cmd, DRIVER_SENSE); |
1473 | set_host_byte(cmd, DID_ABORT); | 1531 | set_host_byte(cmd, DID_ABORT); |
1474 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; | 1532 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; |
1475 | return; | 1533 | return 1; |
1476 | } | 1534 | } |
1477 | 1535 | ||
1478 | /* check appl tag */ | 1536 | /* check ref tag */ |
1479 | if (e_app_tag != a_app_tag) { | 1537 | if (e_ref_tag != a_ref_tag) { |
1480 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | 1538 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, |
1481 | 0x10, 0x2); | 1539 | 0x10, 0x3); |
1482 | set_driver_byte(cmd, DRIVER_SENSE); | 1540 | set_driver_byte(cmd, DRIVER_SENSE); |
1483 | set_host_byte(cmd, DID_ABORT); | 1541 | set_host_byte(cmd, DID_ABORT); |
1484 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; | 1542 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; |
1485 | return; | 1543 | return 1; |
1486 | } | 1544 | } |
1487 | 1545 | ||
1488 | /* check ref tag */ | 1546 | /* check appl tag */ |
1489 | if (e_ref_tag != a_ref_tag) { | 1547 | if (e_app_tag != a_app_tag) { |
1490 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | 1548 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, |
1491 | 0x10, 0x3); | 1549 | 0x10, 0x2); |
1492 | set_driver_byte(cmd, DRIVER_SENSE); | 1550 | set_driver_byte(cmd, DRIVER_SENSE); |
1493 | set_host_byte(cmd, DID_ABORT); | 1551 | set_host_byte(cmd, DID_ABORT); |
1494 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; | 1552 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; |
1495 | return; | 1553 | return 1; |
1496 | } | 1554 | } |
1555 | |||
1556 | return 1; | ||
1497 | } | 1557 | } |
1498 | 1558 | ||
1499 | /** | 1559 | /** |
@@ -1767,7 +1827,7 @@ check_scsi_status: | |||
1767 | break; | 1827 | break; |
1768 | 1828 | ||
1769 | case CS_DIF_ERROR: | 1829 | case CS_DIF_ERROR: |
1770 | qla2x00_handle_dif_error(sp, sts24); | 1830 | logit = qla2x00_handle_dif_error(sp, sts24); |
1771 | break; | 1831 | break; |
1772 | default: | 1832 | default: |
1773 | cp->result = DID_ERROR << 16; | 1833 | cp->result = DID_ERROR << 16; |
@@ -2468,11 +2528,10 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) | |||
2468 | goto skip_msi; | 2528 | goto skip_msi; |
2469 | } | 2529 | } |
2470 | 2530 | ||
2471 | if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || | 2531 | if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { |
2472 | !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { | ||
2473 | ql_log(ql_log_warn, vha, 0x0035, | 2532 | ql_log(ql_log_warn, vha, 0x0035, |
2474 | "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", | 2533 | "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", |
2475 | ha->pdev->revision, ha->fw_attributes); | 2534 | ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); |
2476 | goto skip_msix; | 2535 | goto skip_msix; |
2477 | } | 2536 | } |
2478 | 2537 | ||
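
When the returned app tag (and, for Type 3, the ref tag) is all 0xFF the block is marked unprotected, so the miscompare is ignored; the handler instead uses the expected ref tag of the failing block to work out how many blocks really completed, sets the residual, and patches the corresponding protection-buffer tuple. A hedged restatement of just the residual arithmetic:

#include <stdint.h>

/* e_ref_tag is the expected ref tag reported for the failing block and
 * lba_low the low 32 bits of the starting LBA; unsigned wraparound makes
 * this work across the 2TB (32-bit ref tag) boundary, as noted above.
 */
static uint32_t dif_resid(uint32_t e_ref_tag, uint32_t lba_low,
                          uint32_t bufflen, uint32_t sector_size)
{
        uint32_t blocks_done = e_ref_tag - lba_low + 1;

        return bufflen - blocks_done * sector_size;
}
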
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c706ed370000..f488cc69fc79 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
@@ -472,7 +472,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) | |||
472 | host->can_queue = base_vha->req->length + 128; | 472 | host->can_queue = base_vha->req->length + 128; |
473 | host->this_id = 255; | 473 | host->this_id = 255; |
474 | host->cmd_per_lun = 3; | 474 | host->cmd_per_lun = 3; |
475 | if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) | 475 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) |
476 | host->max_cmd_len = 32; | 476 | host->max_cmd_len = 32; |
477 | else | 477 | else |
478 | host->max_cmd_len = MAX_CMDSZ; | 478 | host->max_cmd_len = MAX_CMDSZ; |
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 5cbf33a50b14..049807cda419 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c | |||
@@ -2208,6 +2208,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id) | |||
2208 | struct qla_hw_data *ha; | 2208 | struct qla_hw_data *ha; |
2209 | struct rsp_que *rsp; | 2209 | struct rsp_que *rsp; |
2210 | struct device_reg_82xx __iomem *reg; | 2210 | struct device_reg_82xx __iomem *reg; |
2211 | unsigned long flags; | ||
2211 | 2212 | ||
2212 | rsp = (struct rsp_que *) dev_id; | 2213 | rsp = (struct rsp_que *) dev_id; |
2213 | if (!rsp) { | 2214 | if (!rsp) { |
@@ -2218,11 +2219,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id) | |||
2218 | 2219 | ||
2219 | ha = rsp->hw; | 2220 | ha = rsp->hw; |
2220 | reg = &ha->iobase->isp82; | 2221 | reg = &ha->iobase->isp82; |
2221 | spin_lock_irq(&ha->hardware_lock); | 2222 | spin_lock_irqsave(&ha->hardware_lock, flags); |
2222 | vha = pci_get_drvdata(ha->pdev); | 2223 | vha = pci_get_drvdata(ha->pdev); |
2223 | qla24xx_process_response_queue(vha, rsp); | 2224 | qla24xx_process_response_queue(vha, rsp); |
2224 | WRT_REG_DWORD(®->host_int, 0); | 2225 | WRT_REG_DWORD(®->host_int, 0); |
2225 | spin_unlock_irq(&ha->hardware_lock); | 2226 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
2226 | return IRQ_HANDLED; | 2227 | return IRQ_HANDLED; |
2227 | } | 2228 | } |
2228 | 2229 | ||
@@ -2838,6 +2839,16 @@ sufficient_dsds: | |||
2838 | int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); | 2839 | int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); |
2839 | host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); | 2840 | host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); |
2840 | 2841 | ||
2842 | /* build FCP_CMND IU */ | ||
2843 | memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); | ||
2844 | int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); | ||
2845 | ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; | ||
2846 | |||
2847 | if (cmd->sc_data_direction == DMA_TO_DEVICE) | ||
2848 | ctx->fcp_cmnd->additional_cdb_len |= 1; | ||
2849 | else if (cmd->sc_data_direction == DMA_FROM_DEVICE) | ||
2850 | ctx->fcp_cmnd->additional_cdb_len |= 2; | ||
2851 | |||
2841 | /* | 2852 | /* |
2842 | * Update tagged queuing modifier -- default is TSK_SIMPLE (0). | 2853 | * Update tagged queuing modifier -- default is TSK_SIMPLE (0). |
2843 | */ | 2854 | */ |
@@ -2854,16 +2865,6 @@ sufficient_dsds: | |||
2854 | } | 2865 | } |
2855 | } | 2866 | } |
2856 | 2867 | ||
2857 | /* build FCP_CMND IU */ | ||
2858 | memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); | ||
2859 | int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); | ||
2860 | ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; | ||
2861 | |||
2862 | if (cmd->sc_data_direction == DMA_TO_DEVICE) | ||
2863 | ctx->fcp_cmnd->additional_cdb_len |= 1; | ||
2864 | else if (cmd->sc_data_direction == DMA_FROM_DEVICE) | ||
2865 | ctx->fcp_cmnd->additional_cdb_len |= 2; | ||
2866 | |||
2867 | memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); | 2868 | memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); |
2868 | 2869 | ||
2869 | fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + | 2870 | fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + |
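
The qla82xx_msix_rsp_q() fix above swaps spin_lock_irq()/spin_unlock_irq() for the save/restore variants: if the handler is ever entered with interrupts already disabled, an unconditional spin_unlock_irq() would re-enable them behind the caller's back. The idiom, in isolation:

#include <linux/spinlock.h>

static void touch_shared_state(spinlock_t *lock)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);         /* records the current IRQ state */
        /* ... work on data shared with other interrupt contexts ... */
        spin_unlock_irqrestore(lock, flags);    /* restores it, never force-enables */
}
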
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e02df276804e..4cace3f20c04 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -106,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth, | |||
106 | "Maximum queue depth to report for target devices."); | 106 | "Maximum queue depth to report for target devices."); |
107 | 107 | ||
108 | /* Do not change the value of this after module load */ | 108 | /* Do not change the value of this after module load */ |
109 | int ql2xenabledif = 1; | 109 | int ql2xenabledif = 0; |
110 | module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR); | 110 | module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR); |
111 | MODULE_PARM_DESC(ql2xenabledif, | 111 | MODULE_PARM_DESC(ql2xenabledif, |
112 | " Enable T10-CRC-DIF " | 112 | " Enable T10-CRC-DIF " |
113 | " Default is 0 - No DIF Support. 1 - Enable it"); | 113 | " Default is 0 - No DIF Support. 1 - Enable it" |
114 | ", 2 - Enable DIF for all types, except Type 0."); | ||
114 | 115 | ||
115 | int ql2xenablehba_err_chk; | 116 | int ql2xenablehba_err_chk = 2; |
116 | module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); | 117 | module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); |
117 | MODULE_PARM_DESC(ql2xenablehba_err_chk, | 118 | MODULE_PARM_DESC(ql2xenablehba_err_chk, |
118 | " Enable T10-CRC-DIF Error isolation by HBA" | 119 | " Enable T10-CRC-DIF Error isolation by HBA:\n" |
119 | " Default is 0 - Error isolation disabled, 1 - Enable it"); | 120 | " Default is 1.\n" |
121 | " 0 -- Error isolation disabled\n" | ||
122 | " 1 -- Error isolation enabled only for DIX Type 0\n" | ||
123 | " 2 -- Error isolation enabled for all Types\n"); | ||
120 | 124 | ||
121 | int ql2xiidmaenable=1; | 125 | int ql2xiidmaenable=1; |
122 | module_param(ql2xiidmaenable, int, S_IRUGO); | 126 | module_param(ql2xiidmaenable, int, S_IRUGO); |
@@ -909,7 +913,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
909 | "Abort command mbx success.\n"); | 913 | "Abort command mbx success.\n"); |
910 | wait = 1; | 914 | wait = 1; |
911 | } | 915 | } |
916 | |||
917 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
912 | qla2x00_sp_compl(ha, sp); | 918 | qla2x00_sp_compl(ha, sp); |
919 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
920 | |||
921 | /* Did the command return during mailbox execution? */ | ||
922 | if (ret == FAILED && !CMD_SP(cmd)) | ||
923 | ret = SUCCESS; | ||
913 | 924 | ||
914 | /* Wait for the command to be returned. */ | 925 | /* Wait for the command to be returned. */ |
915 | if (wait) { | 926 | if (wait) { |
@@ -2251,7 +2262,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2251 | host->this_id = 255; | 2262 | host->this_id = 255; |
2252 | host->cmd_per_lun = 3; | 2263 | host->cmd_per_lun = 3; |
2253 | host->unique_id = host->host_no; | 2264 | host->unique_id = host->host_no; |
2254 | if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) | 2265 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) |
2255 | host->max_cmd_len = 32; | 2266 | host->max_cmd_len = 32; |
2256 | else | 2267 | else |
2257 | host->max_cmd_len = MAX_CMDSZ; | 2268 | host->max_cmd_len = MAX_CMDSZ; |
@@ -2378,13 +2389,16 @@ skip_dpc: | |||
2378 | "Detected hba at address=%p.\n", | 2389 | "Detected hba at address=%p.\n", |
2379 | ha); | 2390 | ha); |
2380 | 2391 | ||
2381 | if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { | 2392 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { |
2382 | if (ha->fw_attributes & BIT_4) { | 2393 | if (ha->fw_attributes & BIT_4) { |
2394 | int prot = 0; | ||
2383 | base_vha->flags.difdix_supported = 1; | 2395 | base_vha->flags.difdix_supported = 1; |
2384 | ql_dbg(ql_dbg_init, base_vha, 0x00f1, | 2396 | ql_dbg(ql_dbg_init, base_vha, 0x00f1, |
2385 | "Registering for DIF/DIX type 1 and 3 protection.\n"); | 2397 | "Registering for DIF/DIX type 1 and 3 protection.\n"); |
2398 | if (ql2xenabledif == 1) | ||
2399 | prot = SHOST_DIX_TYPE0_PROTECTION; | ||
2386 | scsi_host_set_prot(host, | 2400 | scsi_host_set_prot(host, |
2387 | SHOST_DIF_TYPE1_PROTECTION | 2401 | prot | SHOST_DIF_TYPE1_PROTECTION |
2388 | | SHOST_DIF_TYPE2_PROTECTION | 2402 | | SHOST_DIF_TYPE2_PROTECTION |
2389 | | SHOST_DIF_TYPE3_PROTECTION | 2403 | | SHOST_DIF_TYPE3_PROTECTION |
2390 | | SHOST_DIX_TYPE1_PROTECTION | 2404 | | SHOST_DIX_TYPE1_PROTECTION |
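In the probe hunk above, DIX Type 0 protection is advertised only when ql2xenabledif == 1 and is OR'd into the mask handed to scsi_host_set_prot(). A minimal sketch of building that mask; the enable argument stands in for the module parameter and the list of types mirrors the visible diff, not necessarily the full set the driver registers.

#include <scsi/scsi_host.h>

static void demo_register_prot(struct Scsi_Host *host, int enabledif)
{
        unsigned int prot = 0;

        /* DIX Type 0 (guard-only checking) is opt-in via mode 1 */
        if (enabledif == 1)
                prot |= SHOST_DIX_TYPE0_PROTECTION;

        scsi_host_set_prot(host, prot
                           | SHOST_DIF_TYPE1_PROTECTION
                           | SHOST_DIF_TYPE2_PROTECTION
                           | SHOST_DIF_TYPE3_PROTECTION
                           | SHOST_DIX_TYPE1_PROTECTION);
}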
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 062c97bf62f5..13b6357c1fa2 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.03.07.03-k" | 10 | #define QLA2XXX_VERSION "8.03.07.07-k" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig index 2c33ce6eac1e..0f5599e0abf6 100644 --- a/drivers/scsi/qla4xxx/Kconfig +++ b/drivers/scsi/qla4xxx/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config SCSI_QLA_ISCSI | 1 | config SCSI_QLA_ISCSI |
2 | tristate "QLogic ISP4XXX and ISP82XX host adapter family support" | 2 | tristate "QLogic ISP4XXX and ISP82XX host adapter family support" |
3 | depends on PCI && SCSI | 3 | depends on PCI && SCSI && NET |
4 | select SCSI_ISCSI_ATTRS | 4 | select SCSI_ISCSI_ATTRS |
5 | ---help--- | 5 | ---help--- |
6 | This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX) | 6 | This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX) |
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c index f33e2dd97934..33b2ed451e09 100644 --- a/drivers/sh/intc/chip.c +++ b/drivers/sh/intc/chip.c | |||
@@ -186,6 +186,9 @@ static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = { | |||
186 | !defined(CONFIG_CPU_SUBTYPE_SH7709) | 186 | !defined(CONFIG_CPU_SUBTYPE_SH7709) |
187 | [IRQ_TYPE_LEVEL_HIGH] = VALID(3), | 187 | [IRQ_TYPE_LEVEL_HIGH] = VALID(3), |
188 | #endif | 188 | #endif |
189 | #if defined(CONFIG_ARCH_SH7372) | ||
190 | [IRQ_TYPE_EDGE_BOTH] = VALID(4), | ||
191 | #endif | ||
189 | }; | 192 | }; |
190 | 193 | ||
191 | static int intc_set_type(struct irq_data *data, unsigned int type) | 194 | static int intc_set_type(struct irq_data *data, unsigned int type) |
diff --git a/drivers/staging/brcm80211/brcmsmac/otp.c b/drivers/staging/brcm80211/brcmsmac/otp.c index 34253cf37812..4a70180eba5d 100644 --- a/drivers/staging/brcm80211/brcmsmac/otp.c +++ b/drivers/staging/brcm80211/brcmsmac/otp.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
19 | #include <linux/string.h> | ||
19 | 20 | ||
20 | #include <brcm_hw_ids.h> | 21 | #include <brcm_hw_ids.h> |
21 | #include <chipcommon.h> | 22 | #include <chipcommon.h> |
diff --git a/drivers/staging/brcm80211/brcmsmac/types.h b/drivers/staging/brcm80211/brcmsmac/types.h index bbf21897ae0e..823b5e4672e2 100644 --- a/drivers/staging/brcm80211/brcmsmac/types.h +++ b/drivers/staging/brcm80211/brcmsmac/types.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #define _BRCM_TYPES_H_ | 18 | #define _BRCM_TYPES_H_ |
19 | 19 | ||
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/io.h> | ||
21 | 22 | ||
22 | /* Bus types */ | 23 | /* Bus types */ |
23 | #define SI_BUS 0 /* SOC Interconnect */ | 24 | #define SI_BUS 0 /* SOC Interconnect */ |
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c index 6859af0778cf..7611def97d06 100644 --- a/drivers/staging/comedi/drivers/ni_labpc.c +++ b/drivers/staging/comedi/drivers/ni_labpc.c | |||
@@ -241,8 +241,10 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev, | |||
241 | struct comedi_insn *insn, | 241 | struct comedi_insn *insn, |
242 | unsigned int *data); | 242 | unsigned int *data); |
243 | static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd); | 243 | static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd); |
244 | #ifdef CONFIG_COMEDI_PCI | 244 | #ifdef CONFIG_ISA_DMA_API |
245 | static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd); | 245 | static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd); |
246 | #endif | ||
247 | #ifdef CONFIG_COMEDI_PCI | ||
246 | static int labpc_find_device(struct comedi_device *dev, int bus, int slot); | 248 | static int labpc_find_device(struct comedi_device *dev, int bus, int slot); |
247 | #endif | 249 | #endif |
248 | static int labpc_dio_mem_callback(int dir, int port, int data, | 250 | static int labpc_dio_mem_callback(int dir, int port, int data, |
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.c b/drivers/staging/gma500/mdfld_dsi_dbi.c index 02e17c9c8637..fd211f3467c4 100644 --- a/drivers/staging/gma500/mdfld_dsi_dbi.c +++ b/drivers/staging/gma500/mdfld_dsi_dbi.c | |||
@@ -711,10 +711,11 @@ struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev, | |||
711 | /* Create drm encoder object */ | 711 | /* Create drm encoder object */ |
712 | connector = &dsi_connector->base.base; | 712 | connector = &dsi_connector->base.base; |
713 | encoder = &dbi_output->base.base; | 713 | encoder = &dbi_output->base.base; |
714 | /* Review this if we ever get MIPI-HDMI bridges or similar */ | ||
714 | drm_encoder_init(dev, | 715 | drm_encoder_init(dev, |
715 | encoder, | 716 | encoder, |
716 | p_funcs->encoder_funcs, | 717 | p_funcs->encoder_funcs, |
717 | DRM_MODE_ENCODER_MIPI); | 718 | DRM_MODE_ENCODER_LVDS); |
718 | drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); | 719 | drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); |
719 | 720 | ||
720 | /* Attach to given connector */ | 721 | /* Attach to given connector */ |
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.h b/drivers/staging/gma500/mdfld_dsi_dbi.h index dc6242c51d0b..f0fa986fd934 100644 --- a/drivers/staging/gma500/mdfld_dsi_dbi.h +++ b/drivers/staging/gma500/mdfld_dsi_dbi.h | |||
@@ -42,9 +42,6 @@ | |||
42 | #include "mdfld_dsi_output.h" | 42 | #include "mdfld_dsi_output.h" |
43 | #include "mdfld_output.h" | 43 | #include "mdfld_output.h" |
44 | 44 | ||
45 | #define DRM_MODE_ENCODER_MIPI 5 | ||
46 | |||
47 | |||
48 | /* | 45 | /* |
49 | * DBI encoder which inherits from mdfld_dsi_encoder | 46 | * DBI encoder which inherits from mdfld_dsi_encoder |
50 | */ | 47 | */ |
diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.c b/drivers/staging/gma500/mdfld_dsi_dpi.c index 6e03a91e947e..e685f1217baa 100644 --- a/drivers/staging/gma500/mdfld_dsi_dpi.c +++ b/drivers/staging/gma500/mdfld_dsi_dpi.c | |||
@@ -777,10 +777,15 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, | |||
777 | /* Create drm encoder object */ | 777 | /* Create drm encoder object */ |
778 | connector = &dsi_connector->base.base; | 778 | connector = &dsi_connector->base.base; |
779 | encoder = &dpi_output->base.base; | 779 | encoder = &dpi_output->base.base; |
780 | /* | ||
781 | * On existing hardware this will be a panel of some form, | ||
782 | * if future devices also have HDMI bridges this will need | ||
783 | * revisiting | ||
784 | */ | ||
780 | drm_encoder_init(dev, | 785 | drm_encoder_init(dev, |
781 | encoder, | 786 | encoder, |
782 | p_funcs->encoder_funcs, | 787 | p_funcs->encoder_funcs, |
783 | DRM_MODE_ENCODER_MIPI); | 788 | DRM_MODE_ENCODER_LVDS); |
784 | drm_encoder_helper_add(encoder, | 789 | drm_encoder_helper_add(encoder, |
785 | p_funcs->encoder_helper_funcs); | 790 | p_funcs->encoder_helper_funcs); |
786 | 791 | ||
diff --git a/drivers/staging/gma500/mdfld_dsi_output.c b/drivers/staging/gma500/mdfld_dsi_output.c index 7536095c30a0..9050c0f78b15 100644 --- a/drivers/staging/gma500/mdfld_dsi_output.c +++ b/drivers/staging/gma500/mdfld_dsi_output.c | |||
@@ -955,7 +955,9 @@ void mdfld_dsi_output_init(struct drm_device *dev, | |||
955 | psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2; | 955 | psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2; |
956 | 956 | ||
957 | connector = &psb_output->base; | 957 | connector = &psb_output->base; |
958 | drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, DRM_MODE_CONNECTOR_MIPI); | 958 | /* Revisit type if MIPI/HDMI bridges ever appear on Medfield */ |
959 | drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, | ||
960 | DRM_MODE_CONNECTOR_LVDS); | ||
959 | drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); | 961 | drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); |
960 | 962 | ||
961 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 963 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
diff --git a/drivers/staging/gma500/medfield.h b/drivers/staging/gma500/medfield.h index 38165e8367e5..09e9687431f1 100644 --- a/drivers/staging/gma500/medfield.h +++ b/drivers/staging/gma500/medfield.h | |||
@@ -21,8 +21,6 @@ | |||
21 | * DEALINGS IN THE SOFTWARE. | 21 | * DEALINGS IN THE SOFTWARE. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #define DRM_MODE_ENCODER_MIPI 5 | ||
25 | |||
26 | /* Medfield DSI controller registers */ | 24 | /* Medfield DSI controller registers */ |
27 | 25 | ||
28 | #define MIPIA_DEVICE_READY_REG 0xb000 | 26 | #define MIPIA_DEVICE_READY_REG 0xb000 |
diff --git a/drivers/staging/gma500/psb_drv.h b/drivers/staging/gma500/psb_drv.h index 72f487a2a1b7..fd4732dd783a 100644 --- a/drivers/staging/gma500/psb_drv.h +++ b/drivers/staging/gma500/psb_drv.h | |||
@@ -35,7 +35,6 @@ | |||
35 | 35 | ||
36 | /* Append new drm mode definition here, align with libdrm definition */ | 36 | /* Append new drm mode definition here, align with libdrm definition */ |
37 | #define DRM_MODE_SCALE_NO_SCALE 2 | 37 | #define DRM_MODE_SCALE_NO_SCALE 2 |
38 | #define DRM_MODE_CONNECTOR_MIPI 15 | ||
39 | 38 | ||
40 | enum { | 39 | enum { |
41 | CHIP_PSB_8108 = 0, /* Poulsbo */ | 40 | CHIP_PSB_8108 = 0, /* Poulsbo */ |
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c index 9c0d2936e486..c3d73f8431ae 100644 --- a/drivers/staging/octeon/ethernet-rgmii.c +++ b/drivers/staging/octeon/ethernet-rgmii.c | |||
@@ -26,6 +26,7 @@ | |||
26 | **********************************************************************/ | 26 | **********************************************************************/ |
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
29 | #include <linux/interrupt.h> | ||
29 | #include <linux/phy.h> | 30 | #include <linux/phy.h> |
30 | #include <linux/ratelimit.h> | 31 | #include <linux/ratelimit.h> |
31 | #include <net/dst.h> | 32 | #include <net/dst.h> |
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c index 970825421884..d0e2d514968a 100644 --- a/drivers/staging/octeon/ethernet-spi.c +++ b/drivers/staging/octeon/ethernet-spi.c | |||
@@ -26,6 +26,7 @@ | |||
26 | **********************************************************************/ | 26 | **********************************************************************/ |
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
29 | #include <linux/interrupt.h> | ||
29 | #include <net/dst.h> | 30 | #include <net/dst.h> |
30 | 31 | ||
31 | #include <asm/octeon/octeon.h> | 32 | #include <asm/octeon/octeon.h> |
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c index 589a0554332e..3d1279c424a8 100644 --- a/drivers/staging/tidspbridge/core/dsp-clock.c +++ b/drivers/staging/tidspbridge/core/dsp-clock.c | |||
@@ -209,7 +209,6 @@ int dsp_clk_enable(enum dsp_clk_id clk_id) | |||
209 | break; | 209 | break; |
210 | #ifdef CONFIG_OMAP_MCBSP | 210 | #ifdef CONFIG_OMAP_MCBSP |
211 | case MCBSP_CLK: | 211 | case MCBSP_CLK: |
212 | omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO); | ||
213 | omap_mcbsp_request(MCBSP_ID(clk_id)); | 212 | omap_mcbsp_request(MCBSP_ID(clk_id)); |
214 | omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC); | 213 | omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC); |
215 | break; | 214 | break; |
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c index 975e34bcd722..1ca66ea9b281 100644 --- a/drivers/staging/zcache/tmem.c +++ b/drivers/staging/zcache/tmem.c | |||
@@ -604,7 +604,7 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index, | |||
604 | struct tmem_obj *obj; | 604 | struct tmem_obj *obj; |
605 | void *pampd; | 605 | void *pampd; |
606 | bool ephemeral = is_ephemeral(pool); | 606 | bool ephemeral = is_ephemeral(pool); |
607 | uint32_t ret = -1; | 607 | int ret = -1; |
608 | struct tmem_hashbucket *hb; | 608 | struct tmem_hashbucket *hb; |
609 | bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral); | 609 | bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral); |
610 | bool lock_held = false; | 610 | bool lock_held = false; |
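The tmem_get() hunk above changes ret from uint32_t to int: error codes are negative, and a test such as ret < 0 can never be true for an unsigned local. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t uret = -1;     /* stored as 0xffffffff */
        int ret = -1;

        printf("unsigned error test fires: %d\n", uret < 0 ? 1 : 0);  /* 0 */
        printf("signed error test fires:   %d\n", ret < 0 ? 1 : 0);   /* 1 */
        return 0;
}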
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index 855a5bb56a47..462fbc20561f 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c | |||
@@ -1158,7 +1158,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph, | |||
1158 | size_t clen; | 1158 | size_t clen; |
1159 | int ret; | 1159 | int ret; |
1160 | unsigned long count; | 1160 | unsigned long count; |
1161 | struct page *page = virt_to_page(data); | 1161 | struct page *page = (struct page *)(data); |
1162 | struct zcache_client *cli = pool->client; | 1162 | struct zcache_client *cli = pool->client; |
1163 | uint16_t client_id = get_client_id_from_client(cli); | 1163 | uint16_t client_id = get_client_id_from_client(cli); |
1164 | unsigned long zv_mean_zsize; | 1164 | unsigned long zv_mean_zsize; |
@@ -1227,7 +1227,7 @@ static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw, | |||
1227 | int ret = 0; | 1227 | int ret = 0; |
1228 | 1228 | ||
1229 | BUG_ON(is_ephemeral(pool)); | 1229 | BUG_ON(is_ephemeral(pool)); |
1230 | zv_decompress(virt_to_page(data), pampd); | 1230 | zv_decompress((struct page *)(data), pampd); |
1231 | return ret; | 1231 | return ret; |
1232 | } | 1232 | } |
1233 | 1233 | ||
@@ -1242,7 +1242,7 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw, | |||
1242 | int ret = 0; | 1242 | int ret = 0; |
1243 | 1243 | ||
1244 | BUG_ON(!is_ephemeral(pool)); | 1244 | BUG_ON(!is_ephemeral(pool)); |
1245 | zbud_decompress(virt_to_page(data), pampd); | 1245 | zbud_decompress((struct page *)(data), pampd); |
1246 | zbud_free_and_delist((struct zbud_hdr *)pampd); | 1246 | zbud_free_and_delist((struct zbud_hdr *)pampd); |
1247 | atomic_dec(&zcache_curr_eph_pampd_count); | 1247 | atomic_dec(&zcache_curr_eph_pampd_count); |
1248 | return ret; | 1248 | return ret; |
@@ -1539,7 +1539,7 @@ static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp, | |||
1539 | goto out; | 1539 | goto out; |
1540 | if (!zcache_freeze && zcache_do_preload(pool) == 0) { | 1540 | if (!zcache_freeze && zcache_do_preload(pool) == 0) { |
1541 | /* preload does preempt_disable on success */ | 1541 | /* preload does preempt_disable on success */ |
1542 | ret = tmem_put(pool, oidp, index, page_address(page), | 1542 | ret = tmem_put(pool, oidp, index, (char *)(page), |
1543 | PAGE_SIZE, 0, is_ephemeral(pool)); | 1543 | PAGE_SIZE, 0, is_ephemeral(pool)); |
1544 | if (ret < 0) { | 1544 | if (ret < 0) { |
1545 | if (is_ephemeral(pool)) | 1545 | if (is_ephemeral(pool)) |
@@ -1572,7 +1572,7 @@ static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp, | |||
1572 | pool = zcache_get_pool_by_id(cli_id, pool_id); | 1572 | pool = zcache_get_pool_by_id(cli_id, pool_id); |
1573 | if (likely(pool != NULL)) { | 1573 | if (likely(pool != NULL)) { |
1574 | if (atomic_read(&pool->obj_count) > 0) | 1574 | if (atomic_read(&pool->obj_count) > 0) |
1575 | ret = tmem_get(pool, oidp, index, page_address(page), | 1575 | ret = tmem_get(pool, oidp, index, (char *)(page), |
1576 | &size, 0, is_ephemeral(pool)); | 1576 | &size, 0, is_ephemeral(pool)); |
1577 | zcache_put_pool(pool); | 1577 | zcache_put_pool(pool); |
1578 | } | 1578 | } |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index c24fb10de60b..6a4ea29c2f36 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -2243,7 +2243,6 @@ static int iscsit_handle_snack( | |||
2243 | case 0: | 2243 | case 0: |
2244 | return iscsit_handle_recovery_datain_or_r2t(conn, buf, | 2244 | return iscsit_handle_recovery_datain_or_r2t(conn, buf, |
2245 | hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); | 2245 | hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); |
2246 | return 0; | ||
2247 | case ISCSI_FLAG_SNACK_TYPE_STATUS: | 2246 | case ISCSI_FLAG_SNACK_TYPE_STATUS: |
2248 | return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, | 2247 | return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, |
2249 | hdr->begrun, hdr->runlength); | 2248 | hdr->begrun, hdr->runlength); |
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index f095e65b1ccf..f1643dbf6a92 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c | |||
@@ -268,7 +268,7 @@ struct se_tpg_np *lio_target_call_addnptotpg( | |||
268 | ISCSI_TCP); | 268 | ISCSI_TCP); |
269 | if (IS_ERR(tpg_np)) { | 269 | if (IS_ERR(tpg_np)) { |
270 | iscsit_put_tpg(tpg); | 270 | iscsit_put_tpg(tpg); |
271 | return ERR_PTR(PTR_ERR(tpg_np)); | 271 | return ERR_CAST(tpg_np); |
272 | } | 272 | } |
273 | pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); | 273 | pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); |
274 | 274 | ||
@@ -1285,7 +1285,7 @@ struct se_wwn *lio_target_call_coreaddtiqn( | |||
1285 | 1285 | ||
1286 | tiqn = iscsit_add_tiqn((unsigned char *)name); | 1286 | tiqn = iscsit_add_tiqn((unsigned char *)name); |
1287 | if (IS_ERR(tiqn)) | 1287 | if (IS_ERR(tiqn)) |
1288 | return ERR_PTR(PTR_ERR(tiqn)); | 1288 | return ERR_CAST(tiqn); |
1289 | /* | 1289 | /* |
1290 | * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group. | 1290 | * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group. |
1291 | */ | 1291 | */ |
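Both configfs hunks above replace ERR_PTR(PTR_ERR(p)) with ERR_CAST(p), which forwards an error-encoded pointer to a different pointer type without decoding and re-encoding the errno. A minimal sketch with placeholder types and a placeholder allocator:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_inner { int id; };
struct demo_outer { int id; };

static struct demo_inner *demo_alloc_inner(void)
{
        struct demo_inner *p = kzalloc(sizeof(*p), GFP_KERNEL);

        return p ? p : ERR_PTR(-ENOMEM);
}

static struct demo_outer *demo_alloc_outer(void)
{
        struct demo_inner *inner = demo_alloc_inner();

        if (IS_ERR(inner))
                /* propagate the encoded errno across pointer types */
                return ERR_CAST(inner);

        /* ... wrap inner in an outer object (elided) ... */
        kfree(inner);
        return kzalloc(sizeof(struct demo_outer), GFP_KERNEL);
}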
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 980650792cf6..c4c68da3e500 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c | |||
@@ -834,7 +834,7 @@ static int iscsit_attach_ooo_cmdsn( | |||
834 | */ | 834 | */ |
835 | list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, | 835 | list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, |
836 | ooo_list) { | 836 | ooo_list) { |
837 | while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) | 837 | if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) |
838 | continue; | 838 | continue; |
839 | 839 | ||
840 | list_add(&ooo_cmdsn->ooo_list, | 840 | list_add(&ooo_cmdsn->ooo_list, |
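The iscsi_target_erl1.c hunk above turns a while into an if: inside list_for_each_entry() the continue must advance to the next element, whereas while (cond) continue; spins on the same element forever once the condition holds. A sketch of the corrected ordered insert, with illustrative types rather than the driver's:

#include <linux/list.h>
#include <linux/types.h>

struct demo_ooo {
        u32 cmdsn;
        struct list_head ooo_list;
};

static void demo_insert_sorted(struct list_head *head, struct demo_ooo *node)
{
        struct demo_ooo *tmp;

        list_for_each_entry(tmp, head, ooo_list) {
                if (tmp->cmdsn < node->cmdsn)
                        continue;       /* keep scanning for the slot */

                /* first entry with a higher CmdSN: insert before it */
                list_add_tail(&node->ooo_list, &tmp->ooo_list);
                return;
        }
        list_add_tail(&node->ooo_list, head);
}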
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index bcaf82f47037..daad362a93ce 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
@@ -1013,19 +1013,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) | |||
1013 | ISCSI_LOGIN_STATUS_TARGET_ERROR); | 1013 | ISCSI_LOGIN_STATUS_TARGET_ERROR); |
1014 | goto new_sess_out; | 1014 | goto new_sess_out; |
1015 | } | 1015 | } |
1016 | #if 0 | 1016 | snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", |
1017 | if (!iscsi_ntop6((const unsigned char *) | 1017 | &sock_in6.sin6_addr.in6_u); |
1018 | &sock_in6.sin6_addr.in6_u, | 1018 | conn->login_port = ntohs(sock_in6.sin6_port); |
1019 | (char *)&conn->ipv6_login_ip[0], | ||
1020 | IPV6_ADDRESS_SPACE)) { | ||
1021 | pr_err("iscsi_ntop6() failed\n"); | ||
1022 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | ||
1023 | ISCSI_LOGIN_STATUS_TARGET_ERROR); | ||
1024 | goto new_sess_out; | ||
1025 | } | ||
1026 | #else | ||
1027 | pr_debug("Skipping iscsi_ntop6()\n"); | ||
1028 | #endif | ||
1029 | } else { | 1019 | } else { |
1030 | memset(&sock_in, 0, sizeof(struct sockaddr_in)); | 1020 | memset(&sock_in, 0, sizeof(struct sockaddr_in)); |
1031 | 1021 | ||
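The login-thread hunk above drops the stubbed-out iscsi_ntop6() block in favour of the kernel's %pI6c format specifier, which prints a struct in6_addr in compressed form straight from snprintf(). A small sketch of that formatting step; the destination buffer handling is illustrative.

#include <linux/in6.h>
#include <linux/kernel.h>

static void demo_format_login_ip(char *dst, size_t len,
                                 const struct sockaddr_in6 *sin6)
{
        /* %pI6c emits the compressed textual form of the address */
        snprintf(dst, len, "%pI6c", &sin6->sin6_addr);
}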
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 252e246cf51e..5b773160200f 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c | |||
@@ -545,13 +545,13 @@ int iscsi_copy_param_list( | |||
545 | struct iscsi_param_list *src_param_list, | 545 | struct iscsi_param_list *src_param_list, |
546 | int leading) | 546 | int leading) |
547 | { | 547 | { |
548 | struct iscsi_param *new_param = NULL, *param = NULL; | 548 | struct iscsi_param *param = NULL; |
549 | struct iscsi_param *new_param = NULL; | ||
549 | struct iscsi_param_list *param_list = NULL; | 550 | struct iscsi_param_list *param_list = NULL; |
550 | 551 | ||
551 | param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); | 552 | param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); |
552 | if (!param_list) { | 553 | if (!param_list) { |
553 | pr_err("Unable to allocate memory for" | 554 | pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); |
554 | " struct iscsi_param_list.\n"); | ||
555 | goto err_out; | 555 | goto err_out; |
556 | } | 556 | } |
557 | INIT_LIST_HEAD(¶m_list->param_list); | 557 | INIT_LIST_HEAD(¶m_list->param_list); |
@@ -567,8 +567,17 @@ int iscsi_copy_param_list( | |||
567 | 567 | ||
568 | new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL); | 568 | new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL); |
569 | if (!new_param) { | 569 | if (!new_param) { |
570 | pr_err("Unable to allocate memory for" | 570 | pr_err("Unable to allocate memory for struct iscsi_param.\n"); |
571 | " struct iscsi_param.\n"); | 571 | goto err_out; |
572 | } | ||
573 | |||
574 | new_param->name = kstrdup(param->name, GFP_KERNEL); | ||
575 | new_param->value = kstrdup(param->value, GFP_KERNEL); | ||
576 | if (!new_param->value || !new_param->name) { | ||
577 | kfree(new_param->value); | ||
578 | kfree(new_param->name); | ||
579 | kfree(new_param); | ||
580 | pr_err("Unable to allocate memory for parameter name/value.\n"); | ||
572 | goto err_out; | 581 | goto err_out; |
573 | } | 582 | } |
574 | 583 | ||
@@ -580,32 +589,12 @@ int iscsi_copy_param_list( | |||
580 | new_param->use = param->use; | 589 | new_param->use = param->use; |
581 | new_param->type_range = param->type_range; | 590 | new_param->type_range = param->type_range; |
582 | 591 | ||
583 | new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL); | ||
584 | if (!new_param->name) { | ||
585 | pr_err("Unable to allocate memory for" | ||
586 | " parameter name.\n"); | ||
587 | goto err_out; | ||
588 | } | ||
589 | |||
590 | new_param->value = kzalloc(strlen(param->value) + 1, | ||
591 | GFP_KERNEL); | ||
592 | if (!new_param->value) { | ||
593 | pr_err("Unable to allocate memory for" | ||
594 | " parameter value.\n"); | ||
595 | goto err_out; | ||
596 | } | ||
597 | |||
598 | memcpy(new_param->name, param->name, strlen(param->name)); | ||
599 | new_param->name[strlen(param->name)] = '\0'; | ||
600 | memcpy(new_param->value, param->value, strlen(param->value)); | ||
601 | new_param->value[strlen(param->value)] = '\0'; | ||
602 | |||
603 | list_add_tail(&new_param->p_list, ¶m_list->param_list); | 592 | list_add_tail(&new_param->p_list, ¶m_list->param_list); |
604 | } | 593 | } |
605 | 594 | ||
606 | if (!list_empty(¶m_list->param_list)) | 595 | if (!list_empty(¶m_list->param_list)) { |
607 | *dst_param_list = param_list; | 596 | *dst_param_list = param_list; |
608 | else { | 597 | } else { |
609 | pr_err("No parameters allocated.\n"); | 598 | pr_err("No parameters allocated.\n"); |
610 | goto err_out; | 599 | goto err_out; |
611 | } | 600 | } |
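The iscsi_copy_param_list() hunks above collapse the open-coded kzalloc()+memcpy() string copies into kstrdup() and fold the failure handling into a single path that frees whatever was allocated (kfree(NULL) is a no-op). A minimal sketch with a stand-in struct:

#include <linux/slab.h>
#include <linux/string.h>

struct demo_param {
        char *name;
        char *value;
};

static struct demo_param *demo_param_clone(const struct demo_param *src)
{
        struct demo_param *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;

        p->name = kstrdup(src->name, GFP_KERNEL);
        p->value = kstrdup(src->value, GFP_KERNEL);
        if (!p->name || !p->value) {
                /* kfree(NULL) is safe, so one error path covers both */
                kfree(p->name);
                kfree(p->value);
                kfree(p);
                return NULL;
        }
        return p;
}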
@@ -1441,7 +1430,7 @@ static int iscsi_enforce_integrity_rules( | |||
1441 | u8 DataSequenceInOrder = 0; | 1430 | u8 DataSequenceInOrder = 0; |
1442 | u8 ErrorRecoveryLevel = 0, SessionType = 0; | 1431 | u8 ErrorRecoveryLevel = 0, SessionType = 0; |
1443 | u8 IFMarker = 0, OFMarker = 0; | 1432 | u8 IFMarker = 0, OFMarker = 0; |
1444 | u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0; | 1433 | u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1; |
1445 | u32 FirstBurstLength = 0, MaxBurstLength = 0; | 1434 | u32 FirstBurstLength = 0, MaxBurstLength = 0; |
1446 | struct iscsi_param *param = NULL; | 1435 | struct iscsi_param *param = NULL; |
1447 | 1436 | ||
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index a1acb0167902..f00137f377b2 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c | |||
@@ -243,7 +243,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( | |||
243 | if (!cmd->tmr_req) { | 243 | if (!cmd->tmr_req) { |
244 | pr_err("Unable to allocate memory for" | 244 | pr_err("Unable to allocate memory for" |
245 | " Task Management command!\n"); | 245 | " Task Management command!\n"); |
246 | return NULL; | 246 | goto out; |
247 | } | 247 | } |
248 | /* | 248 | /* |
249 | * TASK_REASSIGN for ERL=2 / connection stays inside of | 249 | * TASK_REASSIGN for ERL=2 / connection stays inside of |
@@ -298,8 +298,6 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( | |||
298 | return cmd; | 298 | return cmd; |
299 | out: | 299 | out: |
300 | iscsit_release_cmd(cmd); | 300 | iscsit_release_cmd(cmd); |
301 | if (se_cmd) | ||
302 | transport_free_se_cmd(se_cmd); | ||
303 | return NULL; | 301 | return NULL; |
304 | } | 302 | } |
305 | 303 | ||
@@ -877,40 +875,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess) | |||
877 | } | 875 | } |
878 | 876 | ||
879 | /* | 877 | /* |
880 | * Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker | ||
881 | * array counts needed for sync and steering. | ||
882 | */ | ||
883 | static int iscsit_determine_sync_and_steering_counts( | ||
884 | struct iscsi_conn *conn, | ||
885 | struct iscsi_data_count *count) | ||
886 | { | ||
887 | u32 length = count->data_length; | ||
888 | u32 marker, markint; | ||
889 | |||
890 | count->sync_and_steering = 1; | ||
891 | |||
892 | marker = (count->type == ISCSI_RX_DATA) ? | ||
893 | conn->of_marker : conn->if_marker; | ||
894 | markint = (count->type == ISCSI_RX_DATA) ? | ||
895 | (conn->conn_ops->OFMarkInt * 4) : | ||
896 | (conn->conn_ops->IFMarkInt * 4); | ||
897 | count->ss_iov_count = count->iov_count; | ||
898 | |||
899 | while (length > 0) { | ||
900 | if (length >= marker) { | ||
901 | count->ss_iov_count += 3; | ||
902 | count->ss_marker_count += 2; | ||
903 | |||
904 | length -= marker; | ||
905 | marker = markint; | ||
906 | } else | ||
907 | length = 0; | ||
908 | } | ||
909 | |||
910 | return 0; | ||
911 | } | ||
912 | |||
913 | /* | ||
914 | * Setup conn->if_marker and conn->of_marker values based upon | 878 | * Setup conn->if_marker and conn->of_marker values based upon |
915 | * the initial marker-less interval. (see iSCSI v19 A.2) | 879 | * the initial marker-less interval. (see iSCSI v19 A.2) |
916 | */ | 880 | */ |
@@ -1292,7 +1256,7 @@ int iscsit_fe_sendpage_sg( | |||
1292 | struct kvec iov; | 1256 | struct kvec iov; |
1293 | u32 tx_hdr_size, data_len; | 1257 | u32 tx_hdr_size, data_len; |
1294 | u32 offset = cmd->first_data_sg_off; | 1258 | u32 offset = cmd->first_data_sg_off; |
1295 | int tx_sent; | 1259 | int tx_sent, iov_off; |
1296 | 1260 | ||
1297 | send_hdr: | 1261 | send_hdr: |
1298 | tx_hdr_size = ISCSI_HDR_LEN; | 1262 | tx_hdr_size = ISCSI_HDR_LEN; |
@@ -1312,9 +1276,19 @@ send_hdr: | |||
1312 | } | 1276 | } |
1313 | 1277 | ||
1314 | data_len = cmd->tx_size - tx_hdr_size - cmd->padding; | 1278 | data_len = cmd->tx_size - tx_hdr_size - cmd->padding; |
1315 | if (conn->conn_ops->DataDigest) | 1279 | /* |
1280 | * Set iov_off used by padding and data digest tx_data() calls below | ||
1281 | * in order to determine proper offset into cmd->iov_data[] | ||
1282 | */ | ||
1283 | if (conn->conn_ops->DataDigest) { | ||
1316 | data_len -= ISCSI_CRC_LEN; | 1284 | data_len -= ISCSI_CRC_LEN; |
1317 | 1285 | if (cmd->padding) | |
1286 | iov_off = (cmd->iov_data_count - 2); | ||
1287 | else | ||
1288 | iov_off = (cmd->iov_data_count - 1); | ||
1289 | } else { | ||
1290 | iov_off = (cmd->iov_data_count - 1); | ||
1291 | } | ||
1318 | /* | 1292 | /* |
1319 | * Perform sendpage() for each page in the scatterlist | 1293 | * Perform sendpage() for each page in the scatterlist |
1320 | */ | 1294 | */ |
@@ -1343,8 +1317,7 @@ send_pg: | |||
1343 | 1317 | ||
1344 | send_padding: | 1318 | send_padding: |
1345 | if (cmd->padding) { | 1319 | if (cmd->padding) { |
1346 | struct kvec *iov_p = | 1320 | struct kvec *iov_p = &cmd->iov_data[iov_off++]; |
1347 | &cmd->iov_data[cmd->iov_data_count-1]; | ||
1348 | 1321 | ||
1349 | tx_sent = tx_data(conn, iov_p, 1, cmd->padding); | 1322 | tx_sent = tx_data(conn, iov_p, 1, cmd->padding); |
1350 | if (cmd->padding != tx_sent) { | 1323 | if (cmd->padding != tx_sent) { |
@@ -1358,8 +1331,7 @@ send_padding: | |||
1358 | 1331 | ||
1359 | send_datacrc: | 1332 | send_datacrc: |
1360 | if (conn->conn_ops->DataDigest) { | 1333 | if (conn->conn_ops->DataDigest) { |
1361 | struct kvec *iov_d = | 1334 | struct kvec *iov_d = &cmd->iov_data[iov_off]; |
1362 | &cmd->iov_data[cmd->iov_data_count]; | ||
1363 | 1335 | ||
1364 | tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN); | 1336 | tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN); |
1365 | if (ISCSI_CRC_LEN != tx_sent) { | 1337 | if (ISCSI_CRC_LEN != tx_sent) { |
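The iscsit_fe_sendpage_sg() hunks above compute iov_off once so that the trailing kvecs are indexed consistently: with a data digest present, the padding entry (if any) sits one slot before the CRC entry; otherwise the padding is the last entry. A standalone model of that index calculation:

#include <stdbool.h>
#include <stdio.h>

static int demo_first_trailer_index(int iov_data_count, bool data_digest,
                                    bool padding)
{
        if (data_digest)
                return padding ? iov_data_count - 2 : iov_data_count - 1;
        return iov_data_count - 1;
}

int main(void)
{
        printf("%d\n", demo_first_trailer_index(5, true, true));   /* 3 */
        printf("%d\n", demo_first_trailer_index(5, true, false));  /* 4 */
        printf("%d\n", demo_first_trailer_index(5, false, true));  /* 4 */
        return 0;
}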
@@ -1433,8 +1405,7 @@ static int iscsit_do_rx_data( | |||
1433 | struct iscsi_data_count *count) | 1405 | struct iscsi_data_count *count) |
1434 | { | 1406 | { |
1435 | int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; | 1407 | int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; |
1436 | u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0; | 1408 | struct kvec *iov_p; |
1437 | struct kvec iov[count->ss_iov_count], *iov_p; | ||
1438 | struct msghdr msg; | 1409 | struct msghdr msg; |
1439 | 1410 | ||
1440 | if (!conn || !conn->sock || !conn->conn_ops) | 1411 | if (!conn || !conn->sock || !conn->conn_ops) |
@@ -1442,93 +1413,8 @@ static int iscsit_do_rx_data( | |||
1442 | 1413 | ||
1443 | memset(&msg, 0, sizeof(struct msghdr)); | 1414 | memset(&msg, 0, sizeof(struct msghdr)); |
1444 | 1415 | ||
1445 | if (count->sync_and_steering) { | 1416 | iov_p = count->iov; |
1446 | int size = 0; | 1417 | iov_len = count->iov_count; |
1447 | u32 i, orig_iov_count = 0; | ||
1448 | u32 orig_iov_len = 0, orig_iov_loc = 0; | ||
1449 | u32 iov_count = 0, per_iov_bytes = 0; | ||
1450 | u32 *rx_marker, old_rx_marker = 0; | ||
1451 | struct kvec *iov_record; | ||
1452 | |||
1453 | memset(&rx_marker_val, 0, | ||
1454 | count->ss_marker_count * sizeof(u32)); | ||
1455 | memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); | ||
1456 | |||
1457 | iov_record = count->iov; | ||
1458 | orig_iov_count = count->iov_count; | ||
1459 | rx_marker = &conn->of_marker; | ||
1460 | |||
1461 | i = 0; | ||
1462 | size = data; | ||
1463 | orig_iov_len = iov_record[orig_iov_loc].iov_len; | ||
1464 | while (size > 0) { | ||
1465 | pr_debug("rx_data: #1 orig_iov_len %u," | ||
1466 | " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); | ||
1467 | pr_debug("rx_data: #2 rx_marker %u, size" | ||
1468 | " %u\n", *rx_marker, size); | ||
1469 | |||
1470 | if (orig_iov_len >= *rx_marker) { | ||
1471 | iov[iov_count].iov_len = *rx_marker; | ||
1472 | iov[iov_count++].iov_base = | ||
1473 | (iov_record[orig_iov_loc].iov_base + | ||
1474 | per_iov_bytes); | ||
1475 | |||
1476 | iov[iov_count].iov_len = (MARKER_SIZE / 2); | ||
1477 | iov[iov_count++].iov_base = | ||
1478 | &rx_marker_val[rx_marker_iov++]; | ||
1479 | iov[iov_count].iov_len = (MARKER_SIZE / 2); | ||
1480 | iov[iov_count++].iov_base = | ||
1481 | &rx_marker_val[rx_marker_iov++]; | ||
1482 | old_rx_marker = *rx_marker; | ||
1483 | |||
1484 | /* | ||
1485 | * OFMarkInt is in 32-bit words. | ||
1486 | */ | ||
1487 | *rx_marker = (conn->conn_ops->OFMarkInt * 4); | ||
1488 | size -= old_rx_marker; | ||
1489 | orig_iov_len -= old_rx_marker; | ||
1490 | per_iov_bytes += old_rx_marker; | ||
1491 | |||
1492 | pr_debug("rx_data: #3 new_rx_marker" | ||
1493 | " %u, size %u\n", *rx_marker, size); | ||
1494 | } else { | ||
1495 | iov[iov_count].iov_len = orig_iov_len; | ||
1496 | iov[iov_count++].iov_base = | ||
1497 | (iov_record[orig_iov_loc].iov_base + | ||
1498 | per_iov_bytes); | ||
1499 | |||
1500 | per_iov_bytes = 0; | ||
1501 | *rx_marker -= orig_iov_len; | ||
1502 | size -= orig_iov_len; | ||
1503 | |||
1504 | if (size) | ||
1505 | orig_iov_len = | ||
1506 | iov_record[++orig_iov_loc].iov_len; | ||
1507 | |||
1508 | pr_debug("rx_data: #4 new_rx_marker" | ||
1509 | " %u, size %u\n", *rx_marker, size); | ||
1510 | } | ||
1511 | } | ||
1512 | data += (rx_marker_iov * (MARKER_SIZE / 2)); | ||
1513 | |||
1514 | iov_p = &iov[0]; | ||
1515 | iov_len = iov_count; | ||
1516 | |||
1517 | if (iov_count > count->ss_iov_count) { | ||
1518 | pr_err("iov_count: %d, count->ss_iov_count:" | ||
1519 | " %d\n", iov_count, count->ss_iov_count); | ||
1520 | return -1; | ||
1521 | } | ||
1522 | if (rx_marker_iov > count->ss_marker_count) { | ||
1523 | pr_err("rx_marker_iov: %d, count->ss_marker" | ||
1524 | "_count: %d\n", rx_marker_iov, | ||
1525 | count->ss_marker_count); | ||
1526 | return -1; | ||
1527 | } | ||
1528 | } else { | ||
1529 | iov_p = count->iov; | ||
1530 | iov_len = count->iov_count; | ||
1531 | } | ||
1532 | 1418 | ||
1533 | while (total_rx < data) { | 1419 | while (total_rx < data) { |
1534 | rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, | 1420 | rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, |
@@ -1543,16 +1429,6 @@ static int iscsit_do_rx_data( | |||
1543 | rx_loop, total_rx, data); | 1429 | rx_loop, total_rx, data); |
1544 | } | 1430 | } |
1545 | 1431 | ||
1546 | if (count->sync_and_steering) { | ||
1547 | int j; | ||
1548 | for (j = 0; j < rx_marker_iov; j++) { | ||
1549 | pr_debug("rx_data: #5 j: %d, offset: %d\n", | ||
1550 | j, rx_marker_val[j]); | ||
1551 | conn->of_marker_offset = rx_marker_val[j]; | ||
1552 | } | ||
1553 | total_rx -= (rx_marker_iov * (MARKER_SIZE / 2)); | ||
1554 | } | ||
1555 | |||
1556 | return total_rx; | 1432 | return total_rx; |
1557 | } | 1433 | } |
1558 | 1434 | ||
@@ -1561,8 +1437,7 @@ static int iscsit_do_tx_data( | |||
1561 | struct iscsi_data_count *count) | 1437 | struct iscsi_data_count *count) |
1562 | { | 1438 | { |
1563 | int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; | 1439 | int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; |
1564 | u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0; | 1440 | struct kvec *iov_p; |
1565 | struct kvec iov[count->ss_iov_count], *iov_p; | ||
1566 | struct msghdr msg; | 1441 | struct msghdr msg; |
1567 | 1442 | ||
1568 | if (!conn || !conn->sock || !conn->conn_ops) | 1443 | if (!conn || !conn->sock || !conn->conn_ops) |
@@ -1575,98 +1450,8 @@ static int iscsit_do_tx_data( | |||
1575 | 1450 | ||
1576 | memset(&msg, 0, sizeof(struct msghdr)); | 1451 | memset(&msg, 0, sizeof(struct msghdr)); |
1577 | 1452 | ||
1578 | if (count->sync_and_steering) { | 1453 | iov_p = count->iov; |
1579 | int size = 0; | 1454 | iov_len = count->iov_count; |
1580 | u32 i, orig_iov_count = 0; | ||
1581 | u32 orig_iov_len = 0, orig_iov_loc = 0; | ||
1582 | u32 iov_count = 0, per_iov_bytes = 0; | ||
1583 | u32 *tx_marker, old_tx_marker = 0; | ||
1584 | struct kvec *iov_record; | ||
1585 | |||
1586 | memset(&tx_marker_val, 0, | ||
1587 | count->ss_marker_count * sizeof(u32)); | ||
1588 | memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); | ||
1589 | |||
1590 | iov_record = count->iov; | ||
1591 | orig_iov_count = count->iov_count; | ||
1592 | tx_marker = &conn->if_marker; | ||
1593 | |||
1594 | i = 0; | ||
1595 | size = data; | ||
1596 | orig_iov_len = iov_record[orig_iov_loc].iov_len; | ||
1597 | while (size > 0) { | ||
1598 | pr_debug("tx_data: #1 orig_iov_len %u," | ||
1599 | " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); | ||
1600 | pr_debug("tx_data: #2 tx_marker %u, size" | ||
1601 | " %u\n", *tx_marker, size); | ||
1602 | |||
1603 | if (orig_iov_len >= *tx_marker) { | ||
1604 | iov[iov_count].iov_len = *tx_marker; | ||
1605 | iov[iov_count++].iov_base = | ||
1606 | (iov_record[orig_iov_loc].iov_base + | ||
1607 | per_iov_bytes); | ||
1608 | |||
1609 | tx_marker_val[tx_marker_iov] = | ||
1610 | (size - *tx_marker); | ||
1611 | iov[iov_count].iov_len = (MARKER_SIZE / 2); | ||
1612 | iov[iov_count++].iov_base = | ||
1613 | &tx_marker_val[tx_marker_iov++]; | ||
1614 | iov[iov_count].iov_len = (MARKER_SIZE / 2); | ||
1615 | iov[iov_count++].iov_base = | ||
1616 | &tx_marker_val[tx_marker_iov++]; | ||
1617 | old_tx_marker = *tx_marker; | ||
1618 | |||
1619 | /* | ||
1620 | * IFMarkInt is in 32-bit words. | ||
1621 | */ | ||
1622 | *tx_marker = (conn->conn_ops->IFMarkInt * 4); | ||
1623 | size -= old_tx_marker; | ||
1624 | orig_iov_len -= old_tx_marker; | ||
1625 | per_iov_bytes += old_tx_marker; | ||
1626 | |||
1627 | pr_debug("tx_data: #3 new_tx_marker" | ||
1628 | " %u, size %u\n", *tx_marker, size); | ||
1629 | pr_debug("tx_data: #4 offset %u\n", | ||
1630 | tx_marker_val[tx_marker_iov-1]); | ||
1631 | } else { | ||
1632 | iov[iov_count].iov_len = orig_iov_len; | ||
1633 | iov[iov_count++].iov_base | ||
1634 | = (iov_record[orig_iov_loc].iov_base + | ||
1635 | per_iov_bytes); | ||
1636 | |||
1637 | per_iov_bytes = 0; | ||
1638 | *tx_marker -= orig_iov_len; | ||
1639 | size -= orig_iov_len; | ||
1640 | |||
1641 | if (size) | ||
1642 | orig_iov_len = | ||
1643 | iov_record[++orig_iov_loc].iov_len; | ||
1644 | |||
1645 | pr_debug("tx_data: #5 new_tx_marker" | ||
1646 | " %u, size %u\n", *tx_marker, size); | ||
1647 | } | ||
1648 | } | ||
1649 | |||
1650 | data += (tx_marker_iov * (MARKER_SIZE / 2)); | ||
1651 | |||
1652 | iov_p = &iov[0]; | ||
1653 | iov_len = iov_count; | ||
1654 | |||
1655 | if (iov_count > count->ss_iov_count) { | ||
1656 | pr_err("iov_count: %d, count->ss_iov_count:" | ||
1657 | " %d\n", iov_count, count->ss_iov_count); | ||
1658 | return -1; | ||
1659 | } | ||
1660 | if (tx_marker_iov > count->ss_marker_count) { | ||
1661 | pr_err("tx_marker_iov: %d, count->ss_marker" | ||
1662 | "_count: %d\n", tx_marker_iov, | ||
1663 | count->ss_marker_count); | ||
1664 | return -1; | ||
1665 | } | ||
1666 | } else { | ||
1667 | iov_p = count->iov; | ||
1668 | iov_len = count->iov_count; | ||
1669 | } | ||
1670 | 1455 | ||
1671 | while (total_tx < data) { | 1456 | while (total_tx < data) { |
1672 | tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, | 1457 | tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, |
@@ -1681,9 +1466,6 @@ static int iscsit_do_tx_data( | |||
1681 | tx_loop, total_tx, data); | 1466 | tx_loop, total_tx, data); |
1682 | } | 1467 | } |
1683 | 1468 | ||
1684 | if (count->sync_and_steering) | ||
1685 | total_tx -= (tx_marker_iov * (MARKER_SIZE / 2)); | ||
1686 | |||
1687 | return total_tx; | 1469 | return total_tx; |
1688 | } | 1470 | } |
1689 | 1471 | ||
@@ -1704,12 +1486,6 @@ int rx_data( | |||
1704 | c.data_length = data; | 1486 | c.data_length = data; |
1705 | c.type = ISCSI_RX_DATA; | 1487 | c.type = ISCSI_RX_DATA; |
1706 | 1488 | ||
1707 | if (conn->conn_ops->OFMarker && | ||
1708 | (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { | ||
1709 | if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) | ||
1710 | return -1; | ||
1711 | } | ||
1712 | |||
1713 | return iscsit_do_rx_data(conn, &c); | 1489 | return iscsit_do_rx_data(conn, &c); |
1714 | } | 1490 | } |
1715 | 1491 | ||
@@ -1730,12 +1506,6 @@ int tx_data( | |||
1730 | c.data_length = data; | 1506 | c.data_length = data; |
1731 | c.type = ISCSI_TX_DATA; | 1507 | c.type = ISCSI_TX_DATA; |
1732 | 1508 | ||
1733 | if (conn->conn_ops->IFMarker && | ||
1734 | (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { | ||
1735 | if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) | ||
1736 | return -1; | ||
1737 | } | ||
1738 | |||
1739 | return iscsit_do_tx_data(conn, &c); | 1509 | return iscsit_do_tx_data(conn, &c); |
1740 | } | 1510 | } |
1741 | 1511 | ||
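With the sync-and-steering marker support removed, iscsit_do_rx_data() and iscsit_do_tx_data() now hand the caller's kvec array straight to kernel_recvmsg()/kernel_sendmsg(). A reduced sketch of the receive side, with error handling trimmed to the minimum and names chosen for illustration:

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/uio.h>

static int demo_rx_data(struct socket *sock, struct kvec *iov, int iov_count,
                        int data_length)
{
        struct msghdr msg;
        int total = 0, rx;

        memset(&msg, 0, sizeof(msg));

        while (total < data_length) {
                rx = kernel_recvmsg(sock, &msg, iov, iov_count,
                                    data_length - total, MSG_WAITALL);
                if (rx <= 0)
                        return -1;
                total += rx;
        }
        return total;
}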
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 8ae09a1bdf74..f04d4ef99dca 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c | |||
@@ -24,6 +24,7 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/ctype.h> | ||
27 | #include <asm/unaligned.h> | 28 | #include <asm/unaligned.h> |
28 | #include <scsi/scsi.h> | 29 | #include <scsi/scsi.h> |
29 | 30 | ||
@@ -67,6 +68,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) | |||
67 | { | 68 | { |
68 | struct se_lun *lun = cmd->se_lun; | 69 | struct se_lun *lun = cmd->se_lun; |
69 | struct se_device *dev = cmd->se_dev; | 70 | struct se_device *dev = cmd->se_dev; |
71 | struct se_portal_group *tpg = lun->lun_sep->sep_tpg; | ||
70 | unsigned char *buf; | 72 | unsigned char *buf; |
71 | 73 | ||
72 | /* | 74 | /* |
@@ -81,9 +83,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd) | |||
81 | 83 | ||
82 | buf = transport_kmap_first_data_page(cmd); | 84 | buf = transport_kmap_first_data_page(cmd); |
83 | 85 | ||
84 | buf[0] = dev->transport->get_device_type(dev); | 86 | if (dev == tpg->tpg_virt_lun0.lun_se_dev) { |
85 | if (buf[0] == TYPE_TAPE) | 87 | buf[0] = 0x3f; /* Not connected */ |
86 | buf[1] = 0x80; | 88 | } else { |
89 | buf[0] = dev->transport->get_device_type(dev); | ||
90 | if (buf[0] == TYPE_TAPE) | ||
91 | buf[1] = 0x80; | ||
92 | } | ||
87 | buf[2] = dev->transport->get_device_rev(dev); | 93 | buf[2] = dev->transport->get_device_rev(dev); |
88 | 94 | ||
89 | /* | 95 | /* |
@@ -149,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) | |||
149 | return 0; | 155 | return 0; |
150 | } | 156 | } |
151 | 157 | ||
158 | static void | ||
159 | target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off) | ||
160 | { | ||
161 | unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; | ||
162 | unsigned char *buf = buf_off; | ||
163 | int cnt = 0, next = 1; | ||
164 | /* | ||
165 | * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on | ||
166 | * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field | ||
167 | * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION | ||
168 | * to complete the payload. These are based on VPD=0x80 PRODUCT SERIAL | ||
169 | * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure | ||
170 | * per device uniqueness. | ||
171 | */ | ||
172 | while (*p != '\0') { | ||
173 | if (cnt >= 13) | ||
174 | break; | ||
175 | if (!isxdigit(*p)) { | ||
176 | p++; | ||
177 | continue; | ||
178 | } | ||
179 | if (next != 0) { | ||
180 | buf[cnt++] |= hex_to_bin(*p++); | ||
181 | next = 0; | ||
182 | } else { | ||
183 | buf[cnt] = hex_to_bin(*p++) << 4; | ||
184 | next = 1; | ||
185 | } | ||
186 | } | ||
187 | } | ||
188 | |||
152 | /* | 189 | /* |
153 | * Device identification VPD, for a complete list of | 190 | * Device identification VPD, for a complete list of |
154 | * DESIGNATOR TYPEs see spc4r17 Table 459. | 191 | * DESIGNATOR TYPEs see spc4r17 Table 459. |
@@ -214,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) | |||
214 | * VENDOR_SPECIFIC_IDENTIFIER and | 251 | * VENDOR_SPECIFIC_IDENTIFIER and |
215 | * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION | 252 | * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION |
216 | */ | 253 | */ |
217 | buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]); | 254 | target_parse_naa_6h_vendor_specific(dev, &buf[off]); |
218 | hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12); | ||
219 | 255 | ||
220 | len = 20; | 256 | len = 20; |
221 | off = (len + 4); | 257 | off = (len + 4); |
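The new target_parse_naa_6h_vendor_specific() helper above walks the unit-serial string, skips anything that is not a hex digit, and packs up to 13 bytes of nibbles, starting by OR-ing the first digit into the byte that already carries the NAA prefix. A standalone, runnable model of that loop (the serial string and prefix byte are made up):

#include <ctype.h>
#include <stdio.h>

static int hexval(int c)
{
        return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
}

static void pack_naa6(const char *serial, unsigned char *buf)
{
        int cnt = 0, next = 1;

        while (*serial && cnt < 13) {
                if (!isxdigit((unsigned char)*serial)) {
                        serial++;               /* skip '-' and friends */
                        continue;
                }
                if (next) {
                        buf[cnt++] |= hexval(*serial++);        /* low nibble */
                        next = 0;
                } else {
                        buf[cnt] = hexval(*serial++) << 4;      /* high nibble */
                        next = 1;
                }
        }
}

int main(void)
{
        unsigned char buf[20] = { 0x60 };       /* NAA 6h prefix already set */
        int i;

        pack_naa6("deadbeef-0123-4567", buf);
        for (i = 0; i < 8; i++)
                printf("%02x", buf[i]);
        printf("\n");                           /* 6deadbeef0123456 */
        return 0;
}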
@@ -915,8 +951,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) | |||
915 | length += target_modesense_control(dev, &buf[offset+length]); | 951 | length += target_modesense_control(dev, &buf[offset+length]); |
916 | break; | 952 | break; |
917 | default: | 953 | default: |
918 | pr_err("Got Unknown Mode Page: 0x%02x\n", | 954 | pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", |
919 | cdb[2] & 0x3f); | 955 | cdb[2] & 0x3f, cdb[3]); |
920 | return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; | 956 | return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; |
921 | } | 957 | } |
922 | offset += length; | 958 | offset += length; |
@@ -1072,8 +1108,6 @@ target_emulate_unmap(struct se_task *task) | |||
1072 | size -= 16; | 1108 | size -= 16; |
1073 | } | 1109 | } |
1074 | 1110 | ||
1075 | task->task_scsi_status = GOOD; | ||
1076 | transport_complete_task(task, 1); | ||
1077 | err: | 1111 | err: |
1078 | transport_kunmap_first_data_page(cmd); | 1112 | transport_kunmap_first_data_page(cmd); |
1079 | 1113 | ||
@@ -1085,24 +1119,17 @@ err: | |||
1085 | * Note this is not used for TCM/pSCSI passthrough | 1119 | * Note this is not used for TCM/pSCSI passthrough |
1086 | */ | 1120 | */ |
1087 | static int | 1121 | static int |
1088 | target_emulate_write_same(struct se_task *task, int write_same32) | 1122 | target_emulate_write_same(struct se_task *task, u32 num_blocks) |
1089 | { | 1123 | { |
1090 | struct se_cmd *cmd = task->task_se_cmd; | 1124 | struct se_cmd *cmd = task->task_se_cmd; |
1091 | struct se_device *dev = cmd->se_dev; | 1125 | struct se_device *dev = cmd->se_dev; |
1092 | sector_t range; | 1126 | sector_t range; |
1093 | sector_t lba = cmd->t_task_lba; | 1127 | sector_t lba = cmd->t_task_lba; |
1094 | unsigned int num_blocks; | ||
1095 | int ret; | 1128 | int ret; |
1096 | /* | 1129 | /* |
1097 | * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explict | 1130 | * Use the explicit range when non zero is supplied, otherwise calculate |
1098 | * range when non zero is supplied, otherwise calculate the remaining | 1131 | * the remaining range based on ->get_blocks() - starting LBA. |
1099 | * range based on ->get_blocks() - starting LBA. | ||
1100 | */ | 1132 | */ |
1101 | if (write_same32) | ||
1102 | num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); | ||
1103 | else | ||
1104 | num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); | ||
1105 | |||
1106 | if (num_blocks != 0) | 1133 | if (num_blocks != 0) |
1107 | range = num_blocks; | 1134 | range = num_blocks; |
1108 | else | 1135 | else |
@@ -1117,8 +1144,6 @@ target_emulate_write_same(struct se_task *task, int write_same32) | |||
1117 | return ret; | 1144 | return ret; |
1118 | } | 1145 | } |
1119 | 1146 | ||
1120 | task->task_scsi_status = GOOD; | ||
1121 | transport_complete_task(task, 1); | ||
1122 | return 0; | 1147 | return 0; |
1123 | } | 1148 | } |
1124 | 1149 | ||
@@ -1165,13 +1190,23 @@ transport_emulate_control_cdb(struct se_task *task) | |||
1165 | } | 1190 | } |
1166 | ret = target_emulate_unmap(task); | 1191 | ret = target_emulate_unmap(task); |
1167 | break; | 1192 | break; |
1193 | case WRITE_SAME: | ||
1194 | if (!dev->transport->do_discard) { | ||
1195 | pr_err("WRITE_SAME emulation not supported" | ||
1196 | " for: %s\n", dev->transport->name); | ||
1197 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
1198 | } | ||
1199 | ret = target_emulate_write_same(task, | ||
1200 | get_unaligned_be16(&cmd->t_task_cdb[7])); | ||
1201 | break; | ||
1168 | case WRITE_SAME_16: | 1202 | case WRITE_SAME_16: |
1169 | if (!dev->transport->do_discard) { | 1203 | if (!dev->transport->do_discard) { |
1170 | pr_err("WRITE_SAME_16 emulation not supported" | 1204 | pr_err("WRITE_SAME_16 emulation not supported" |
1171 | " for: %s\n", dev->transport->name); | 1205 | " for: %s\n", dev->transport->name); |
1172 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1206 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1173 | } | 1207 | } |
1174 | ret = target_emulate_write_same(task, 0); | 1208 | ret = target_emulate_write_same(task, |
1209 | get_unaligned_be32(&cmd->t_task_cdb[10])); | ||
1175 | break; | 1210 | break; |
1176 | case VARIABLE_LENGTH_CMD: | 1211 | case VARIABLE_LENGTH_CMD: |
1177 | service_action = | 1212 | service_action = |
@@ -1184,7 +1219,8 @@ transport_emulate_control_cdb(struct se_task *task) | |||
1184 | dev->transport->name); | 1219 | dev->transport->name); |
1185 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1220 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1186 | } | 1221 | } |
1187 | ret = target_emulate_write_same(task, 1); | 1222 | ret = target_emulate_write_same(task, |
1223 | get_unaligned_be32(&cmd->t_task_cdb[28])); | ||
1188 | break; | 1224 | break; |
1189 | default: | 1225 | default: |
1190 | pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" | 1226 | pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" |
@@ -1219,8 +1255,14 @@ transport_emulate_control_cdb(struct se_task *task) | |||
1219 | 1255 | ||
1220 | if (ret < 0) | 1256 | if (ret < 0) |
1221 | return ret; | 1257 | return ret; |
1222 | task->task_scsi_status = GOOD; | 1258 | /* |
1223 | transport_complete_task(task, 1); | 1259 | * Handle the successful completion here unless a caller |
1260 | * has explicitly requested an asynchronous completion. | ||
1261 | */ | ||
1262 | if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { | ||
1263 | task->task_scsi_status = GOOD; | ||
1264 | transport_complete_task(task, 1); | ||
1265 | } | ||
1224 | 1266 | ||
1225 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | 1267 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; |
1226 | } | 1268 | } |
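target_emulate_write_same() now takes the block count directly, so each caller extracts it with the width its CDB defines: a 16-bit count at byte 7 for WRITE SAME (10), a 32-bit count at byte 10 for WRITE SAME (16), and a 32-bit count at byte 28 for the 32-byte form carried by VARIABLE_LENGTH_CMD. A hedged sketch of that extraction (the default branch simply assumes the 32-byte variant):

#include <asm/unaligned.h>
#include <linux/types.h>
#include <scsi/scsi.h>

static u32 demo_write_same_blocks(const unsigned char *cdb)
{
        switch (cdb[0]) {
        case WRITE_SAME:        /* 10-byte CDB: 16-bit count at byte 7 */
                return get_unaligned_be16(&cdb[7]);
        case WRITE_SAME_16:     /* 16-byte CDB: 32-bit count at byte 10 */
                return get_unaligned_be32(&cdb[10]);
        default:                /* assume the 32-byte form: byte 28 */
                return get_unaligned_be32(&cdb[28]);
        }
}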
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index b38b6c993e65..ca6e4a4df134 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | |||
472 | struct se_dev_entry *deve; | 472 | struct se_dev_entry *deve; |
473 | u32 i; | 473 | u32 i; |
474 | 474 | ||
475 | spin_lock_bh(&tpg->acl_node_lock); | 475 | spin_lock_irq(&tpg->acl_node_lock); |
476 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { | 476 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { |
477 | spin_unlock_bh(&tpg->acl_node_lock); | 477 | spin_unlock_irq(&tpg->acl_node_lock); |
478 | 478 | ||
479 | spin_lock_irq(&nacl->device_list_lock); | 479 | spin_lock_irq(&nacl->device_list_lock); |
480 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 480 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | |||
491 | } | 491 | } |
492 | spin_unlock_irq(&nacl->device_list_lock); | 492 | spin_unlock_irq(&nacl->device_list_lock); |
493 | 493 | ||
494 | spin_lock_bh(&tpg->acl_node_lock); | 494 | spin_lock_irq(&tpg->acl_node_lock); |
495 | } | 495 | } |
496 | spin_unlock_bh(&tpg->acl_node_lock); | 496 | spin_unlock_irq(&tpg->acl_node_lock); |
497 | } | 497 | } |
498 | 498 | ||
499 | static struct se_port *core_alloc_port(struct se_device *dev) | 499 | static struct se_port *core_alloc_port(struct se_device *dev) |
@@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev) | |||
839 | return ret; | 839 | return ret; |
840 | } | 840 | } |
841 | 841 | ||
842 | u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) | ||
843 | { | ||
844 | u32 tmp, aligned_max_sectors; | ||
845 | /* | ||
846 | * Limit max_sectors to a PAGE_SIZE aligned value for modern | ||
847 | * transport_allocate_data_tasks() operation. | ||
848 | */ | ||
849 | tmp = rounddown((max_sectors * block_size), PAGE_SIZE); | ||
850 | aligned_max_sectors = (tmp / block_size); | ||
851 | if (max_sectors != aligned_max_sectors) { | ||
852 | printk(KERN_INFO "Rounding down aligned max_sectors from %u" | ||
853 | " to %u\n", max_sectors, aligned_max_sectors); | ||
854 | return aligned_max_sectors; | ||
855 | } | ||
856 | |||
857 | return max_sectors; | ||
858 | } | ||
859 | |||
842 | void se_dev_set_default_attribs( | 860 | void se_dev_set_default_attribs( |
843 | struct se_device *dev, | 861 | struct se_device *dev, |
844 | struct se_dev_limits *dev_limits) | 862 | struct se_dev_limits *dev_limits) |
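The new se_dev_align_max_sectors() helper above rounds max_sectors down so that max_sectors * block_size is a whole number of pages before transport_allocate_data_tasks() uses it. A standalone model of the arithmetic, with PAGE_SIZE fixed at 4096 for illustration:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

static unsigned int demo_align_max_sectors(unsigned int max_sectors,
                                           unsigned int block_size)
{
        unsigned int bytes = max_sectors * block_size;
        unsigned int aligned = (bytes / DEMO_PAGE_SIZE) * DEMO_PAGE_SIZE;

        return aligned / block_size;
}

int main(void)
{
        /* 1023 x 512-byte sectors is not page aligned; 1016 is */
        printf("%u\n", demo_align_max_sectors(1023, 512));      /* 1016 */
        printf("%u\n", demo_align_max_sectors(2048, 512));      /* 2048 */
        return 0;
}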
@@ -878,6 +896,11 @@ void se_dev_set_default_attribs( | |||
878 | * max_sectors is based on subsystem plugin dependent requirements. | 896 | * max_sectors is based on subsystem plugin dependent requirements. |
879 | */ | 897 | */ |
880 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; | 898 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; |
899 | /* | ||
900 | * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() | ||
901 | */ | ||
902 | limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors, | ||
903 | limits->logical_block_size); | ||
881 | dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; | 904 | dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; |
882 | /* | 905 | /* |
883 | * Set optimal_sectors from max_sectors, which can be lowered via | 906 | * Set optimal_sectors from max_sectors, which can be lowered via |
@@ -1242,6 +1265,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) | |||
1242 | return -EINVAL; | 1265 | return -EINVAL; |
1243 | } | 1266 | } |
1244 | } | 1267 | } |
1268 | /* | ||
1269 | * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() | ||
1270 | */ | ||
1271 | max_sectors = se_dev_align_max_sectors(max_sectors, | ||
1272 | dev->se_sub_dev->se_dev_attrib.block_size); | ||
1245 | 1273 | ||
1246 | dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; | 1274 | dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; |
1247 | pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", | 1275 | pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", |
@@ -1344,15 +1372,17 @@ struct se_lun *core_dev_add_lun( | |||
1344 | */ | 1372 | */ |
1345 | if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { | 1373 | if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { |
1346 | struct se_node_acl *acl; | 1374 | struct se_node_acl *acl; |
1347 | spin_lock_bh(&tpg->acl_node_lock); | 1375 | spin_lock_irq(&tpg->acl_node_lock); |
1348 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | 1376 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
1349 | if (acl->dynamic_node_acl) { | 1377 | if (acl->dynamic_node_acl && |
1350 | spin_unlock_bh(&tpg->acl_node_lock); | 1378 | (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || |
1379 | !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { | ||
1380 | spin_unlock_irq(&tpg->acl_node_lock); | ||
1351 | core_tpg_add_node_to_devs(acl, tpg); | 1381 | core_tpg_add_node_to_devs(acl, tpg); |
1352 | spin_lock_bh(&tpg->acl_node_lock); | 1382 | spin_lock_irq(&tpg->acl_node_lock); |
1353 | } | 1383 | } |
1354 | } | 1384 | } |
1355 | spin_unlock_bh(&tpg->acl_node_lock); | 1385 | spin_unlock_irq(&tpg->acl_node_lock); |
1356 | } | 1386 | } |
1357 | 1387 | ||
1358 | return lun_p; | 1388 | return lun_p; |
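The new se_dev_align_max_sectors() helper rounds max_sectors down so that max_sectors * block_size is a whole number of pages, matching what transport_allocate_data_tasks() now assumes. A minimal standalone sketch of that arithmetic (plain userspace C; rounddown() is re-expressed as integer math and the names are only borrowed from the patch):

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Largest sector count whose byte length is a multiple of PAGE_SIZE,
     * i.e. rounddown(max_sectors * block_size, PAGE_SIZE) / block_size. */
    static unsigned int align_max_sectors(unsigned int max_sectors,
                                          unsigned int block_size)
    {
        unsigned int bytes = max_sectors * block_size;
        unsigned int aligned = (bytes - bytes % PAGE_SIZE) / block_size;

        if (aligned != max_sectors)
            printf("Rounding down max_sectors from %u to %u\n",
                   max_sectors, aligned);
        return aligned;
    }

    int main(void)
    {
        align_max_sectors(1023, 512);   /* 523776 bytes -> rounds down to 1016 */
        align_max_sectors(1024, 512);   /* already page aligned, stays 1024    */
        return 0;
    }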
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index f1654694f4ea..55bbe0847a6d 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -481,7 +481,7 @@ static struct config_group *target_fabric_make_nodeacl( | |||
481 | 481 | ||
482 | se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); | 482 | se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); |
483 | if (IS_ERR(se_nacl)) | 483 | if (IS_ERR(se_nacl)) |
484 | return ERR_PTR(PTR_ERR(se_nacl)); | 484 | return ERR_CAST(se_nacl); |
485 | 485 | ||
486 | nacl_cg = &se_nacl->acl_group; | 486 | nacl_cg = &se_nacl->acl_group; |
487 | nacl_cg->default_groups = se_nacl->acl_default_groups; | 487 | nacl_cg->default_groups = se_nacl->acl_default_groups; |
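The ERR_PTR(PTR_ERR(se_nacl)) to ERR_CAST(se_nacl) change is cosmetic: both produce the same encoded error pointer, ERR_CAST just skips the decode and re-encode round trip. A rough userspace model of the helpers involved (assumed semantics, not the kernel's actual err.h):

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    /* Userspace model of the kernel's error-pointer helpers: small negative
     * error codes are encoded in the top page of the address space. */
    static inline void *ERR_PTR(long error)        { return (void *)error; }
    static inline long  PTR_ERR(const void *ptr)   { return (long)ptr; }
    static inline int   IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }
    /* ERR_CAST: re-type an error pointer without decoding it first. */
    static inline void *ERR_CAST(const void *ptr)  { return (void *)(uintptr_t)ptr; }

    int main(void)
    {
        void *se_nacl = ERR_PTR(-ENOMEM);   /* pretend the fabric call failed */

        if (IS_ERR(se_nacl))
            printf("old: %p  new: %p  errno: %ld\n",
                   ERR_PTR(PTR_ERR(se_nacl)), ERR_CAST(se_nacl), PTR_ERR(se_nacl));
        return 0;
    }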
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 1c1b849cd4fb..7fd3a161f7cc 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -1598,14 +1598,14 @@ static int core_scsi3_decode_spec_i_port( | |||
1598 | * from the decoded fabric module specific TransportID | 1598 | * from the decoded fabric module specific TransportID |
1599 | * at *i_str. | 1599 | * at *i_str. |
1600 | */ | 1600 | */ |
1601 | spin_lock_bh(&tmp_tpg->acl_node_lock); | 1601 | spin_lock_irq(&tmp_tpg->acl_node_lock); |
1602 | dest_node_acl = __core_tpg_get_initiator_node_acl( | 1602 | dest_node_acl = __core_tpg_get_initiator_node_acl( |
1603 | tmp_tpg, i_str); | 1603 | tmp_tpg, i_str); |
1604 | if (dest_node_acl) { | 1604 | if (dest_node_acl) { |
1605 | atomic_inc(&dest_node_acl->acl_pr_ref_count); | 1605 | atomic_inc(&dest_node_acl->acl_pr_ref_count); |
1606 | smp_mb__after_atomic_inc(); | 1606 | smp_mb__after_atomic_inc(); |
1607 | } | 1607 | } |
1608 | spin_unlock_bh(&tmp_tpg->acl_node_lock); | 1608 | spin_unlock_irq(&tmp_tpg->acl_node_lock); |
1609 | 1609 | ||
1610 | if (!dest_node_acl) { | 1610 | if (!dest_node_acl) { |
1611 | core_scsi3_tpg_undepend_item(tmp_tpg); | 1611 | core_scsi3_tpg_undepend_item(tmp_tpg); |
@@ -3496,14 +3496,14 @@ after_iport_check: | |||
3496 | /* | 3496 | /* |
3497 | * Locate the destination struct se_node_acl from the received Transport ID | 3497 | * Locate the destination struct se_node_acl from the received Transport ID |
3498 | */ | 3498 | */ |
3499 | spin_lock_bh(&dest_se_tpg->acl_node_lock); | 3499 | spin_lock_irq(&dest_se_tpg->acl_node_lock); |
3500 | dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, | 3500 | dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, |
3501 | initiator_str); | 3501 | initiator_str); |
3502 | if (dest_node_acl) { | 3502 | if (dest_node_acl) { |
3503 | atomic_inc(&dest_node_acl->acl_pr_ref_count); | 3503 | atomic_inc(&dest_node_acl->acl_pr_ref_count); |
3504 | smp_mb__after_atomic_inc(); | 3504 | smp_mb__after_atomic_inc(); |
3505 | } | 3505 | } |
3506 | spin_unlock_bh(&dest_se_tpg->acl_node_lock); | 3506 | spin_unlock_irq(&dest_se_tpg->acl_node_lock); |
3507 | 3507 | ||
3508 | if (!dest_node_acl) { | 3508 | if (!dest_node_acl) { |
3509 | pr_err("Unable to locate %s dest_node_acl for" | 3509 | pr_err("Unable to locate %s dest_node_acl for" |
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 3dd81d24d9a9..e567e129c697 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -390,12 +390,10 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
390 | length = req->rd_size; | 390 | length = req->rd_size; |
391 | 391 | ||
392 | dst = sg_virt(&sg_d[i++]) + dst_offset; | 392 | dst = sg_virt(&sg_d[i++]) + dst_offset; |
393 | if (!dst) | 393 | BUG_ON(!dst); |
394 | BUG(); | ||
395 | 394 | ||
396 | src = sg_virt(&sg_s[j]) + src_offset; | 395 | src = sg_virt(&sg_s[j]) + src_offset; |
397 | if (!src) | 396 | BUG_ON(!src); |
398 | BUG(); | ||
399 | 397 | ||
400 | dst_offset = 0; | 398 | dst_offset = 0; |
401 | src_offset = length; | 399 | src_offset = length; |
@@ -415,8 +413,7 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
415 | length = req->rd_size; | 413 | length = req->rd_size; |
416 | 414 | ||
417 | dst = sg_virt(&sg_d[i]) + dst_offset; | 415 | dst = sg_virt(&sg_d[i]) + dst_offset; |
418 | if (!dst) | 416 | BUG_ON(!dst); |
419 | BUG(); | ||
420 | 417 | ||
421 | if (sg_d[i].length == length) { | 418 | if (sg_d[i].length == length) { |
422 | i++; | 419 | i++; |
@@ -425,8 +422,7 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
425 | dst_offset = length; | 422 | dst_offset = length; |
426 | 423 | ||
427 | src = sg_virt(&sg_s[j++]) + src_offset; | 424 | src = sg_virt(&sg_s[j++]) + src_offset; |
428 | if (!src) | 425 | BUG_ON(!src); |
429 | BUG(); | ||
430 | 426 | ||
431 | src_offset = 0; | 427 | src_offset = 0; |
432 | page_end = 1; | 428 | page_end = 1; |
@@ -510,12 +506,10 @@ static int rd_MEMCPY_write(struct rd_request *req) | |||
510 | length = req->rd_size; | 506 | length = req->rd_size; |
511 | 507 | ||
512 | src = sg_virt(&sg_s[i++]) + src_offset; | 508 | src = sg_virt(&sg_s[i++]) + src_offset; |
513 | if (!src) | 509 | BUG_ON(!src); |
514 | BUG(); | ||
515 | 510 | ||
516 | dst = sg_virt(&sg_d[j]) + dst_offset; | 511 | dst = sg_virt(&sg_d[j]) + dst_offset; |
517 | if (!dst) | 512 | BUG_ON(!dst); |
518 | BUG(); | ||
519 | 513 | ||
520 | src_offset = 0; | 514 | src_offset = 0; |
521 | dst_offset = length; | 515 | dst_offset = length; |
@@ -535,8 +529,7 @@ static int rd_MEMCPY_write(struct rd_request *req) | |||
535 | length = req->rd_size; | 529 | length = req->rd_size; |
536 | 530 | ||
537 | src = sg_virt(&sg_s[i]) + src_offset; | 531 | src = sg_virt(&sg_s[i]) + src_offset; |
538 | if (!src) | 532 | BUG_ON(!src); |
539 | BUG(); | ||
540 | 533 | ||
541 | if (sg_s[i].length == length) { | 534 | if (sg_s[i].length == length) { |
542 | i++; | 535 | i++; |
@@ -545,8 +538,7 @@ static int rd_MEMCPY_write(struct rd_request *req) | |||
545 | src_offset = length; | 538 | src_offset = length; |
546 | 539 | ||
547 | dst = sg_virt(&sg_d[j++]) + dst_offset; | 540 | dst = sg_virt(&sg_d[j++]) + dst_offset; |
548 | if (!dst) | 541 | BUG_ON(!dst); |
549 | BUG(); | ||
550 | 542 | ||
551 | dst_offset = 0; | 543 | dst_offset = 0; |
552 | page_end = 1; | 544 | page_end = 1; |
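The target_core_rd.c hunks are mechanical: every open-coded "if (!p) BUG();" pair becomes a single BUG_ON(!p). A tiny standalone illustration of the idiom, with assert() standing in for the kernel's BUG():

    #include <assert.h>
    #include <string.h>

    /* Userspace stand-in: BUG_ON(cond) panics when cond is true, so the
     * closest analogue is assert() on the inverted condition. */
    #define BUG_ON(cond) assert(!(cond))

    static void copy_chunk(void *dst, const void *src, size_t len)
    {
        BUG_ON(!dst);   /* replaces: if (!dst) BUG(); */
        BUG_ON(!src);   /* replaces: if (!src) BUG(); */
        memcpy(dst, src, len);
    }

    int main(void)
    {
        char a[8] = "abcdefg", b[8];

        copy_chunk(b, a, sizeof(a));
        return 0;
    }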
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 4f1ba4c5ef11..162b736c7342 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( | |||
137 | { | 137 | { |
138 | struct se_node_acl *acl; | 138 | struct se_node_acl *acl; |
139 | 139 | ||
140 | spin_lock_bh(&tpg->acl_node_lock); | 140 | spin_lock_irq(&tpg->acl_node_lock); |
141 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | 141 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
142 | if (!strcmp(acl->initiatorname, initiatorname) && | 142 | if (!strcmp(acl->initiatorname, initiatorname) && |
143 | !acl->dynamic_node_acl) { | 143 | !acl->dynamic_node_acl) { |
144 | spin_unlock_bh(&tpg->acl_node_lock); | 144 | spin_unlock_irq(&tpg->acl_node_lock); |
145 | return acl; | 145 | return acl; |
146 | } | 146 | } |
147 | } | 147 | } |
148 | spin_unlock_bh(&tpg->acl_node_lock); | 148 | spin_unlock_irq(&tpg->acl_node_lock); |
149 | 149 | ||
150 | return NULL; | 150 | return NULL; |
151 | } | 151 | } |
@@ -298,13 +298,21 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
298 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); | 298 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); |
299 | return NULL; | 299 | return NULL; |
300 | } | 300 | } |
301 | /* | ||
302 | * Here we only create demo-mode MappedLUNs from the active | ||
303 | * TPG LUNs if the fabric is not explicitly asking for | ||
304 | * tpg_check_demo_mode_login_only() == 1. | ||
305 | */ | ||
306 | if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) && | ||
307 | (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1)) | ||
308 | do { ; } while (0); | ||
309 | else | ||
310 | core_tpg_add_node_to_devs(acl, tpg); | ||
301 | 311 | ||
302 | core_tpg_add_node_to_devs(acl, tpg); | 312 | spin_lock_irq(&tpg->acl_node_lock); |
303 | |||
304 | spin_lock_bh(&tpg->acl_node_lock); | ||
305 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); | 313 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); |
306 | tpg->num_node_acls++; | 314 | tpg->num_node_acls++; |
307 | spin_unlock_bh(&tpg->acl_node_lock); | 315 | spin_unlock_irq(&tpg->acl_node_lock); |
308 | 316 | ||
309 | pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" | 317 | pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" |
310 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), | 318 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
@@ -354,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
354 | { | 362 | { |
355 | struct se_node_acl *acl = NULL; | 363 | struct se_node_acl *acl = NULL; |
356 | 364 | ||
357 | spin_lock_bh(&tpg->acl_node_lock); | 365 | spin_lock_irq(&tpg->acl_node_lock); |
358 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); | 366 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); |
359 | if (acl) { | 367 | if (acl) { |
360 | if (acl->dynamic_node_acl) { | 368 | if (acl->dynamic_node_acl) { |
@@ -362,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
362 | pr_debug("%s_TPG[%u] - Replacing dynamic ACL" | 370 | pr_debug("%s_TPG[%u] - Replacing dynamic ACL" |
363 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), | 371 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
364 | tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); | 372 | tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); |
365 | spin_unlock_bh(&tpg->acl_node_lock); | 373 | spin_unlock_irq(&tpg->acl_node_lock); |
366 | /* | 374 | /* |
367 | * Release the locally allocated struct se_node_acl | 375 | * Release the locally allocated struct se_node_acl |
368 | * because * core_tpg_add_initiator_node_acl() returned | 376 | * because * core_tpg_add_initiator_node_acl() returned |
@@ -378,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
378 | " Node %s already exists for TPG %u, ignoring" | 386 | " Node %s already exists for TPG %u, ignoring" |
379 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), | 387 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), |
380 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); | 388 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
381 | spin_unlock_bh(&tpg->acl_node_lock); | 389 | spin_unlock_irq(&tpg->acl_node_lock); |
382 | return ERR_PTR(-EEXIST); | 390 | return ERR_PTR(-EEXIST); |
383 | } | 391 | } |
384 | spin_unlock_bh(&tpg->acl_node_lock); | 392 | spin_unlock_irq(&tpg->acl_node_lock); |
385 | 393 | ||
386 | if (!se_nacl) { | 394 | if (!se_nacl) { |
387 | pr_err("struct se_node_acl pointer is NULL\n"); | 395 | pr_err("struct se_node_acl pointer is NULL\n"); |
@@ -418,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
418 | return ERR_PTR(-EINVAL); | 426 | return ERR_PTR(-EINVAL); |
419 | } | 427 | } |
420 | 428 | ||
421 | spin_lock_bh(&tpg->acl_node_lock); | 429 | spin_lock_irq(&tpg->acl_node_lock); |
422 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); | 430 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); |
423 | tpg->num_node_acls++; | 431 | tpg->num_node_acls++; |
424 | spin_unlock_bh(&tpg->acl_node_lock); | 432 | spin_unlock_irq(&tpg->acl_node_lock); |
425 | 433 | ||
426 | done: | 434 | done: |
427 | pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" | 435 | pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" |
@@ -445,14 +453,14 @@ int core_tpg_del_initiator_node_acl( | |||
445 | struct se_session *sess, *sess_tmp; | 453 | struct se_session *sess, *sess_tmp; |
446 | int dynamic_acl = 0; | 454 | int dynamic_acl = 0; |
447 | 455 | ||
448 | spin_lock_bh(&tpg->acl_node_lock); | 456 | spin_lock_irq(&tpg->acl_node_lock); |
449 | if (acl->dynamic_node_acl) { | 457 | if (acl->dynamic_node_acl) { |
450 | acl->dynamic_node_acl = 0; | 458 | acl->dynamic_node_acl = 0; |
451 | dynamic_acl = 1; | 459 | dynamic_acl = 1; |
452 | } | 460 | } |
453 | list_del(&acl->acl_list); | 461 | list_del(&acl->acl_list); |
454 | tpg->num_node_acls--; | 462 | tpg->num_node_acls--; |
455 | spin_unlock_bh(&tpg->acl_node_lock); | 463 | spin_unlock_irq(&tpg->acl_node_lock); |
456 | 464 | ||
457 | spin_lock_bh(&tpg->session_lock); | 465 | spin_lock_bh(&tpg->session_lock); |
458 | list_for_each_entry_safe(sess, sess_tmp, | 466 | list_for_each_entry_safe(sess, sess_tmp, |
@@ -503,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth( | |||
503 | struct se_node_acl *acl; | 511 | struct se_node_acl *acl; |
504 | int dynamic_acl = 0; | 512 | int dynamic_acl = 0; |
505 | 513 | ||
506 | spin_lock_bh(&tpg->acl_node_lock); | 514 | spin_lock_irq(&tpg->acl_node_lock); |
507 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); | 515 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); |
508 | if (!acl) { | 516 | if (!acl) { |
509 | pr_err("Access Control List entry for %s Initiator" | 517 | pr_err("Access Control List entry for %s Initiator" |
510 | " Node %s does not exists for TPG %hu, ignoring" | 518 | " Node %s does not exists for TPG %hu, ignoring" |
511 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), | 519 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), |
512 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); | 520 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
513 | spin_unlock_bh(&tpg->acl_node_lock); | 521 | spin_unlock_irq(&tpg->acl_node_lock); |
514 | return -ENODEV; | 522 | return -ENODEV; |
515 | } | 523 | } |
516 | if (acl->dynamic_node_acl) { | 524 | if (acl->dynamic_node_acl) { |
517 | acl->dynamic_node_acl = 0; | 525 | acl->dynamic_node_acl = 0; |
518 | dynamic_acl = 1; | 526 | dynamic_acl = 1; |
519 | } | 527 | } |
520 | spin_unlock_bh(&tpg->acl_node_lock); | 528 | spin_unlock_irq(&tpg->acl_node_lock); |
521 | 529 | ||
522 | spin_lock_bh(&tpg->session_lock); | 530 | spin_lock_bh(&tpg->session_lock); |
523 | list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { | 531 | list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { |
@@ -533,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth( | |||
533 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); | 541 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); |
534 | spin_unlock_bh(&tpg->session_lock); | 542 | spin_unlock_bh(&tpg->session_lock); |
535 | 543 | ||
536 | spin_lock_bh(&tpg->acl_node_lock); | 544 | spin_lock_irq(&tpg->acl_node_lock); |
537 | if (dynamic_acl) | 545 | if (dynamic_acl) |
538 | acl->dynamic_node_acl = 1; | 546 | acl->dynamic_node_acl = 1; |
539 | spin_unlock_bh(&tpg->acl_node_lock); | 547 | spin_unlock_irq(&tpg->acl_node_lock); |
540 | return -EEXIST; | 548 | return -EEXIST; |
541 | } | 549 | } |
542 | /* | 550 | /* |
@@ -571,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth( | |||
571 | if (init_sess) | 579 | if (init_sess) |
572 | tpg->se_tpg_tfo->close_session(init_sess); | 580 | tpg->se_tpg_tfo->close_session(init_sess); |
573 | 581 | ||
574 | spin_lock_bh(&tpg->acl_node_lock); | 582 | spin_lock_irq(&tpg->acl_node_lock); |
575 | if (dynamic_acl) | 583 | if (dynamic_acl) |
576 | acl->dynamic_node_acl = 1; | 584 | acl->dynamic_node_acl = 1; |
577 | spin_unlock_bh(&tpg->acl_node_lock); | 585 | spin_unlock_irq(&tpg->acl_node_lock); |
578 | return -EINVAL; | 586 | return -EINVAL; |
579 | } | 587 | } |
580 | spin_unlock_bh(&tpg->session_lock); | 588 | spin_unlock_bh(&tpg->session_lock); |
@@ -590,10 +598,10 @@ int core_tpg_set_initiator_node_queue_depth( | |||
590 | initiatorname, tpg->se_tpg_tfo->get_fabric_name(), | 598 | initiatorname, tpg->se_tpg_tfo->get_fabric_name(), |
591 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | 599 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
592 | 600 | ||
593 | spin_lock_bh(&tpg->acl_node_lock); | 601 | spin_lock_irq(&tpg->acl_node_lock); |
594 | if (dynamic_acl) | 602 | if (dynamic_acl) |
595 | acl->dynamic_node_acl = 1; | 603 | acl->dynamic_node_acl = 1; |
596 | spin_unlock_bh(&tpg->acl_node_lock); | 604 | spin_unlock_irq(&tpg->acl_node_lock); |
597 | 605 | ||
598 | return 0; | 606 | return 0; |
599 | } | 607 | } |
@@ -717,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
717 | * not been released because of TFO->tpg_check_demo_mode_cache() == 1 | 725 | * not been released because of TFO->tpg_check_demo_mode_cache() == 1 |
718 | * in transport_deregister_session(). | 726 | * in transport_deregister_session(). |
719 | */ | 727 | */ |
720 | spin_lock_bh(&se_tpg->acl_node_lock); | 728 | spin_lock_irq(&se_tpg->acl_node_lock); |
721 | list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, | 729 | list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, |
722 | acl_list) { | 730 | acl_list) { |
723 | list_del(&nacl->acl_list); | 731 | list_del(&nacl->acl_list); |
724 | se_tpg->num_node_acls--; | 732 | se_tpg->num_node_acls--; |
725 | spin_unlock_bh(&se_tpg->acl_node_lock); | 733 | spin_unlock_irq(&se_tpg->acl_node_lock); |
726 | 734 | ||
727 | core_tpg_wait_for_nacl_pr_ref(nacl); | 735 | core_tpg_wait_for_nacl_pr_ref(nacl); |
728 | core_free_device_list_for_node(nacl, se_tpg); | 736 | core_free_device_list_for_node(nacl, se_tpg); |
729 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); | 737 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); |
730 | 738 | ||
731 | spin_lock_bh(&se_tpg->acl_node_lock); | 739 | spin_lock_irq(&se_tpg->acl_node_lock); |
732 | } | 740 | } |
733 | spin_unlock_bh(&se_tpg->acl_node_lock); | 741 | spin_unlock_irq(&se_tpg->acl_node_lock); |
734 | 742 | ||
735 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) | 743 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) |
736 | core_tpg_release_virtual_lun0(se_tpg); | 744 | core_tpg_release_virtual_lun0(se_tpg); |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 89760329d5d0..a4b0a8d27f25 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -389,17 +389,18 @@ void transport_deregister_session(struct se_session *se_sess) | |||
389 | { | 389 | { |
390 | struct se_portal_group *se_tpg = se_sess->se_tpg; | 390 | struct se_portal_group *se_tpg = se_sess->se_tpg; |
391 | struct se_node_acl *se_nacl; | 391 | struct se_node_acl *se_nacl; |
392 | unsigned long flags; | ||
392 | 393 | ||
393 | if (!se_tpg) { | 394 | if (!se_tpg) { |
394 | transport_free_session(se_sess); | 395 | transport_free_session(se_sess); |
395 | return; | 396 | return; |
396 | } | 397 | } |
397 | 398 | ||
398 | spin_lock_bh(&se_tpg->session_lock); | 399 | spin_lock_irqsave(&se_tpg->session_lock, flags); |
399 | list_del(&se_sess->sess_list); | 400 | list_del(&se_sess->sess_list); |
400 | se_sess->se_tpg = NULL; | 401 | se_sess->se_tpg = NULL; |
401 | se_sess->fabric_sess_ptr = NULL; | 402 | se_sess->fabric_sess_ptr = NULL; |
402 | spin_unlock_bh(&se_tpg->session_lock); | 403 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); |
403 | 404 | ||
404 | /* | 405 | /* |
405 | * Determine if we need to do extra work for this initiator node's | 406 | * Determine if we need to do extra work for this initiator node's |
@@ -407,22 +408,22 @@ void transport_deregister_session(struct se_session *se_sess) | |||
407 | */ | 408 | */ |
408 | se_nacl = se_sess->se_node_acl; | 409 | se_nacl = se_sess->se_node_acl; |
409 | if (se_nacl) { | 410 | if (se_nacl) { |
410 | spin_lock_bh(&se_tpg->acl_node_lock); | 411 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
411 | if (se_nacl->dynamic_node_acl) { | 412 | if (se_nacl->dynamic_node_acl) { |
412 | if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( | 413 | if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( |
413 | se_tpg)) { | 414 | se_tpg)) { |
414 | list_del(&se_nacl->acl_list); | 415 | list_del(&se_nacl->acl_list); |
415 | se_tpg->num_node_acls--; | 416 | se_tpg->num_node_acls--; |
416 | spin_unlock_bh(&se_tpg->acl_node_lock); | 417 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
417 | 418 | ||
418 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | 419 | core_tpg_wait_for_nacl_pr_ref(se_nacl); |
419 | core_free_device_list_for_node(se_nacl, se_tpg); | 420 | core_free_device_list_for_node(se_nacl, se_tpg); |
420 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, | 421 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, |
421 | se_nacl); | 422 | se_nacl); |
422 | spin_lock_bh(&se_tpg->acl_node_lock); | 423 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
423 | } | 424 | } |
424 | } | 425 | } |
425 | spin_unlock_bh(&se_tpg->acl_node_lock); | 426 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
426 | } | 427 | } |
427 | 428 | ||
428 | transport_free_session(se_sess); | 429 | transport_free_session(se_sess); |
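Throughout the target code above, acl_node_lock moves from the _bh spinlock variants to the _irq ones, and transport_deregister_session() switches session_lock to irqsave/irqrestore; presumably these locks can now be taken with interrupts already disabled or from interrupt context, so the previous interrupt state has to be preserved rather than unconditionally re-enabled. A toy single-CPU model of that distinction (invented helpers, only meant to show why irqsave matters for a caller that already has IRQs off):

    #include <stdio.h>

    static int irqs_enabled = 1;            /* fake CPU interrupt-enable flag */

    static void spin_lock_irq(void)              { irqs_enabled = 0; }
    static void spin_unlock_irq(void)            { irqs_enabled = 1; }
    static void spin_lock_irqsave(int *flags)    { *flags = irqs_enabled; irqs_enabled = 0; }
    static void spin_unlock_irqrestore(int flags){ irqs_enabled = flags; }

    int main(void)
    {
        int flags;

        irqs_enabled = 0;                   /* caller already runs with IRQs off */
        spin_lock_irqsave(&flags);
        spin_unlock_irqrestore(flags);
        printf("irqsave/irqrestore: irqs_enabled=%d (state preserved)\n", irqs_enabled);

        irqs_enabled = 0;
        spin_lock_irq();
        spin_unlock_irq();
        printf("plain _irq pair:    irqs_enabled=%d (state clobbered)\n", irqs_enabled);
        return 0;
    }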
@@ -976,15 +977,17 @@ static void target_qf_do_work(struct work_struct *work) | |||
976 | { | 977 | { |
977 | struct se_device *dev = container_of(work, struct se_device, | 978 | struct se_device *dev = container_of(work, struct se_device, |
978 | qf_work_queue); | 979 | qf_work_queue); |
980 | LIST_HEAD(qf_cmd_list); | ||
979 | struct se_cmd *cmd, *cmd_tmp; | 981 | struct se_cmd *cmd, *cmd_tmp; |
980 | 982 | ||
981 | spin_lock_irq(&dev->qf_cmd_lock); | 983 | spin_lock_irq(&dev->qf_cmd_lock); |
982 | list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { | 984 | list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); |
985 | spin_unlock_irq(&dev->qf_cmd_lock); | ||
983 | 986 | ||
987 | list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { | ||
984 | list_del(&cmd->se_qf_node); | 988 | list_del(&cmd->se_qf_node); |
985 | atomic_dec(&dev->dev_qf_count); | 989 | atomic_dec(&dev->dev_qf_count); |
986 | smp_mb__after_atomic_dec(); | 990 | smp_mb__after_atomic_dec(); |
987 | spin_unlock_irq(&dev->qf_cmd_lock); | ||
988 | 991 | ||
989 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" | 992 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" |
990 | " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, | 993 | " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, |
@@ -996,10 +999,7 @@ static void target_qf_do_work(struct work_struct *work) | |||
996 | * has been added to head of queue | 999 | * has been added to head of queue |
997 | */ | 1000 | */ |
998 | transport_add_cmd_to_queue(cmd, cmd->t_state); | 1001 | transport_add_cmd_to_queue(cmd, cmd->t_state); |
999 | |||
1000 | spin_lock_irq(&dev->qf_cmd_lock); | ||
1001 | } | 1002 | } |
1002 | spin_unlock_irq(&dev->qf_cmd_lock); | ||
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) | 1005 | unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) |
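target_qf_do_work() now splices the entire qf_cmd_list onto a local list head while holding qf_cmd_lock and walks the entries with the lock dropped, rather than re-acquiring the lock once per command. A self-contained userspace sketch of that drain pattern (a pthread mutex stands in for the spinlock; the structs are invented for the example):

    #include <pthread.h>
    #include <stdio.h>

    struct qf_cmd {
        int id;
        struct qf_cmd *next;
    };

    static pthread_mutex_t qf_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct qf_cmd *qf_list;          /* head-inserted singly linked list */

    static void qf_add(struct qf_cmd *cmd)
    {
        pthread_mutex_lock(&qf_lock);
        cmd->next = qf_list;
        qf_list = cmd;
        pthread_mutex_unlock(&qf_lock);
    }

    /* Equivalent of list_splice_init(): detach the whole list under the
     * lock, then process it with the lock dropped. */
    static void qf_do_work(void)
    {
        struct qf_cmd *local, *cmd;

        pthread_mutex_lock(&qf_lock);
        local = qf_list;
        qf_list = NULL;
        pthread_mutex_unlock(&qf_lock);

        while ((cmd = local) != NULL) {
            local = cmd->next;
            printf("processing QUEUE_FULL cmd %d outside the lock\n", cmd->id);
        }
    }

    int main(void)
    {
        struct qf_cmd a = { .id = 1 }, b = { .id = 2 };

        qf_add(&a);
        qf_add(&b);
        qf_do_work();
        return 0;
    }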
@@ -2053,8 +2053,14 @@ static void transport_generic_request_failure( | |||
2053 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 2053 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
2054 | break; | 2054 | break; |
2055 | } | 2055 | } |
2056 | 2056 | /* | |
2057 | if (!sc) | 2057 | * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, |
2058 | * make the call to transport_send_check_condition_and_sense() | ||
2059 | * directly. Otherwise expect the fabric to make the call to | ||
2060 | * transport_send_check_condition_and_sense() after handling | ||
2061 | * possible unsolicited write data payloads. | ||
2062 | */ | ||
2063 | if (!sc && !cmd->se_tfo->new_cmd_map) | ||
2058 | transport_new_cmd_failure(cmd); | 2064 | transport_new_cmd_failure(cmd); |
2059 | else { | 2065 | else { |
2060 | ret = transport_send_check_condition_and_sense(cmd, | 2066 | ret = transport_send_check_condition_and_sense(cmd, |
@@ -2847,12 +2853,42 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) | |||
2847 | " transport_dev_end_lba(): %llu\n", | 2853 | " transport_dev_end_lba(): %llu\n", |
2848 | cmd->t_task_lba, sectors, | 2854 | cmd->t_task_lba, sectors, |
2849 | transport_dev_end_lba(dev)); | 2855 | transport_dev_end_lba(dev)); |
2850 | pr_err(" We should return CHECK_CONDITION" | 2856 | return -EINVAL; |
2851 | " but we don't yet\n"); | ||
2852 | return 0; | ||
2853 | } | 2857 | } |
2854 | 2858 | ||
2855 | return sectors; | 2859 | return 0; |
2860 | } | ||
2861 | |||
2862 | static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) | ||
2863 | { | ||
2864 | /* | ||
2865 | * Determine if the received WRITE_SAME is used for direct | ||
2866 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI | ||
2867 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | ||
2868 | * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code. | ||
2869 | */ | ||
2870 | int passthrough = (dev->transport->transport_type == | ||
2871 | TRANSPORT_PLUGIN_PHBA_PDEV); | ||
2872 | |||
2873 | if (!passthrough) { | ||
2874 | if ((flags[0] & 0x04) || (flags[0] & 0x02)) { | ||
2875 | pr_err("WRITE_SAME PBDATA and LBDATA" | ||
2876 | " bits not supported for Block Discard" | ||
2877 | " Emulation\n"); | ||
2878 | return -ENOSYS; | ||
2879 | } | ||
2880 | /* | ||
2881 | * Currently for the emulated case we only accept | ||
2882 | * tpws with the UNMAP=1 bit set. | ||
2883 | */ | ||
2884 | if (!(flags[0] & 0x08)) { | ||
2885 | pr_err("WRITE_SAME w/o UNMAP bit not" | ||
2886 | " supported for Block Discard Emulation\n"); | ||
2887 | return -ENOSYS; | ||
2888 | } | ||
2889 | } | ||
2890 | |||
2891 | return 0; | ||
2856 | } | 2892 | } |
2857 | 2893 | ||
2858 | /* transport_generic_cmd_sequencer(): | 2894 | /* transport_generic_cmd_sequencer(): |
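The new target_check_write_same_discard() helper centralises the WRITE_SAME flag checks that were previously open-coded per opcode: for the emulated (non-pSCSI) path it rejects PBDATA (0x04) and LBDATA (0x02) and requires UNMAP (0x08). A standalone model of just those bit tests (bit values taken from the hunk; the error handling is simplified to a plain int return):

    #include <stdio.h>

    #define WS_LBDATA 0x02   /* CDB byte 1, bit 1 */
    #define WS_PBDATA 0x04   /* CDB byte 1, bit 2 */
    #define WS_UNMAP  0x08   /* CDB byte 1, bit 3 */

    /* Emulated-backend rules from the patch: PBDATA/LBDATA are rejected and
     * only UNMAP=1 requests are accepted.  Passthrough (pSCSI) skips this. */
    static int check_write_same_discard(unsigned char flags, int passthrough)
    {
        if (passthrough)
            return 0;
        if (flags & (WS_PBDATA | WS_LBDATA))
            return -1;      /* PBDATA/LBDATA unsupported for discard emulation */
        if (!(flags & WS_UNMAP))
            return -1;      /* only UNMAP=1 is emulated */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_write_same_discard(WS_UNMAP, 0));             /*  0 */
        printf("%d\n", check_write_same_discard(0x00, 0));                 /* -1 */
        printf("%d\n", check_write_same_discard(WS_PBDATA | WS_UNMAP, 0)); /* -1 */
        return 0;
    }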
@@ -3065,7 +3101,7 @@ static int transport_generic_cmd_sequencer( | |||
3065 | goto out_unsupported_cdb; | 3101 | goto out_unsupported_cdb; |
3066 | 3102 | ||
3067 | if (sectors) | 3103 | if (sectors) |
3068 | size = transport_get_size(sectors, cdb, cmd); | 3104 | size = transport_get_size(1, cdb, cmd); |
3069 | else { | 3105 | else { |
3070 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" | 3106 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" |
3071 | " supported\n"); | 3107 | " supported\n"); |
@@ -3075,27 +3111,9 @@ static int transport_generic_cmd_sequencer( | |||
3075 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); | 3111 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); |
3076 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 3112 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3077 | 3113 | ||
3078 | /* | 3114 | if (target_check_write_same_discard(&cdb[10], dev) < 0) |
3079 | * Skip the remaining assignments for TCM/PSCSI passthrough | ||
3080 | */ | ||
3081 | if (passthrough) | ||
3082 | break; | ||
3083 | |||
3084 | if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { | ||
3085 | pr_err("WRITE_SAME PBDATA and LBDATA" | ||
3086 | " bits not supported for Block Discard" | ||
3087 | " Emulation\n"); | ||
3088 | goto out_invalid_cdb_field; | 3115 | goto out_invalid_cdb_field; |
3089 | } | 3116 | |
3090 | /* | ||
3091 | * Currently for the emulated case we only accept | ||
3092 | * tpws with the UNMAP=1 bit set. | ||
3093 | */ | ||
3094 | if (!(cdb[10] & 0x08)) { | ||
3095 | pr_err("WRITE_SAME w/o UNMAP bit not" | ||
3096 | " supported for Block Discard Emulation\n"); | ||
3097 | goto out_invalid_cdb_field; | ||
3098 | } | ||
3099 | break; | 3117 | break; |
3100 | default: | 3118 | default: |
3101 | pr_err("VARIABLE_LENGTH_CMD service action" | 3119 | pr_err("VARIABLE_LENGTH_CMD service action" |
@@ -3330,10 +3348,12 @@ static int transport_generic_cmd_sequencer( | |||
3330 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; | 3348 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; |
3331 | /* | 3349 | /* |
3332 | * Check to ensure that LBA + Range does not exceed past end of | 3350 | * Check to ensure that LBA + Range does not exceed past end of |
3333 | * device. | 3351 | * device for IBLOCK and FILEIO ->do_sync_cache() backend calls |
3334 | */ | 3352 | */ |
3335 | if (!transport_cmd_get_valid_sectors(cmd)) | 3353 | if ((cmd->t_task_lba != 0) || (sectors != 0)) { |
3336 | goto out_invalid_cdb_field; | 3354 | if (transport_cmd_get_valid_sectors(cmd) < 0) |
3355 | goto out_invalid_cdb_field; | ||
3356 | } | ||
3337 | break; | 3357 | break; |
3338 | case UNMAP: | 3358 | case UNMAP: |
3339 | size = get_unaligned_be16(&cdb[7]); | 3359 | size = get_unaligned_be16(&cdb[7]); |
@@ -3345,40 +3365,38 @@ static int transport_generic_cmd_sequencer( | |||
3345 | goto out_unsupported_cdb; | 3365 | goto out_unsupported_cdb; |
3346 | 3366 | ||
3347 | if (sectors) | 3367 | if (sectors) |
3348 | size = transport_get_size(sectors, cdb, cmd); | 3368 | size = transport_get_size(1, cdb, cmd); |
3349 | else { | 3369 | else { |
3350 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | 3370 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); |
3351 | goto out_invalid_cdb_field; | 3371 | goto out_invalid_cdb_field; |
3352 | } | 3372 | } |
3353 | 3373 | ||
3354 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); | 3374 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); |
3355 | passthrough = (dev->transport->transport_type == | 3375 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3356 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3376 | |
3357 | /* | 3377 | if (target_check_write_same_discard(&cdb[1], dev) < 0) |
3358 | * Determine if the received WRITE_SAME_16 is used to for direct | 3378 | goto out_invalid_cdb_field; |
3359 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI | 3379 | break; |
3360 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | 3380 | case WRITE_SAME: |
3361 | * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and | 3381 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); |
3362 | * TCM/FILEIO subsystem plugin backstores. | 3382 | if (sector_ret) |
3363 | */ | 3383 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); |
3364 | if (!passthrough) { | 3384 | |
3365 | if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { | 3385 | if (sectors) |
3366 | pr_err("WRITE_SAME PBDATA and LBDATA" | 3386 | size = transport_get_size(1, cdb, cmd); |
3367 | " bits not supported for Block Discard" | 3387 | else { |
3368 | " Emulation\n"); | 3388 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); |
3369 | goto out_invalid_cdb_field; | 3389 | goto out_invalid_cdb_field; |
3370 | } | ||
3371 | /* | ||
3372 | * Currently for the emulated case we only accept | ||
3373 | * tpws with the UNMAP=1 bit set. | ||
3374 | */ | ||
3375 | if (!(cdb[1] & 0x08)) { | ||
3376 | pr_err("WRITE_SAME w/o UNMAP bit not " | ||
3377 | " supported for Block Discard Emulation\n"); | ||
3378 | goto out_invalid_cdb_field; | ||
3379 | } | ||
3380 | } | 3390 | } |
3391 | |||
3392 | cmd->t_task_lba = get_unaligned_be32(&cdb[2]); | ||
3381 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 3393 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3394 | /* | ||
3395 | * Follow sbcr26 with WRITE_SAME (10) and check for the existence | ||
3396 | * of byte 1 bit 3 UNMAP instead of original reserved field | ||
3397 | */ | ||
3398 | if (target_check_write_same_discard(&cdb[1], dev) < 0) | ||
3399 | goto out_invalid_cdb_field; | ||
3382 | break; | 3400 | break; |
3383 | case ALLOW_MEDIUM_REMOVAL: | 3401 | case ALLOW_MEDIUM_REMOVAL: |
3384 | case GPCMD_CLOSE_TRACK: | 3402 | case GPCMD_CLOSE_TRACK: |
@@ -3873,9 +3891,7 @@ EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | |||
3873 | static int transport_new_cmd_obj(struct se_cmd *cmd) | 3891 | static int transport_new_cmd_obj(struct se_cmd *cmd) |
3874 | { | 3892 | { |
3875 | struct se_device *dev = cmd->se_dev; | 3893 | struct se_device *dev = cmd->se_dev; |
3876 | u32 task_cdbs; | 3894 | int set_counts = 1, rc, task_cdbs; |
3877 | u32 rc; | ||
3878 | int set_counts = 1; | ||
3879 | 3895 | ||
3880 | /* | 3896 | /* |
3881 | * Setup any BIDI READ tasks and memory from | 3897 | * Setup any BIDI READ tasks and memory from |
@@ -3893,7 +3909,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) | |||
3893 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3909 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3894 | cmd->scsi_sense_reason = | 3910 | cmd->scsi_sense_reason = |
3895 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 3911 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3896 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 3912 | return -EINVAL; |
3897 | } | 3913 | } |
3898 | atomic_inc(&cmd->t_fe_count); | 3914 | atomic_inc(&cmd->t_fe_count); |
3899 | atomic_inc(&cmd->t_se_count); | 3915 | atomic_inc(&cmd->t_se_count); |
@@ -3912,7 +3928,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) | |||
3912 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3928 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3913 | cmd->scsi_sense_reason = | 3929 | cmd->scsi_sense_reason = |
3914 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 3930 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3915 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 3931 | return -EINVAL; |
3916 | } | 3932 | } |
3917 | 3933 | ||
3918 | if (set_counts) { | 3934 | if (set_counts) { |
@@ -4028,8 +4044,6 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) | |||
4028 | if (!task->task_sg) | 4044 | if (!task->task_sg) |
4029 | continue; | 4045 | continue; |
4030 | 4046 | ||
4031 | BUG_ON(!task->task_padded_sg); | ||
4032 | |||
4033 | if (!sg_first) { | 4047 | if (!sg_first) { |
4034 | sg_first = task->task_sg; | 4048 | sg_first = task->task_sg; |
4035 | chained_nents = task->task_sg_nents; | 4049 | chained_nents = task->task_sg_nents; |
@@ -4037,9 +4051,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) | |||
4037 | sg_chain(sg_prev, sg_prev_nents, task->task_sg); | 4051 | sg_chain(sg_prev, sg_prev_nents, task->task_sg); |
4038 | chained_nents += task->task_sg_nents; | 4052 | chained_nents += task->task_sg_nents; |
4039 | } | 4053 | } |
4054 | /* | ||
4055 | * For the padded tasks, use the extra SGL vector allocated | ||
4056 | * in transport_allocate_data_tasks() for the sg_prev_nents | ||
4057 | * offset into sg_chain() above.. The last task of a | ||
4058 | * multi-task list, or a single task will not have | ||
4059 | * task->task_sg_padded set.. | ||
4060 | */ | ||
4061 | if (task->task_padded_sg) | ||
4062 | sg_prev_nents = (task->task_sg_nents + 1); | ||
4063 | else | ||
4064 | sg_prev_nents = task->task_sg_nents; | ||
4040 | 4065 | ||
4041 | sg_prev = task->task_sg; | 4066 | sg_prev = task->task_sg; |
4042 | sg_prev_nents = task->task_sg_nents; | ||
4043 | } | 4067 | } |
4044 | /* | 4068 | /* |
4045 | * Setup the starting pointer and total t_tasks_sg_linked_no including | 4069 | * Setup the starting pointer and total t_tasks_sg_linked_no including |
@@ -4091,7 +4115,7 @@ static int transport_allocate_data_tasks( | |||
4091 | 4115 | ||
4092 | cmd_sg = sgl; | 4116 | cmd_sg = sgl; |
4093 | for (i = 0; i < task_count; i++) { | 4117 | for (i = 0; i < task_count; i++) { |
4094 | unsigned int task_size; | 4118 | unsigned int task_size, task_sg_nents_padded; |
4095 | int count; | 4119 | int count; |
4096 | 4120 | ||
4097 | task = transport_generic_get_task(cmd, data_direction); | 4121 | task = transport_generic_get_task(cmd, data_direction); |
@@ -4110,30 +4134,33 @@ static int transport_allocate_data_tasks( | |||
4110 | 4134 | ||
4111 | /* Update new cdb with updated lba/sectors */ | 4135 | /* Update new cdb with updated lba/sectors */ |
4112 | cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); | 4136 | cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); |
4113 | 4137 | /* | |
4138 | * This now assumes that passed sg_ents are in PAGE_SIZE chunks | ||
4139 | * in order to calculate the number per task SGL entries | ||
4140 | */ | ||
4141 | task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); | ||
4114 | /* | 4142 | /* |
4115 | * Check if the fabric module driver is requesting that all | 4143 | * Check if the fabric module driver is requesting that all |
4116 | * struct se_task->task_sg[] be chained together.. If so, | 4144 | * struct se_task->task_sg[] be chained together.. If so, |
4117 | * then allocate an extra padding SG entry for linking and | 4145 | * then allocate an extra padding SG entry for linking and |
4118 | * marking the end of the chained SGL. | 4146 | * marking the end of the chained SGL for every task except |
4119 | * Possibly over-allocate task sgl size by using cmd sgl size. | 4147 | * the last one for (task_count > 1) operation, or skipping |
4120 | * It's so much easier and only a waste when task_count > 1. | 4148 | * the extra padding for the (task_count == 1) case. |
4121 | * That is extremely rare. | ||
4122 | */ | 4149 | */ |
4123 | task->task_sg_nents = sgl_nents; | 4150 | if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { |
4124 | if (cmd->se_tfo->task_sg_chaining) { | 4151 | task_sg_nents_padded = (task->task_sg_nents + 1); |
4125 | task->task_sg_nents++; | ||
4126 | task->task_padded_sg = 1; | 4152 | task->task_padded_sg = 1; |
4127 | } | 4153 | } else |
4154 | task_sg_nents_padded = task->task_sg_nents; | ||
4128 | 4155 | ||
4129 | task->task_sg = kmalloc(sizeof(struct scatterlist) * | 4156 | task->task_sg = kmalloc(sizeof(struct scatterlist) * |
4130 | task->task_sg_nents, GFP_KERNEL); | 4157 | task_sg_nents_padded, GFP_KERNEL); |
4131 | if (!task->task_sg) { | 4158 | if (!task->task_sg) { |
4132 | cmd->se_dev->transport->free_task(task); | 4159 | cmd->se_dev->transport->free_task(task); |
4133 | return -ENOMEM; | 4160 | return -ENOMEM; |
4134 | } | 4161 | } |
4135 | 4162 | ||
4136 | sg_init_table(task->task_sg, task->task_sg_nents); | 4163 | sg_init_table(task->task_sg, task_sg_nents_padded); |
4137 | 4164 | ||
4138 | task_size = task->task_size; | 4165 | task_size = task->task_size; |
4139 | 4166 | ||
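transport_allocate_data_tasks() now sizes each task SGL from the task's own length with DIV_ROUND_UP(task_size, PAGE_SIZE), and only adds the +1 padding entry used by sg_chain() for tasks that are not the last of a multi-task command. A small standalone model of that sizing (assumes PAGE_SIZE-chunked SG entries, as the new comment in the hunk states):

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* SG entries a task needs, including the extra padding entry for
     * sg_chain() on every task except the last of a multi-task command. */
    static unsigned int task_sg_nents_padded(unsigned int task_size,
                                             int is_last_task, int sg_chaining)
    {
        unsigned int nents = DIV_ROUND_UP(task_size, PAGE_SIZE);

        if (sg_chaining && !is_last_task)
            nents += 1;     /* room for the chain-link entry */
        return nents;
    }

    int main(void)
    {
        /* e.g. 128 KiB tasks with chaining enabled: 32 data entries plus one
         * chain entry per task, and just 32 for the final task. */
        printf("%u\n", task_sg_nents_padded(128 * 1024, 0, 1)); /* 33 */
        printf("%u\n", task_sg_nents_padded(128 * 1024, 1, 1)); /* 32 */
        return 0;
    }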
@@ -4230,10 +4257,13 @@ static u32 transport_allocate_tasks( | |||
4230 | struct scatterlist *sgl, | 4257 | struct scatterlist *sgl, |
4231 | unsigned int sgl_nents) | 4258 | unsigned int sgl_nents) |
4232 | { | 4259 | { |
4233 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) | 4260 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { |
4261 | if (transport_cmd_get_valid_sectors(cmd) < 0) | ||
4262 | return -EINVAL; | ||
4263 | |||
4234 | return transport_allocate_data_tasks(cmd, lba, data_direction, | 4264 | return transport_allocate_data_tasks(cmd, lba, data_direction, |
4235 | sgl, sgl_nents); | 4265 | sgl, sgl_nents); |
4236 | else | 4266 | } else |
4237 | return transport_allocate_control_task(cmd); | 4267 | return transport_allocate_control_task(cmd); |
4238 | 4268 | ||
4239 | } | 4269 | } |
@@ -4726,6 +4756,13 @@ int transport_send_check_condition_and_sense( | |||
4726 | */ | 4756 | */ |
4727 | switch (reason) { | 4757 | switch (reason) { |
4728 | case TCM_NON_EXISTENT_LUN: | 4758 | case TCM_NON_EXISTENT_LUN: |
4759 | /* CURRENT ERROR */ | ||
4760 | buffer[offset] = 0x70; | ||
4761 | /* ILLEGAL REQUEST */ | ||
4762 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | ||
4763 | /* LOGICAL UNIT NOT SUPPORTED */ | ||
4764 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; | ||
4765 | break; | ||
4729 | case TCM_UNSUPPORTED_SCSI_OPCODE: | 4766 | case TCM_UNSUPPORTED_SCSI_OPCODE: |
4730 | case TCM_SECTOR_COUNT_TOO_MANY: | 4767 | case TCM_SECTOR_COUNT_TOO_MANY: |
4731 | /* CURRENT ERROR */ | 4768 | /* CURRENT ERROR */ |
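The added TCM_NON_EXISTENT_LUN case builds fixed-format sense data by hand: response code 0x70 (current error), sense key ILLEGAL REQUEST, ASC 0x25 (LOGICAL UNIT NOT SUPPORTED). A minimal sketch of those byte positions (offsets 2 and 12 follow SPC fixed-format sense and are assumed to match the kernel's SPC_SENSE_KEY_OFFSET / SPC_ASC_KEY_OFFSET):

    #include <stdio.h>

    #define ILLEGAL_REQUEST   0x05
    #define SENSE_KEY_OFFSET  2     /* fixed-format sense: byte 2     */
    #define ASC_OFFSET        12    /* additional sense code: byte 12 */

    /* Fill fixed-format sense data the way the new case does:
     * CURRENT ERROR, ILLEGAL REQUEST, LOGICAL UNIT NOT SUPPORTED. */
    static void fill_lun_not_supported_sense(unsigned char *buf)
    {
        buf[0] = 0x70;                      /* current error, fixed format */
        buf[SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
        buf[ASC_OFFSET] = 0x25;             /* LOGICAL UNIT NOT SUPPORTED  */
    }

    int main(void)
    {
        unsigned char sense[18] = { 0 };

        fill_lun_not_supported_sense(sense);
        printf("%02x %02x %02x\n", sense[0], sense[2], sense[12]);
        return 0;
    }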
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index bd4fe21a23b8..3749d8b4b423 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h | |||
@@ -98,8 +98,7 @@ struct ft_tpg { | |||
98 | struct list_head list; /* linkage in ft_lport_acl tpg_list */ | 98 | struct list_head list; /* linkage in ft_lport_acl tpg_list */ |
99 | struct list_head lun_list; /* head of LUNs */ | 99 | struct list_head lun_list; /* head of LUNs */ |
100 | struct se_portal_group se_tpg; | 100 | struct se_portal_group se_tpg; |
101 | struct task_struct *thread; /* processing thread */ | 101 | struct workqueue_struct *workqueue; |
102 | struct se_queue_obj qobj; /* queue for processing thread */ | ||
103 | }; | 102 | }; |
104 | 103 | ||
105 | struct ft_lport_acl { | 104 | struct ft_lport_acl { |
@@ -110,16 +109,10 @@ struct ft_lport_acl { | |||
110 | struct se_wwn fc_lport_wwn; | 109 | struct se_wwn fc_lport_wwn; |
111 | }; | 110 | }; |
112 | 111 | ||
113 | enum ft_cmd_state { | ||
114 | FC_CMD_ST_NEW = 0, | ||
115 | FC_CMD_ST_REJ | ||
116 | }; | ||
117 | |||
118 | /* | 112 | /* |
119 | * Commands | 113 | * Commands |
120 | */ | 114 | */ |
121 | struct ft_cmd { | 115 | struct ft_cmd { |
122 | enum ft_cmd_state state; | ||
123 | u32 lun; /* LUN from request */ | 116 | u32 lun; /* LUN from request */ |
124 | struct ft_sess *sess; /* session held for cmd */ | 117 | struct ft_sess *sess; /* session held for cmd */ |
125 | struct fc_seq *seq; /* sequence in exchange mgr */ | 118 | struct fc_seq *seq; /* sequence in exchange mgr */ |
@@ -127,7 +120,7 @@ struct ft_cmd { | |||
127 | struct fc_frame *req_frame; | 120 | struct fc_frame *req_frame; |
128 | unsigned char *cdb; /* pointer to CDB inside frame */ | 121 | unsigned char *cdb; /* pointer to CDB inside frame */ |
129 | u32 write_data_len; /* data received on writes */ | 122 | u32 write_data_len; /* data received on writes */ |
130 | struct se_queue_req se_req; | 123 | struct work_struct work; |
131 | /* Local sense buffer */ | 124 | /* Local sense buffer */ |
132 | unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER]; | 125 | unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER]; |
133 | u32 was_ddp_setup:1; /* Set only if ddp is setup */ | 126 | u32 was_ddp_setup:1; /* Set only if ddp is setup */ |
@@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *); | |||
177 | /* | 170 | /* |
178 | * other internal functions. | 171 | * other internal functions. |
179 | */ | 172 | */ |
180 | int ft_thread(void *); | ||
181 | void ft_recv_req(struct ft_sess *, struct fc_frame *); | 173 | void ft_recv_req(struct ft_sess *, struct fc_frame *); |
182 | struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); | 174 | struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); |
183 | struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *); | 175 | struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *); |
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 5654dc22f7ae..80fbcde00cb6 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c | |||
@@ -62,8 +62,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) | |||
62 | int count; | 62 | int count; |
63 | 63 | ||
64 | se_cmd = &cmd->se_cmd; | 64 | se_cmd = &cmd->se_cmd; |
65 | pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", | 65 | pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", |
66 | caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); | 66 | caller, cmd, cmd->sess, cmd->seq, se_cmd); |
67 | pr_debug("%s: cmd %p cdb %p\n", | 67 | pr_debug("%s: cmd %p cdb %p\n", |
68 | caller, cmd, cmd->cdb); | 68 | caller, cmd, cmd->cdb); |
69 | pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); | 69 | pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); |
@@ -90,38 +90,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) | |||
90 | 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); | 90 | 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); |
91 | } | 91 | } |
92 | 92 | ||
93 | static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) | ||
94 | { | ||
95 | struct ft_tpg *tpg = sess->tport->tpg; | ||
96 | struct se_queue_obj *qobj = &tpg->qobj; | ||
97 | unsigned long flags; | ||
98 | |||
99 | qobj = &sess->tport->tpg->qobj; | ||
100 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | ||
101 | list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list); | ||
102 | atomic_inc(&qobj->queue_cnt); | ||
103 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
104 | |||
105 | wake_up_process(tpg->thread); | ||
106 | } | ||
107 | |||
108 | static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj) | ||
109 | { | ||
110 | unsigned long flags; | ||
111 | struct se_queue_req *qr; | ||
112 | |||
113 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | ||
114 | if (list_empty(&qobj->qobj_list)) { | ||
115 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
116 | return NULL; | ||
117 | } | ||
118 | qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list); | ||
119 | list_del(&qr->qr_list); | ||
120 | atomic_dec(&qobj->queue_cnt); | ||
121 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
122 | return container_of(qr, struct ft_cmd, se_req); | ||
123 | } | ||
124 | |||
125 | static void ft_free_cmd(struct ft_cmd *cmd) | 93 | static void ft_free_cmd(struct ft_cmd *cmd) |
126 | { | 94 | { |
127 | struct fc_frame *fp; | 95 | struct fc_frame *fp; |
@@ -282,9 +250,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd) | |||
282 | 250 | ||
283 | int ft_get_cmd_state(struct se_cmd *se_cmd) | 251 | int ft_get_cmd_state(struct se_cmd *se_cmd) |
284 | { | 252 | { |
285 | struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); | 253 | return 0; |
286 | |||
287 | return cmd->state; | ||
288 | } | 254 | } |
289 | 255 | ||
290 | int ft_is_state_remove(struct se_cmd *se_cmd) | 256 | int ft_is_state_remove(struct se_cmd *se_cmd) |
@@ -505,6 +471,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd) | |||
505 | return 0; | 471 | return 0; |
506 | } | 472 | } |
507 | 473 | ||
474 | static void ft_send_work(struct work_struct *work); | ||
475 | |||
508 | /* | 476 | /* |
509 | * Handle incoming FCP command. | 477 | * Handle incoming FCP command. |
510 | */ | 478 | */ |
@@ -523,7 +491,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) | |||
523 | goto busy; | 491 | goto busy; |
524 | } | 492 | } |
525 | cmd->req_frame = fp; /* hold frame during cmd */ | 493 | cmd->req_frame = fp; /* hold frame during cmd */ |
526 | ft_queue_cmd(sess, cmd); | 494 | |
495 | INIT_WORK(&cmd->work, ft_send_work); | ||
496 | queue_work(sess->tport->tpg->workqueue, &cmd->work); | ||
527 | return; | 497 | return; |
528 | 498 | ||
529 | busy: | 499 | busy: |
@@ -563,12 +533,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp) | |||
563 | /* | 533 | /* |
564 | * Send new command to target. | 534 | * Send new command to target. |
565 | */ | 535 | */ |
566 | static void ft_send_cmd(struct ft_cmd *cmd) | 536 | static void ft_send_work(struct work_struct *work) |
567 | { | 537 | { |
538 | struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); | ||
568 | struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); | 539 | struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); |
569 | struct se_cmd *se_cmd; | 540 | struct se_cmd *se_cmd; |
570 | struct fcp_cmnd *fcp; | 541 | struct fcp_cmnd *fcp; |
571 | int data_dir; | 542 | int data_dir = 0; |
572 | u32 data_len; | 543 | u32 data_len; |
573 | int task_attr; | 544 | int task_attr; |
574 | int ret; | 545 | int ret; |
@@ -675,42 +646,3 @@ static void ft_send_cmd(struct ft_cmd *cmd) | |||
675 | err: | 646 | err: |
676 | ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); | 647 | ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); |
677 | } | 648 | } |
678 | |||
679 | /* | ||
680 | * Handle request in the command thread. | ||
681 | */ | ||
682 | static void ft_exec_req(struct ft_cmd *cmd) | ||
683 | { | ||
684 | pr_debug("cmd state %x\n", cmd->state); | ||
685 | switch (cmd->state) { | ||
686 | case FC_CMD_ST_NEW: | ||
687 | ft_send_cmd(cmd); | ||
688 | break; | ||
689 | default: | ||
690 | break; | ||
691 | } | ||
692 | } | ||
693 | |||
694 | /* | ||
695 | * Processing thread. | ||
696 | * Currently one thread per tpg. | ||
697 | */ | ||
698 | int ft_thread(void *arg) | ||
699 | { | ||
700 | struct ft_tpg *tpg = arg; | ||
701 | struct se_queue_obj *qobj = &tpg->qobj; | ||
702 | struct ft_cmd *cmd; | ||
703 | |||
704 | while (!kthread_should_stop()) { | ||
705 | schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); | ||
706 | if (kthread_should_stop()) | ||
707 | goto out; | ||
708 | |||
709 | cmd = ft_dequeue_cmd(qobj); | ||
710 | if (cmd) | ||
711 | ft_exec_req(cmd); | ||
712 | } | ||
713 | |||
714 | out: | ||
715 | return 0; | ||
716 | } | ||
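The tcm_fc changes drop the per-TPG command thread and queue object: each ft_cmd now embeds a work_struct, ft_recv_cmd() does INIT_WORK() plus queue_work(), and the old ft_send_cmd() body becomes the work function ft_send_work(). A rough userspace model of that per-item work pattern using one pthread worker (every name in the sketch is invented; it only mirrors the shape of the conversion):

    #include <pthread.h>
    #include <stdio.h>

    struct work_item {
        void (*fn)(struct work_item *);
        struct work_item *next;
    };

    static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wq_cond = PTHREAD_COND_INITIALIZER;
    static struct work_item *wq_head;
    static int wq_stop;

    /* queue_work(): hand the item to the worker instead of running it here. */
    static void queue_work(struct work_item *w)
    {
        pthread_mutex_lock(&wq_lock);
        w->next = wq_head;
        wq_head = w;
        pthread_cond_signal(&wq_cond);
        pthread_mutex_unlock(&wq_lock);
    }

    static void *worker(void *unused)
    {
        (void)unused;
        pthread_mutex_lock(&wq_lock);
        while (!wq_stop || wq_head) {
            struct work_item *w = wq_head;

            if (!w) {
                pthread_cond_wait(&wq_cond, &wq_lock);
                continue;
            }
            wq_head = w->next;
            pthread_mutex_unlock(&wq_lock);
            w->fn(w);                       /* ft_send_work() equivalent */
            pthread_mutex_lock(&wq_lock);
        }
        pthread_mutex_unlock(&wq_lock);
        return NULL;
    }

    static void send_cmd(struct work_item *w)
    {
        printf("processing command %p in worker context\n", (void *)w);
    }

    int main(void)
    {
        pthread_t tid;
        struct work_item cmd = { .fn = send_cmd };

        pthread_create(&tid, NULL, worker, NULL);
        queue_work(&cmd);                   /* ft_recv_cmd() equivalent */

        pthread_mutex_lock(&wq_lock);
        wq_stop = 1;                        /* destroy_workqueue() equivalent */
        pthread_cond_signal(&wq_cond);
        pthread_mutex_unlock(&wq_lock);
        pthread_join(tid, NULL);
        return 0;
    }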
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 8781d1e423df..8fa39b74f22c 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c | |||
@@ -256,7 +256,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) | |||
256 | struct se_portal_group *se_tpg = &tpg->se_tpg; | 256 | struct se_portal_group *se_tpg = &tpg->se_tpg; |
257 | struct se_node_acl *se_acl; | 257 | struct se_node_acl *se_acl; |
258 | 258 | ||
259 | spin_lock_bh(&se_tpg->acl_node_lock); | 259 | spin_lock_irq(&se_tpg->acl_node_lock); |
260 | list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { | 260 | list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { |
261 | acl = container_of(se_acl, struct ft_node_acl, se_node_acl); | 261 | acl = container_of(se_acl, struct ft_node_acl, se_node_acl); |
262 | pr_debug("acl %p port_name %llx\n", | 262 | pr_debug("acl %p port_name %llx\n", |
@@ -270,7 +270,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) | |||
270 | break; | 270 | break; |
271 | } | 271 | } |
272 | } | 272 | } |
273 | spin_unlock_bh(&se_tpg->acl_node_lock); | 273 | spin_unlock_irq(&se_tpg->acl_node_lock); |
274 | return found; | 274 | return found; |
275 | } | 275 | } |
276 | 276 | ||
@@ -327,7 +327,6 @@ static struct se_portal_group *ft_add_tpg( | |||
327 | tpg->index = index; | 327 | tpg->index = index; |
328 | tpg->lport_acl = lacl; | 328 | tpg->lport_acl = lacl; |
329 | INIT_LIST_HEAD(&tpg->lun_list); | 329 | INIT_LIST_HEAD(&tpg->lun_list); |
330 | transport_init_queue_obj(&tpg->qobj); | ||
331 | 330 | ||
332 | ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, | 331 | ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, |
333 | tpg, TRANSPORT_TPG_TYPE_NORMAL); | 332 | tpg, TRANSPORT_TPG_TYPE_NORMAL); |
@@ -336,8 +335,8 @@ static struct se_portal_group *ft_add_tpg( | |||
336 | return NULL; | 335 | return NULL; |
337 | } | 336 | } |
338 | 337 | ||
339 | tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index); | 338 | tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1); |
340 | if (IS_ERR(tpg->thread)) { | 339 | if (!tpg->workqueue) { |
341 | kfree(tpg); | 340 | kfree(tpg); |
342 | return NULL; | 341 | return NULL; |
343 | } | 342 | } |
@@ -356,7 +355,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg) | |||
356 | pr_debug("del tpg %s\n", | 355 | pr_debug("del tpg %s\n", |
357 | config_item_name(&tpg->se_tpg.tpg_group.cg_item)); | 356 | config_item_name(&tpg->se_tpg.tpg_group.cg_item)); |
358 | 357 | ||
359 | kthread_stop(tpg->thread); | 358 | destroy_workqueue(tpg->workqueue); |
360 | 359 | ||
361 | /* Wait for sessions to be freed thru RCU, for BUG_ON below */ | 360 | /* Wait for sessions to be freed thru RCU, for BUG_ON below */ |
362 | synchronize_rcu(); | 361 | synchronize_rcu(); |
@@ -655,9 +654,7 @@ static void __exit ft_exit(void) | |||
655 | synchronize_rcu(); | 654 | synchronize_rcu(); |
656 | } | 655 | } |
657 | 656 | ||
658 | #ifdef MODULE | ||
659 | MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); | 657 | MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); |
660 | MODULE_LICENSE("GPL"); | 658 | MODULE_LICENSE("GPL"); |
661 | module_init(ft_init); | 659 | module_init(ft_init); |
662 | module_exit(ft_exit); | 660 | module_exit(ft_exit); |
663 | #endif /* MODULE */ | ||
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index c37f4cd96452..d35ea5a3d56c 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c | |||
@@ -219,43 +219,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) | |||
219 | if (cmd->was_ddp_setup) { | 219 | if (cmd->was_ddp_setup) { |
220 | BUG_ON(!ep); | 220 | BUG_ON(!ep); |
221 | BUG_ON(!lport); | 221 | BUG_ON(!lport); |
222 | } | 222 | /* |
223 | 223 | * Since DDP (Large Rx offload) was setup for this request, | |
224 | /* | 224 | * payload is expected to be copied directly to user buffers. |
225 | * Doesn't expect payload if DDP is setup. Payload | 225 | */ |
226 | * is expected to be copied directly to user buffers | 226 | buf = fc_frame_payload_get(fp, 1); |
227 | * due to DDP (Large Rx offload), | 227 | if (buf) |
228 | */ | 228 | pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " |
229 | buf = fc_frame_payload_get(fp, 1); | ||
230 | if (buf) | ||
231 | pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " | ||
232 | "cmd->sg_cnt 0x%x. DDP was setup" | 229 | "cmd->sg_cnt 0x%x. DDP was setup" |
233 | " hence not expected to receive frame with " | 230 | " hence not expected to receive frame with " |
234 | "payload, Frame will be dropped if " | 231 | "payload, Frame will be dropped if" |
235 | "'Sequence Initiative' bit in f_ctl is " | 232 | "'Sequence Initiative' bit in f_ctl is" |
236 | "not set\n", __func__, ep->xid, f_ctl, | 233 | "not set\n", __func__, ep->xid, f_ctl, |
237 | cmd->sg, cmd->sg_cnt); | 234 | cmd->sg, cmd->sg_cnt); |
238 | /* | 235 | /* |
239 | * Invalidate HW DDP context if it was setup for respective | 236 | * Invalidate HW DDP context if it was setup for respective |
240 | * command. Invalidation of HW DDP context is requited in both | 237 | * command. Invalidation of HW DDP context is requited in both |
241 | * situation (success and error). | 238 | * situation (success and error). |
242 | */ | 239 | */ |
243 | ft_invl_hw_context(cmd); | 240 | ft_invl_hw_context(cmd); |
244 | 241 | ||
245 | /* | 242 | /* |
246 | * If "Sequence Initiative (TSI)" bit set in f_ctl, means last | 243 | * If "Sequence Initiative (TSI)" bit set in f_ctl, means last |
247 | * write data frame is received successfully where payload is | 244 | * write data frame is received successfully where payload is |
248 | * posted directly to user buffer and only the last frame's | 245 | * posted directly to user buffer and only the last frame's |
249 | * header is posted in receive queue. | 246 | * header is posted in receive queue. |
250 | * | 247 | * |
251 | * If "Sequence Initiative (TSI)" bit is not set, means error | 248 | * If "Sequence Initiative (TSI)" bit is not set, means error |
252 | * condition w.r.t. DDP, hence drop the packet and let explict | 249 | * condition w.r.t. DDP, hence drop the packet and let explict |
253 | * ABORTS from other end of exchange timer trigger the recovery. | 250 | * ABORTS from other end of exchange timer trigger the recovery. |
254 | */ | 251 | */ |
255 | if (f_ctl & FC_FC_SEQ_INIT) | 252 | if (f_ctl & FC_FC_SEQ_INIT) |
256 | goto last_frame; | 253 | goto last_frame; |
257 | else | 254 | else |
258 | goto drop; | 255 | goto drop; |
256 | } | ||
259 | 257 | ||
260 | rel_off = ntohl(fh->fh_parm_offset); | 258 | rel_off = ntohl(fh->fh_parm_offset); |
261 | frame_len = fr_len(fp); | 259 | frame_len = fr_len(fp); |
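
The tfc_io.c hunk above folds the payload check, the DDP-context invalidation and the Sequence Initiative decision into the was_ddp_setup branch, so frames received without DDP fall straight through to the normal copy path. Below is a minimal standalone model of that restructured flow; the function shape mirrors the driver, but the stubs and the FC_FC_SEQ_INIT value used here are illustrative assumptions, not the real libfc definitions.

#include <stdio.h>
#include <stdbool.h>

#define FC_FC_SEQ_INIT 0x01     /* assumption: stand-in for the real flag bit */

/*
 * Model of the restructured receive path: payload warning, DDP-context
 * invalidation and the Sequence Initiative decision all live inside the
 * "DDP was set up" branch; non-DDP frames go to normal payload copying.
 */
static const char *recv_write_data(bool was_ddp_setup, bool has_payload,
                                   unsigned int f_ctl)
{
        if (was_ddp_setup) {
                if (has_payload)
                        printf("unexpected payload while DDP is active\n");
                /* ft_invl_hw_context(cmd) would run here, success or error */
                return (f_ctl & FC_FC_SEQ_INIT) ? "last_frame" : "drop";
        }
        return "copy payload from frame";
}

int main(void)
{
        printf("%s\n", recv_write_data(true, false, FC_FC_SEQ_INIT));
        printf("%s\n", recv_write_data(true, true, 0));
        printf("%s\n", recv_write_data(false, true, 0));
        return 0;
}
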
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 98b6e3bdb000..e809e9d4683c 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c | |||
@@ -446,8 +446,19 @@ static inline void legacy_pty_init(void) { } | |||
446 | int pty_limit = NR_UNIX98_PTY_DEFAULT; | 446 | int pty_limit = NR_UNIX98_PTY_DEFAULT; |
447 | static int pty_limit_min; | 447 | static int pty_limit_min; |
448 | static int pty_limit_max = NR_UNIX98_PTY_MAX; | 448 | static int pty_limit_max = NR_UNIX98_PTY_MAX; |
449 | static int tty_count; | ||
449 | static int pty_count; | 450 | static int pty_count; |
450 | 451 | ||
452 | static inline void pty_inc_count(void) | ||
453 | { | ||
454 | pty_count = (++tty_count) / 2; | ||
455 | } | ||
456 | |||
457 | static inline void pty_dec_count(void) | ||
458 | { | ||
459 | pty_count = (--tty_count) / 2; | ||
460 | } | ||
461 | |||
451 | static struct cdev ptmx_cdev; | 462 | static struct cdev ptmx_cdev; |
452 | 463 | ||
453 | static struct ctl_table pty_table[] = { | 464 | static struct ctl_table pty_table[] = { |
@@ -542,6 +553,7 @@ static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver, | |||
542 | 553 | ||
543 | static void pty_unix98_shutdown(struct tty_struct *tty) | 554 | static void pty_unix98_shutdown(struct tty_struct *tty) |
544 | { | 555 | { |
556 | tty_driver_remove_tty(tty->driver, tty); | ||
545 | /* We have our own method as we don't use the tty index */ | 557 | /* We have our own method as we don't use the tty index */ |
546 | kfree(tty->termios); | 558 | kfree(tty->termios); |
547 | } | 559 | } |
@@ -588,7 +600,8 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty) | |||
588 | */ | 600 | */ |
589 | tty_driver_kref_get(driver); | 601 | tty_driver_kref_get(driver); |
590 | tty->count++; | 602 | tty->count++; |
591 | pty_count++; | 603 | pty_inc_count(); /* tty */ |
604 | pty_inc_count(); /* tty->link */ | ||
592 | return 0; | 605 | return 0; |
593 | err_free_mem: | 606 | err_free_mem: |
594 | deinitialize_tty_struct(o_tty); | 607 | deinitialize_tty_struct(o_tty); |
@@ -602,7 +615,7 @@ err_free_tty: | |||
602 | 615 | ||
603 | static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) | 616 | static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) |
604 | { | 617 | { |
605 | pty_count--; | 618 | pty_dec_count(); |
606 | } | 619 | } |
607 | 620 | ||
608 | static const struct tty_operations ptm_unix98_ops = { | 621 | static const struct tty_operations ptm_unix98_ops = { |
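
The pty_inc_count()/pty_dec_count() helpers added above keep the sysctl-visible pty_count at half of a shared tty_count, because each Unix98 pty install now accounts for both the master tty and its slave link. A small standalone model of that bookkeeping (single caller assumed; the real helpers run under tty locking):

#include <stdio.h>

static int tty_count;
static int pty_count;

/* Every pty open creates two tty structs, so the user-visible count is half. */
static void pty_inc_count(void) { pty_count = ++tty_count / 2; }
static void pty_dec_count(void) { pty_count = --tty_count / 2; }

int main(void)
{
        pty_inc_count();        /* tty */
        pty_inc_count();        /* tty->link */
        printf("open:  ptys=%d ttys=%d\n", pty_count, tty_count);

        pty_dec_count();
        pty_dec_count();
        printf("close: ptys=%d ttys=%d\n", pty_count, tty_count);
        return 0;
}
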
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c index f2dfec82faf8..7f50999eebc2 100644 --- a/drivers/tty/serial/8250.c +++ b/drivers/tty/serial/8250.c | |||
@@ -1819,6 +1819,8 @@ static void serial8250_backup_timeout(unsigned long data) | |||
1819 | unsigned int iir, ier = 0, lsr; | 1819 | unsigned int iir, ier = 0, lsr; |
1820 | unsigned long flags; | 1820 | unsigned long flags; |
1821 | 1821 | ||
1822 | spin_lock_irqsave(&up->port.lock, flags); | ||
1823 | |||
1822 | /* | 1824 | /* |
1823 | * Must disable interrupts or else we risk racing with the interrupt | 1825 | * Must disable interrupts or else we risk racing with the interrupt |
1824 | * based handler. | 1826 | * based handler. |
@@ -1836,10 +1838,8 @@ static void serial8250_backup_timeout(unsigned long data) | |||
1836 | * the "Diva" UART used on the management processor on many HP | 1838 | * the "Diva" UART used on the management processor on many HP |
1837 | * ia64 and parisc boxes. | 1839 | * ia64 and parisc boxes. |
1838 | */ | 1840 | */ |
1839 | spin_lock_irqsave(&up->port.lock, flags); | ||
1840 | lsr = serial_in(up, UART_LSR); | 1841 | lsr = serial_in(up, UART_LSR); |
1841 | up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; | 1842 | up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; |
1842 | spin_unlock_irqrestore(&up->port.lock, flags); | ||
1843 | if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) && | 1843 | if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) && |
1844 | (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) && | 1844 | (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) && |
1845 | (lsr & UART_LSR_THRE)) { | 1845 | (lsr & UART_LSR_THRE)) { |
@@ -1848,11 +1848,13 @@ static void serial8250_backup_timeout(unsigned long data) | |||
1848 | } | 1848 | } |
1849 | 1849 | ||
1850 | if (!(iir & UART_IIR_NO_INT)) | 1850 | if (!(iir & UART_IIR_NO_INT)) |
1851 | serial8250_handle_port(up); | 1851 | transmit_chars(up); |
1852 | 1852 | ||
1853 | if (is_real_interrupt(up->port.irq)) | 1853 | if (is_real_interrupt(up->port.irq)) |
1854 | serial_out(up, UART_IER, ier); | 1854 | serial_out(up, UART_IER, ier); |
1855 | 1855 | ||
1856 | spin_unlock_irqrestore(&up->port.lock, flags); | ||
1857 | |||
1856 | /* Standard timer interval plus 0.2s to keep the port running */ | 1858 | /* Standard timer interval plus 0.2s to keep the port running */ |
1857 | mod_timer(&up->timer, | 1859 | mod_timer(&up->timer, |
1858 | jiffies + uart_poll_timeout(&up->port) + HZ / 5); | 1860 | jiffies + uart_poll_timeout(&up->port) + HZ / 5); |
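
The 8250.c change widens the port lock from just the LSR read to the whole backup-timeout handler, so the poll path can no longer interleave with the interrupt handler between the status read and transmit_chars(). The sketch below models that widened critical section with a pthread mutex standing in for the spinlock; the register values are pretend data and the point is only the locking shape, not driver behaviour.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static int lsr_saved_flags;

static void backup_timeout(void)
{
        /* Before the patch the lock covered only the LSR read below. */
        pthread_mutex_lock(&port_lock);

        int lsr = 0x60;                 /* pretend UART_LSR: THRE | TEMT */
        lsr_saved_flags |= lsr & 0x1e;  /* LSR_SAVE_FLAGS-style error bits */
        /* transmit_chars() etc. would run here, still under the lock */

        pthread_mutex_unlock(&port_lock);
}

int main(void)
{
        backup_timeout();
        printf("saved flags: %#x\n", lsr_saved_flags);
        return 0;
}
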
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c index 6b887d90a205..3abeca2a2a1b 100644 --- a/drivers/tty/serial/8250_pci.c +++ b/drivers/tty/serial/8250_pci.c | |||
@@ -1599,11 +1599,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { | |||
1599 | .device = 0x800D, | 1599 | .device = 0x800D, |
1600 | .init = pci_eg20t_init, | 1600 | .init = pci_eg20t_init, |
1601 | }, | 1601 | }, |
1602 | { | ||
1603 | .vendor = 0x10DB, | ||
1604 | .device = 0x800D, | ||
1605 | .init = pci_eg20t_init, | ||
1606 | }, | ||
1607 | /* | 1602 | /* |
1608 | * Cronyx Omega PCI (PLX-chip based) | 1603 | * Cronyx Omega PCI (PLX-chip based) |
1609 | */ | 1604 | */ |
@@ -4021,7 +4016,7 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
4021 | 0, 0, pbn_NETMOS9900_2s_115200 }, | 4016 | 0, 0, pbn_NETMOS9900_2s_115200 }, |
4022 | 4017 | ||
4023 | /* | 4018 | /* |
4024 | * Best Connectivity PCI Multi I/O cards | 4019 | * Best Connectivity and Rosewill PCI Multi I/O cards |
4025 | */ | 4020 | */ |
4026 | 4021 | ||
4027 | { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, | 4022 | { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, |
@@ -4029,6 +4024,10 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
4029 | 0, 0, pbn_b0_1_115200 }, | 4024 | 0, 0, pbn_b0_1_115200 }, |
4030 | 4025 | ||
4031 | { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, | 4026 | { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, |
4027 | 0xA000, 0x3002, | ||
4028 | 0, 0, pbn_b0_bt_2_115200 }, | ||
4029 | |||
4030 | { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, | ||
4032 | 0xA000, 0x3004, | 4031 | 0xA000, 0x3004, |
4033 | 0, 0, pbn_b0_bt_4_115200 }, | 4032 | 0, 0, pbn_b0_bt_4_115200 }, |
4034 | /* Intel CE4100 */ | 4033 | /* Intel CE4100 */ |
diff --git a/drivers/tty/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c index fc301f6722e1..a2f236510ff1 100644 --- a/drivers/tty/serial/8250_pnp.c +++ b/drivers/tty/serial/8250_pnp.c | |||
@@ -109,6 +109,9 @@ static const struct pnp_device_id pnp_dev_table[] = { | |||
109 | /* IBM */ | 109 | /* IBM */ |
110 | /* IBM Thinkpad 701 Internal Modem Voice */ | 110 | /* IBM Thinkpad 701 Internal Modem Voice */ |
111 | { "IBM0033", 0 }, | 111 | { "IBM0033", 0 }, |
112 | /* Intermec */ | ||
113 | /* Intermec CV60 touchscreen port */ | ||
114 | { "PNP4972", 0 }, | ||
112 | /* Intertex */ | 115 | /* Intertex */ |
113 | /* Intertex 28k8 33k6 Voice EXT PnP */ | 116 | /* Intertex 28k8 33k6 Voice EXT PnP */ |
114 | { "IXDC801", 0 }, | 117 | { "IXDC801", 0 }, |
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index af9b7814965a..b922f5d2e61e 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
@@ -1609,9 +1609,11 @@ static struct console atmel_console = { | |||
1609 | static int __init atmel_console_init(void) | 1609 | static int __init atmel_console_init(void) |
1610 | { | 1610 | { |
1611 | if (atmel_default_console_device) { | 1611 | if (atmel_default_console_device) { |
1612 | add_preferred_console(ATMEL_DEVICENAME, | 1612 | struct atmel_uart_data *pdata = |
1613 | atmel_default_console_device->id, NULL); | 1613 | atmel_default_console_device->dev.platform_data; |
1614 | atmel_init_port(&atmel_ports[atmel_default_console_device->id], | 1614 | |
1615 | add_preferred_console(ATMEL_DEVICENAME, pdata->num, NULL); | ||
1616 | atmel_init_port(&atmel_ports[pdata->num], | ||
1615 | atmel_default_console_device); | 1617 | atmel_default_console_device); |
1616 | register_console(&atmel_console); | 1618 | register_console(&atmel_console); |
1617 | } | 1619 | } |
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index 225123b37f19..58be715913cd 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c | |||
@@ -4450,7 +4450,7 @@ static int __init rs_init(void) | |||
4450 | 4450 | ||
4451 | #if defined(CONFIG_ETRAX_RS485) | 4451 | #if defined(CONFIG_ETRAX_RS485) |
4452 | #if defined(CONFIG_ETRAX_RS485_ON_PA) | 4452 | #if defined(CONFIG_ETRAX_RS485_ON_PA) |
4453 | if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit, | 4453 | if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit, |
4454 | rs485_pa_bit)) { | 4454 | rs485_pa_bit)) { |
4455 | printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " | 4455 | printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " |
4456 | "RS485 pin\n"); | 4456 | "RS485 pin\n"); |
@@ -4459,7 +4459,7 @@ static int __init rs_init(void) | |||
4459 | } | 4459 | } |
4460 | #endif | 4460 | #endif |
4461 | #if defined(CONFIG_ETRAX_RS485_ON_PORT_G) | 4461 | #if defined(CONFIG_ETRAX_RS485_ON_PORT_G) |
4462 | if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit, | 4462 | if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit, |
4463 | rs485_port_g_bit)) { | 4463 | rs485_port_g_bit)) { |
4464 | printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " | 4464 | printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " |
4465 | "RS485 pin\n"); | 4465 | "RS485 pin\n"); |
diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c index a1fe304f2f52..d73aadd7a9ad 100644 --- a/drivers/tty/serial/max3107-aava.c +++ b/drivers/tty/serial/max3107-aava.c | |||
@@ -340,5 +340,5 @@ module_exit(max3107_exit); | |||
340 | 340 | ||
341 | MODULE_DESCRIPTION("MAX3107 driver"); | 341 | MODULE_DESCRIPTION("MAX3107 driver"); |
342 | MODULE_AUTHOR("Aavamobile"); | 342 | MODULE_AUTHOR("Aavamobile"); |
343 | MODULE_ALIAS("aava-max3107-spi"); | 343 | MODULE_ALIAS("spi:aava-max3107"); |
344 | MODULE_LICENSE("GPL v2"); | 344 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c index 750b4f627315..a8164601c0ea 100644 --- a/drivers/tty/serial/max3107.c +++ b/drivers/tty/serial/max3107.c | |||
@@ -1209,5 +1209,5 @@ module_exit(max3107_exit); | |||
1209 | 1209 | ||
1210 | MODULE_DESCRIPTION("MAX3107 driver"); | 1210 | MODULE_DESCRIPTION("MAX3107 driver"); |
1211 | MODULE_AUTHOR("Aavamobile"); | 1211 | MODULE_AUTHOR("Aavamobile"); |
1212 | MODULE_ALIAS("max3107-spi"); | 1212 | MODULE_ALIAS("spi:max3107"); |
1213 | MODULE_LICENSE("GPL v2"); | 1213 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c index a764bf99743b..23bc743f2a22 100644 --- a/drivers/tty/serial/mrst_max3110.c +++ b/drivers/tty/serial/mrst_max3110.c | |||
@@ -917,4 +917,4 @@ module_init(serial_m3110_init); | |||
917 | module_exit(serial_m3110_exit); | 917 | module_exit(serial_m3110_exit); |
918 | 918 | ||
919 | MODULE_LICENSE("GPL v2"); | 919 | MODULE_LICENSE("GPL v2"); |
920 | MODULE_ALIAS("max3110-uart"); | 920 | MODULE_ALIAS("spi:max3110-uart"); |
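
The three MODULE_ALIAS changes above all add the "spi:" prefix, which is what the SPI core puts in front of a device's modalias in its uevent; without the prefix, udev can never match the alias and autoload the module. A toy model of that string match (the buffer size and helper name are made up for the demo):

#include <stdio.h>
#include <string.h>

/* The uevent for an SPI device carries MODALIAS=spi:<name>, so a module
 * alias only matches if it carries the same "spi:" prefix. */
static int alias_matches(const char *module_alias, const char *dev_modalias)
{
        char uevent[64];

        snprintf(uevent, sizeof(uevent), "spi:%s", dev_modalias);
        return strcmp(module_alias, uevent) == 0;
}

int main(void)
{
        printf("%d\n", alias_matches("spi:max3110-uart", "max3110-uart")); /* 1 */
        printf("%d\n", alias_matches("max3110-uart", "max3110-uart"));     /* 0: old alias never matched */
        return 0;
}
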
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index c37df8d0fa28..5e713d3ef1f4 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c | |||
@@ -806,8 +806,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios, | |||
806 | 806 | ||
807 | serial_omap_set_mctrl(&up->port, up->port.mctrl); | 807 | serial_omap_set_mctrl(&up->port, up->port.mctrl); |
808 | /* Software Flow Control Configuration */ | 808 | /* Software Flow Control Configuration */ |
809 | if (termios->c_iflag & (IXON | IXOFF)) | 809 | serial_omap_configure_xonxoff(up, termios); |
810 | serial_omap_configure_xonxoff(up, termios); | ||
811 | 810 | ||
812 | spin_unlock_irqrestore(&up->port.lock, flags); | 811 | spin_unlock_irqrestore(&up->port.lock, flags); |
813 | dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id); | 812 | dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id); |
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 846dfcd3ce0d..b46218d679e2 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c | |||
@@ -598,7 +598,8 @@ static void pch_request_dma(struct uart_port *port) | |||
598 | dma_cap_zero(mask); | 598 | dma_cap_zero(mask); |
599 | dma_cap_set(DMA_SLAVE, mask); | 599 | dma_cap_set(DMA_SLAVE, mask); |
600 | 600 | ||
601 | dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(0xa, 0)); /* Get DMA's dev | 601 | dma_dev = pci_get_bus_and_slot(priv->pdev->bus->number, |
602 | PCI_DEVFN(0xa, 0)); /* Get DMA's dev | ||
602 | information */ | 603 | information */ |
603 | /* Set Tx DMA */ | 604 | /* Set Tx DMA */ |
604 | param = &priv->param_tx; | 605 | param = &priv->param_tx; |
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index afc629423152..6edafb5ace18 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c | |||
@@ -1225,15 +1225,19 @@ static const struct dev_pm_ops s3c24xx_serial_pm_ops = { | |||
1225 | .suspend = s3c24xx_serial_suspend, | 1225 | .suspend = s3c24xx_serial_suspend, |
1226 | .resume = s3c24xx_serial_resume, | 1226 | .resume = s3c24xx_serial_resume, |
1227 | }; | 1227 | }; |
1228 | #define SERIAL_SAMSUNG_PM_OPS (&s3c24xx_serial_pm_ops) | ||
1229 | |||
1228 | #else /* !CONFIG_PM_SLEEP */ | 1230 | #else /* !CONFIG_PM_SLEEP */ |
1229 | #define s3c24xx_serial_pm_ops NULL | 1231 | |
1232 | #define SERIAL_SAMSUNG_PM_OPS NULL | ||
1230 | #endif /* CONFIG_PM_SLEEP */ | 1233 | #endif /* CONFIG_PM_SLEEP */ |
1231 | 1234 | ||
1232 | int s3c24xx_serial_init(struct platform_driver *drv, | 1235 | int s3c24xx_serial_init(struct platform_driver *drv, |
1233 | struct s3c24xx_uart_info *info) | 1236 | struct s3c24xx_uart_info *info) |
1234 | { | 1237 | { |
1235 | dbg("s3c24xx_serial_init(%p,%p)\n", drv, info); | 1238 | dbg("s3c24xx_serial_init(%p,%p)\n", drv, info); |
1236 | drv->driver.pm = &s3c24xx_serial_pm_ops; | 1239 | |
1240 | drv->driver.pm = SERIAL_SAMSUNG_PM_OPS; | ||
1237 | 1241 | ||
1238 | return platform_driver_register(drv); | 1242 | return platform_driver_register(drv); |
1239 | } | 1243 | } |
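
The samsung.c hunk replaces "#define s3c24xx_serial_pm_ops NULL" (which made the later "&s3c24xx_serial_pm_ops" expand to taking the address of NULL) with a SERIAL_SAMSUNG_PM_OPS macro that already carries the address-of, so the !CONFIG_PM_SLEEP build assigns a plain NULL pointer. A compilable model of that pattern; CONFIG_PM_SLEEP is force-defined here purely for the demo, and the struct is a local stand-in:

#include <stdio.h>

struct dev_pm_ops { int (*suspend)(void); int (*resume)(void); };

#define CONFIG_PM_SLEEP 1       /* assumption: sleep support enabled */

#ifdef CONFIG_PM_SLEEP
static int demo_suspend(void) { return 0; }
static int demo_resume(void)  { return 0; }
static const struct dev_pm_ops demo_pm_ops = { demo_suspend, demo_resume };
#define DEMO_PM_OPS (&demo_pm_ops)      /* address-of hidden in the macro */
#else
#define DEMO_PM_OPS NULL                /* plain NULL, still assignable */
#endif

int main(void)
{
        const struct dev_pm_ops *pm = DEMO_PM_OPS;

        printf("pm ops %s\n", pm ? "present" : "absent");
        return 0;
}
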
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index db7912cb7ae0..a3efbea5dbba 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c | |||
@@ -200,6 +200,11 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state, int in | |||
200 | clear_bit(TTY_IO_ERROR, &tty->flags); | 200 | clear_bit(TTY_IO_ERROR, &tty->flags); |
201 | } | 201 | } |
202 | 202 | ||
203 | /* | ||
204 | * This is to allow setserial on this port. People may want to set | ||
205 | * port/irq/type and then reconfigure the port properly if it failed | ||
206 | * now. | ||
207 | */ | ||
203 | if (retval && capable(CAP_SYS_ADMIN)) | 208 | if (retval && capable(CAP_SYS_ADMIN)) |
204 | retval = 0; | 209 | retval = 0; |
205 | 210 | ||
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 2ec57b2fb278..5ea6ec3442e6 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/ctype.h> | 47 | #include <linux/ctype.h> |
48 | #include <linux/err.h> | 48 | #include <linux/err.h> |
49 | #include <linux/dmaengine.h> | 49 | #include <linux/dmaengine.h> |
50 | #include <linux/dma-mapping.h> | ||
50 | #include <linux/scatterlist.h> | 51 | #include <linux/scatterlist.h> |
51 | #include <linux/slab.h> | 52 | #include <linux/slab.h> |
52 | 53 | ||
@@ -95,6 +96,12 @@ struct sci_port { | |||
95 | #endif | 96 | #endif |
96 | 97 | ||
97 | struct notifier_block freq_transition; | 98 | struct notifier_block freq_transition; |
99 | |||
100 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE | ||
101 | unsigned short saved_smr; | ||
102 | unsigned short saved_fcr; | ||
103 | unsigned char saved_brr; | ||
104 | #endif | ||
98 | }; | 105 | }; |
99 | 106 | ||
100 | /* Function prototypes */ | 107 | /* Function prototypes */ |
@@ -1076,7 +1083,7 @@ static unsigned int sci_get_mctrl(struct uart_port *port) | |||
1076 | /* This routine is used for getting signals of: DTR, DCD, DSR, RI, | 1083 | /* This routine is used for getting signals of: DTR, DCD, DSR, RI, |
1077 | and CTS/RTS */ | 1084 | and CTS/RTS */ |
1078 | 1085 | ||
1079 | return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; | 1086 | return TIOCM_DTR | TIOCM_RTS | TIOCM_CTS | TIOCM_DSR; |
1080 | } | 1087 | } |
1081 | 1088 | ||
1082 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | 1089 | #ifdef CONFIG_SERIAL_SH_SCI_DMA |
@@ -1633,11 +1640,25 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps, | |||
1633 | return ((freq + 16 * bps) / (32 * bps) - 1); | 1640 | return ((freq + 16 * bps) / (32 * bps) - 1); |
1634 | } | 1641 | } |
1635 | 1642 | ||
1643 | static void sci_reset(struct uart_port *port) | ||
1644 | { | ||
1645 | unsigned int status; | ||
1646 | |||
1647 | do { | ||
1648 | status = sci_in(port, SCxSR); | ||
1649 | } while (!(status & SCxSR_TEND(port))); | ||
1650 | |||
1651 | sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */ | ||
1652 | |||
1653 | if (port->type != PORT_SCI) | ||
1654 | sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST); | ||
1655 | } | ||
1656 | |||
1636 | static void sci_set_termios(struct uart_port *port, struct ktermios *termios, | 1657 | static void sci_set_termios(struct uart_port *port, struct ktermios *termios, |
1637 | struct ktermios *old) | 1658 | struct ktermios *old) |
1638 | { | 1659 | { |
1639 | struct sci_port *s = to_sci_port(port); | 1660 | struct sci_port *s = to_sci_port(port); |
1640 | unsigned int status, baud, smr_val, max_baud; | 1661 | unsigned int baud, smr_val, max_baud; |
1641 | int t = -1; | 1662 | int t = -1; |
1642 | u16 scfcr = 0; | 1663 | u16 scfcr = 0; |
1643 | 1664 | ||
@@ -1657,14 +1678,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, | |||
1657 | 1678 | ||
1658 | sci_port_enable(s); | 1679 | sci_port_enable(s); |
1659 | 1680 | ||
1660 | do { | 1681 | sci_reset(port); |
1661 | status = sci_in(port, SCxSR); | ||
1662 | } while (!(status & SCxSR_TEND(port))); | ||
1663 | |||
1664 | sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */ | ||
1665 | |||
1666 | if (port->type != PORT_SCI) | ||
1667 | sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST); | ||
1668 | 1682 | ||
1669 | smr_val = sci_in(port, SCSMR) & 3; | 1683 | smr_val = sci_in(port, SCSMR) & 3; |
1670 | 1684 | ||
@@ -1913,6 +1927,7 @@ static int __devinit sci_init_single(struct platform_device *dev, | |||
1913 | 1927 | ||
1914 | port->dev = &dev->dev; | 1928 | port->dev = &dev->dev; |
1915 | 1929 | ||
1930 | pm_runtime_irq_safe(&dev->dev); | ||
1916 | pm_runtime_enable(&dev->dev); | 1931 | pm_runtime_enable(&dev->dev); |
1917 | } | 1932 | } |
1918 | 1933 | ||
@@ -2036,7 +2051,8 @@ static int __devinit serial_console_setup(struct console *co, char *options) | |||
2036 | if (options) | 2051 | if (options) |
2037 | uart_parse_options(options, &baud, &parity, &bits, &flow); | 2052 | uart_parse_options(options, &baud, &parity, &bits, &flow); |
2038 | 2053 | ||
2039 | /* TODO: disable clock */ | 2054 | sci_port_disable(sci_port); |
2055 | |||
2040 | return uart_set_options(port, co, baud, parity, bits, flow); | 2056 | return uart_set_options(port, co, baud, parity, bits, flow); |
2041 | } | 2057 | } |
2042 | 2058 | ||
@@ -2079,6 +2095,36 @@ static int __devinit sci_probe_earlyprintk(struct platform_device *pdev) | |||
2079 | return 0; | 2095 | return 0; |
2080 | } | 2096 | } |
2081 | 2097 | ||
2098 | #define uart_console(port) ((port)->cons->index == (port)->line) | ||
2099 | |||
2100 | static int sci_runtime_suspend(struct device *dev) | ||
2101 | { | ||
2102 | struct sci_port *sci_port = dev_get_drvdata(dev); | ||
2103 | struct uart_port *port = &sci_port->port; | ||
2104 | |||
2105 | if (uart_console(port)) { | ||
2106 | sci_port->saved_smr = sci_in(port, SCSMR); | ||
2107 | sci_port->saved_brr = sci_in(port, SCBRR); | ||
2108 | sci_port->saved_fcr = sci_in(port, SCFCR); | ||
2109 | } | ||
2110 | return 0; | ||
2111 | } | ||
2112 | |||
2113 | static int sci_runtime_resume(struct device *dev) | ||
2114 | { | ||
2115 | struct sci_port *sci_port = dev_get_drvdata(dev); | ||
2116 | struct uart_port *port = &sci_port->port; | ||
2117 | |||
2118 | if (uart_console(port)) { | ||
2119 | sci_reset(port); | ||
2120 | sci_out(port, SCSMR, sci_port->saved_smr); | ||
2121 | sci_out(port, SCBRR, sci_port->saved_brr); | ||
2122 | sci_out(port, SCFCR, sci_port->saved_fcr); | ||
2123 | sci_out(port, SCSCR, sci_port->cfg->scscr); | ||
2124 | } | ||
2125 | return 0; | ||
2126 | } | ||
2127 | |||
2082 | #define SCI_CONSOLE (&serial_console) | 2128 | #define SCI_CONSOLE (&serial_console) |
2083 | 2129 | ||
2084 | #else | 2130 | #else |
@@ -2088,6 +2134,8 @@ static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev) | |||
2088 | } | 2134 | } |
2089 | 2135 | ||
2090 | #define SCI_CONSOLE NULL | 2136 | #define SCI_CONSOLE NULL |
2137 | #define sci_runtime_suspend NULL | ||
2138 | #define sci_runtime_resume NULL | ||
2091 | 2139 | ||
2092 | #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ | 2140 | #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ |
2093 | 2141 | ||
@@ -2203,6 +2251,8 @@ static int sci_resume(struct device *dev) | |||
2203 | } | 2251 | } |
2204 | 2252 | ||
2205 | static const struct dev_pm_ops sci_dev_pm_ops = { | 2253 | static const struct dev_pm_ops sci_dev_pm_ops = { |
2254 | .runtime_suspend = sci_runtime_suspend, | ||
2255 | .runtime_resume = sci_runtime_resume, | ||
2206 | .suspend = sci_suspend, | 2256 | .suspend = sci_suspend, |
2207 | .resume = sci_resume, | 2257 | .resume = sci_resume, |
2208 | }; | 2258 | }; |
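
The sh-sci changes add a sci_reset() helper plus runtime PM callbacks that snapshot SCSMR/SCBRR/SCFCR for the console port on runtime suspend and write them back (after a reset) on resume, so the console keeps working after its power domain is switched off. Below is a register-free toy model of that save/restore idea; the struct layout and the values are invented for the demo.

#include <stdio.h>

struct sci_regs { unsigned short smr, fcr; unsigned char brr; };

struct sci_model {
        struct sci_regs hw;     /* "hardware" registers */
        struct sci_regs saved;  /* snapshot taken at suspend */
        int is_console;
};

static void sci_runtime_suspend(struct sci_model *p)
{
        if (p->is_console)
                p->saved = p->hw;               /* save SMR/BRR/FCR */
        p->hw = (struct sci_regs){0};           /* pretend the domain powered off */
}

static void sci_runtime_resume(struct sci_model *p)
{
        if (p->is_console)
                p->hw = p->saved;               /* restore after sci_reset() */
}

int main(void)
{
        struct sci_model port = { .hw = { 0x30, 0x06, 26 }, .is_console = 1 };

        sci_runtime_suspend(&port);
        sci_runtime_resume(&port);
        printf("smr=%#x brr=%u fcr=%#x\n",
               port.hw.smr, (unsigned)port.hw.brr, port.hw.fcr);
        return 0;
}
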
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c index c327218cad44..9af9f0879a24 100644 --- a/drivers/tty/serial/ucc_uart.c +++ b/drivers/tty/serial/ucc_uart.c | |||
@@ -235,7 +235,7 @@ static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port) | |||
235 | return qe_port->bd_virt + (addr - qe_port->bd_dma_addr); | 235 | return qe_port->bd_virt + (addr - qe_port->bd_dma_addr); |
236 | 236 | ||
237 | /* something nasty happened */ | 237 | /* something nasty happened */ |
238 | printk(KERN_ERR "%s: addr=%x\n", __func__, addr); | 238 | printk(KERN_ERR "%s: addr=%llx\n", __func__, (u64)addr); |
239 | BUG(); | 239 | BUG(); |
240 | return NULL; | 240 | return NULL; |
241 | } | 241 | } |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 150e4f747c7d..4f1fc81112e6 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
@@ -1295,8 +1295,7 @@ static int tty_driver_install_tty(struct tty_driver *driver, | |||
1295 | * | 1295 | * |
1296 | * Locking: tty_mutex for now | 1296 | * Locking: tty_mutex for now |
1297 | */ | 1297 | */ |
1298 | static void tty_driver_remove_tty(struct tty_driver *driver, | 1298 | void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty) |
1299 | struct tty_struct *tty) | ||
1300 | { | 1299 | { |
1301 | if (driver->ops->remove) | 1300 | if (driver->ops->remove) |
1302 | driver->ops->remove(driver, tty); | 1301 | driver->ops->remove(driver, tty); |
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 8669ba3fe794..73cbbd85219f 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -1775,6 +1775,8 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev, | |||
1775 | struct usb_interface *iface = usb_ifnum_to_if(udev, | 1775 | struct usb_interface *iface = usb_ifnum_to_if(udev, |
1776 | cur_alt->desc.bInterfaceNumber); | 1776 | cur_alt->desc.bInterfaceNumber); |
1777 | 1777 | ||
1778 | if (!iface) | ||
1779 | return -EINVAL; | ||
1778 | if (iface->resetting_device) { | 1780 | if (iface->resetting_device) { |
1779 | /* | 1781 | /* |
1780 | * The USB core just reset the device, so the xHCI host | 1782 | * The USB core just reset the device, so the xHCI host |
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c index 8f8d3f6cd89e..8f3eab1af885 100644 --- a/drivers/usb/gadget/f_phonet.c +++ b/drivers/usb/gadget/f_phonet.c | |||
@@ -434,6 +434,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt) | |||
434 | config_ep_by_speed(gadget, f, fp->out_ep)) { | 434 | config_ep_by_speed(gadget, f, fp->out_ep)) { |
435 | fp->in_ep->desc = NULL; | 435 | fp->in_ep->desc = NULL; |
436 | fp->out_ep->desc = NULL; | 436 | fp->out_ep->desc = NULL; |
437 | spin_unlock(&port->lock); | ||
437 | return -EINVAL; | 438 | return -EINVAL; |
438 | } | 439 | } |
439 | usb_ep_enable(fp->out_ep); | 440 | usb_ep_enable(fp->out_ep); |
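
The f_phonet fix above makes the error return inside pn_set_alt()'s locked region drop port->lock before returning -EINVAL, so a config_ep_by_speed() failure no longer leaves the spinlock held. A pthread-based model of that balanced error path (error codes and names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

static int set_alt(int config_fails)
{
        pthread_mutex_lock(&port_lock);

        if (config_fails) {
                pthread_mutex_unlock(&port_lock);   /* the line the patch adds */
                return -22;                         /* -EINVAL */
        }

        /* enable endpoints ... */
        pthread_mutex_unlock(&port_lock);
        return 0;
}

int main(void)
{
        printf("%d\n", set_alt(1));
        printf("%d\n", set_alt(0));     /* would deadlock before the fix */
        return 0;
}
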
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index e051b30c1847..4c32cb19b405 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -343,7 +343,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd) | |||
343 | u32 temp; | 343 | u32 temp; |
344 | u32 power_okay; | 344 | u32 power_okay; |
345 | int i; | 345 | int i; |
346 | u8 resume_needed = 0; | 346 | unsigned long resume_needed = 0; |
347 | 347 | ||
348 | if (time_before (jiffies, ehci->next_statechange)) | 348 | if (time_before (jiffies, ehci->next_statechange)) |
349 | msleep(5); | 349 | msleep(5); |
@@ -416,7 +416,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd) | |||
416 | if (test_bit(i, &ehci->bus_suspended) && | 416 | if (test_bit(i, &ehci->bus_suspended) && |
417 | (temp & PORT_SUSPEND)) { | 417 | (temp & PORT_SUSPEND)) { |
418 | temp |= PORT_RESUME; | 418 | temp |= PORT_RESUME; |
419 | resume_needed = 1; | 419 | set_bit(i, &resume_needed); |
420 | } | 420 | } |
421 | ehci_writel(ehci, temp, &ehci->regs->port_status [i]); | 421 | ehci_writel(ehci, temp, &ehci->regs->port_status [i]); |
422 | } | 422 | } |
@@ -431,8 +431,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd) | |||
431 | i = HCS_N_PORTS (ehci->hcs_params); | 431 | i = HCS_N_PORTS (ehci->hcs_params); |
432 | while (i--) { | 432 | while (i--) { |
433 | temp = ehci_readl(ehci, &ehci->regs->port_status [i]); | 433 | temp = ehci_readl(ehci, &ehci->regs->port_status [i]); |
434 | if (test_bit(i, &ehci->bus_suspended) && | 434 | if (test_bit(i, &resume_needed)) { |
435 | (temp & PORT_SUSPEND)) { | ||
436 | temp &= ~(PORT_RWC_BITS | PORT_RESUME); | 435 | temp &= ~(PORT_RWC_BITS | PORT_RESUME); |
437 | ehci_writel(ehci, temp, &ehci->regs->port_status [i]); | 436 | ehci_writel(ehci, temp, &ehci->regs->port_status [i]); |
438 | ehci_vdbg (ehci, "resumed port %d\n", i + 1); | 437 | ehci_vdbg (ehci, "resumed port %d\n", i + 1); |
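
The ehci-hub.c change turns resume_needed from a single flag into a per-port bitmap, so the finish-up pass touches exactly the ports the first pass put into resume rather than re-testing the suspended set against PORT_SUSPEND. A standalone model of that bookkeeping using plain bit arithmetic where the driver uses set_bit()/test_bit():

#include <stdio.h>

#define N_PORTS 8

int main(void)
{
        unsigned long bus_suspended = 0x05;     /* demo: ports 0 and 2 suspended */
        unsigned long resume_needed = 0;

        /* First pass: start resume signalling and remember which ports. */
        for (int i = 0; i < N_PORTS; i++)
                if (bus_suspended & (1UL << i))
                        resume_needed |= 1UL << i;      /* set_bit(i, &resume_needed) */

        /* Second pass: finish resume only on the ports recorded above. */
        for (int i = N_PORTS - 1; i >= 0; i--)
                if (resume_needed & (1UL << i))         /* test_bit(i, &resume_needed) */
                        printf("finish resume on port %d\n", i + 1);
        return 0;
}
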
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c index b3958b3d3163..9e77f1c8bdbd 100644 --- a/drivers/usb/host/ehci-s5p.c +++ b/drivers/usb/host/ehci-s5p.c | |||
@@ -86,6 +86,7 @@ static int __devinit s5p_ehci_probe(struct platform_device *pdev) | |||
86 | goto fail_hcd; | 86 | goto fail_hcd; |
87 | } | 87 | } |
88 | 88 | ||
89 | s5p_ehci->hcd = hcd; | ||
89 | s5p_ehci->clk = clk_get(&pdev->dev, "usbhost"); | 90 | s5p_ehci->clk = clk_get(&pdev->dev, "usbhost"); |
90 | 91 | ||
91 | if (IS_ERR(s5p_ehci->clk)) { | 92 | if (IS_ERR(s5p_ehci->clk)) { |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 0be788cc2fdb..723f8231193d 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -463,11 +463,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
463 | && (temp & PORT_POWER)) | 463 | && (temp & PORT_POWER)) |
464 | status |= USB_PORT_STAT_SUSPEND; | 464 | status |= USB_PORT_STAT_SUSPEND; |
465 | } | 465 | } |
466 | if ((temp & PORT_PLS_MASK) == XDEV_RESUME) { | 466 | if ((temp & PORT_PLS_MASK) == XDEV_RESUME && |
467 | !DEV_SUPERSPEED(temp)) { | ||
467 | if ((temp & PORT_RESET) || !(temp & PORT_PE)) | 468 | if ((temp & PORT_RESET) || !(temp & PORT_PE)) |
468 | goto error; | 469 | goto error; |
469 | if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies, | 470 | if (time_after_eq(jiffies, |
470 | bus_state->resume_done[wIndex])) { | 471 | bus_state->resume_done[wIndex])) { |
471 | xhci_dbg(xhci, "Resume USB2 port %d\n", | 472 | xhci_dbg(xhci, "Resume USB2 port %d\n", |
472 | wIndex + 1); | 473 | wIndex + 1); |
473 | bus_state->resume_done[wIndex] = 0; | 474 | bus_state->resume_done[wIndex] = 0; |
@@ -487,6 +488,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
487 | xhci_ring_device(xhci, slot_id); | 488 | xhci_ring_device(xhci, slot_id); |
488 | bus_state->port_c_suspend |= 1 << wIndex; | 489 | bus_state->port_c_suspend |= 1 << wIndex; |
489 | bus_state->suspended_ports &= ~(1 << wIndex); | 490 | bus_state->suspended_ports &= ~(1 << wIndex); |
491 | } else { | ||
492 | /* | ||
493 | * The resume has been signaling for less than | ||
494 | * 20ms. Report the port status as SUSPEND, | ||
495 | * let the usbcore check port status again | ||
496 | * and clear resume signaling later. | ||
497 | */ | ||
498 | status |= USB_PORT_STAT_SUSPEND; | ||
490 | } | 499 | } |
491 | } | 500 | } |
492 | if ((temp & PORT_PLS_MASK) == XDEV_U0 | 501 | if ((temp & PORT_PLS_MASK) == XDEV_U0 |
@@ -664,7 +673,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
664 | xhci_dbg(xhci, "PORTSC %04x\n", temp); | 673 | xhci_dbg(xhci, "PORTSC %04x\n", temp); |
665 | if (temp & PORT_RESET) | 674 | if (temp & PORT_RESET) |
666 | goto error; | 675 | goto error; |
667 | if (temp & XDEV_U3) { | 676 | if ((temp & PORT_PLS_MASK) == XDEV_U3) { |
668 | if ((temp & PORT_PE) == 0) | 677 | if ((temp & PORT_PE) == 0) |
669 | goto error; | 678 | goto error; |
670 | 679 | ||
@@ -752,7 +761,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) | |||
752 | memset(buf, 0, retval); | 761 | memset(buf, 0, retval); |
753 | status = 0; | 762 | status = 0; |
754 | 763 | ||
755 | mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC; | 764 | mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC; |
756 | 765 | ||
757 | spin_lock_irqsave(&xhci->lock, flags); | 766 | spin_lock_irqsave(&xhci->lock, flags); |
758 | /* For each port, did anything change? If so, set that bit in buf. */ | 767 | /* For each port, did anything change? If so, set that bit in buf. */ |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 7113d16e2d3a..952e2ded61af 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -514,8 +514,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
514 | (unsigned long long) addr); | 514 | (unsigned long long) addr); |
515 | } | 515 | } |
516 | 516 | ||
517 | /* flip_cycle means flip the cycle bit of all but the first and last TRB. | ||
518 | * (The last TRB actually points to the ring enqueue pointer, which is not part | ||
519 | * of this TD.) This is used to remove partially enqueued isoc TDs from a ring. | ||
520 | */ | ||
517 | static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | 521 | static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, |
518 | struct xhci_td *cur_td) | 522 | struct xhci_td *cur_td, bool flip_cycle) |
519 | { | 523 | { |
520 | struct xhci_segment *cur_seg; | 524 | struct xhci_segment *cur_seg; |
521 | union xhci_trb *cur_trb; | 525 | union xhci_trb *cur_trb; |
@@ -528,6 +532,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
528 | * leave the pointers intact. | 532 | * leave the pointers intact. |
529 | */ | 533 | */ |
530 | cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); | 534 | cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); |
535 | /* Flip the cycle bit (link TRBs can't be the first | ||
536 | * or last TRB). | ||
537 | */ | ||
538 | if (flip_cycle) | ||
539 | cur_trb->generic.field[3] ^= | ||
540 | cpu_to_le32(TRB_CYCLE); | ||
531 | xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); | 541 | xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); |
532 | xhci_dbg(xhci, "Address = %p (0x%llx dma); " | 542 | xhci_dbg(xhci, "Address = %p (0x%llx dma); " |
533 | "in seg %p (0x%llx dma)\n", | 543 | "in seg %p (0x%llx dma)\n", |
@@ -541,6 +551,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
541 | cur_trb->generic.field[2] = 0; | 551 | cur_trb->generic.field[2] = 0; |
542 | /* Preserve only the cycle bit of this TRB */ | 552 | /* Preserve only the cycle bit of this TRB */ |
543 | cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); | 553 | cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); |
554 | /* Flip the cycle bit except on the first or last TRB */ | ||
555 | if (flip_cycle && cur_trb != cur_td->first_trb && | ||
556 | cur_trb != cur_td->last_trb) | ||
557 | cur_trb->generic.field[3] ^= | ||
558 | cpu_to_le32(TRB_CYCLE); | ||
544 | cur_trb->generic.field[3] |= cpu_to_le32( | 559 | cur_trb->generic.field[3] |= cpu_to_le32( |
545 | TRB_TYPE(TRB_TR_NOOP)); | 560 | TRB_TYPE(TRB_TR_NOOP)); |
546 | xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " | 561 | xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " |
@@ -719,14 +734,14 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
719 | cur_td->urb->stream_id, | 734 | cur_td->urb->stream_id, |
720 | cur_td, &deq_state); | 735 | cur_td, &deq_state); |
721 | else | 736 | else |
722 | td_to_noop(xhci, ep_ring, cur_td); | 737 | td_to_noop(xhci, ep_ring, cur_td, false); |
723 | remove_finished_td: | 738 | remove_finished_td: |
724 | /* | 739 | /* |
725 | * The event handler won't see a completion for this TD anymore, | 740 | * The event handler won't see a completion for this TD anymore, |
726 | * so remove it from the endpoint ring's TD list. Keep it in | 741 | * so remove it from the endpoint ring's TD list. Keep it in |
727 | * the cancelled TD list for URB completion later. | 742 | * the cancelled TD list for URB completion later. |
728 | */ | 743 | */ |
729 | list_del(&cur_td->td_list); | 744 | list_del_init(&cur_td->td_list); |
730 | } | 745 | } |
731 | last_unlinked_td = cur_td; | 746 | last_unlinked_td = cur_td; |
732 | xhci_stop_watchdog_timer_in_irq(xhci, ep); | 747 | xhci_stop_watchdog_timer_in_irq(xhci, ep); |
@@ -754,7 +769,7 @@ remove_finished_td: | |||
754 | do { | 769 | do { |
755 | cur_td = list_entry(ep->cancelled_td_list.next, | 770 | cur_td = list_entry(ep->cancelled_td_list.next, |
756 | struct xhci_td, cancelled_td_list); | 771 | struct xhci_td, cancelled_td_list); |
757 | list_del(&cur_td->cancelled_td_list); | 772 | list_del_init(&cur_td->cancelled_td_list); |
758 | 773 | ||
759 | /* Clean up the cancelled URB */ | 774 | /* Clean up the cancelled URB */ |
760 | /* Doesn't matter what we pass for status, since the core will | 775 | /* Doesn't matter what we pass for status, since the core will |
@@ -862,9 +877,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) | |||
862 | cur_td = list_first_entry(&ring->td_list, | 877 | cur_td = list_first_entry(&ring->td_list, |
863 | struct xhci_td, | 878 | struct xhci_td, |
864 | td_list); | 879 | td_list); |
865 | list_del(&cur_td->td_list); | 880 | list_del_init(&cur_td->td_list); |
866 | if (!list_empty(&cur_td->cancelled_td_list)) | 881 | if (!list_empty(&cur_td->cancelled_td_list)) |
867 | list_del(&cur_td->cancelled_td_list); | 882 | list_del_init(&cur_td->cancelled_td_list); |
868 | xhci_giveback_urb_in_irq(xhci, cur_td, | 883 | xhci_giveback_urb_in_irq(xhci, cur_td, |
869 | -ESHUTDOWN, "killed"); | 884 | -ESHUTDOWN, "killed"); |
870 | } | 885 | } |
@@ -873,7 +888,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) | |||
873 | &temp_ep->cancelled_td_list, | 888 | &temp_ep->cancelled_td_list, |
874 | struct xhci_td, | 889 | struct xhci_td, |
875 | cancelled_td_list); | 890 | cancelled_td_list); |
876 | list_del(&cur_td->cancelled_td_list); | 891 | list_del_init(&cur_td->cancelled_td_list); |
877 | xhci_giveback_urb_in_irq(xhci, cur_td, | 892 | xhci_giveback_urb_in_irq(xhci, cur_td, |
878 | -ESHUTDOWN, "killed"); | 893 | -ESHUTDOWN, "killed"); |
879 | } | 894 | } |
@@ -1565,10 +1580,10 @@ td_cleanup: | |||
1565 | else | 1580 | else |
1566 | *status = 0; | 1581 | *status = 0; |
1567 | } | 1582 | } |
1568 | list_del(&td->td_list); | 1583 | list_del_init(&td->td_list); |
1569 | /* Was this TD slated to be cancelled but completed anyway? */ | 1584 | /* Was this TD slated to be cancelled but completed anyway? */ |
1570 | if (!list_empty(&td->cancelled_td_list)) | 1585 | if (!list_empty(&td->cancelled_td_list)) |
1571 | list_del(&td->cancelled_td_list); | 1586 | list_del_init(&td->cancelled_td_list); |
1572 | 1587 | ||
1573 | urb_priv->td_cnt++; | 1588 | urb_priv->td_cnt++; |
1574 | /* Giveback the urb when all the tds are completed */ | 1589 | /* Giveback the urb when all the tds are completed */ |
@@ -1919,8 +1934,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
1919 | int status = -EINPROGRESS; | 1934 | int status = -EINPROGRESS; |
1920 | struct urb_priv *urb_priv; | 1935 | struct urb_priv *urb_priv; |
1921 | struct xhci_ep_ctx *ep_ctx; | 1936 | struct xhci_ep_ctx *ep_ctx; |
1937 | struct list_head *tmp; | ||
1922 | u32 trb_comp_code; | 1938 | u32 trb_comp_code; |
1923 | int ret = 0; | 1939 | int ret = 0; |
1940 | int td_num = 0; | ||
1924 | 1941 | ||
1925 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); | 1942 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); |
1926 | xdev = xhci->devs[slot_id]; | 1943 | xdev = xhci->devs[slot_id]; |
@@ -1942,6 +1959,12 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
1942 | return -ENODEV; | 1959 | return -ENODEV; |
1943 | } | 1960 | } |
1944 | 1961 | ||
1962 | /* Count current td numbers if ep->skip is set */ | ||
1963 | if (ep->skip) { | ||
1964 | list_for_each(tmp, &ep_ring->td_list) | ||
1965 | td_num++; | ||
1966 | } | ||
1967 | |||
1945 | event_dma = le64_to_cpu(event->buffer); | 1968 | event_dma = le64_to_cpu(event->buffer); |
1946 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); | 1969 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
1947 | /* Look for common error cases */ | 1970 | /* Look for common error cases */ |
@@ -2053,7 +2076,18 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
2053 | goto cleanup; | 2076 | goto cleanup; |
2054 | } | 2077 | } |
2055 | 2078 | ||
2079 | /* We've skipped all the TDs on the ep ring when ep->skip set */ | ||
2080 | if (ep->skip && td_num == 0) { | ||
2081 | ep->skip = false; | ||
2082 | xhci_dbg(xhci, "All tds on the ep_ring skipped. " | ||
2083 | "Clear skip flag.\n"); | ||
2084 | ret = 0; | ||
2085 | goto cleanup; | ||
2086 | } | ||
2087 | |||
2056 | td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); | 2088 | td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); |
2089 | if (ep->skip) | ||
2090 | td_num--; | ||
2057 | 2091 | ||
2058 | /* Is this a TRB in the currently executing TD? */ | 2092 | /* Is this a TRB in the currently executing TD? */ |
2059 | event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, | 2093 | event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, |
@@ -2500,11 +2534,8 @@ static int prepare_transfer(struct xhci_hcd *xhci, | |||
2500 | 2534 | ||
2501 | if (td_index == 0) { | 2535 | if (td_index == 0) { |
2502 | ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); | 2536 | ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); |
2503 | if (unlikely(ret)) { | 2537 | if (unlikely(ret)) |
2504 | xhci_urb_free_priv(xhci, urb_priv); | ||
2505 | urb->hcpriv = NULL; | ||
2506 | return ret; | 2538 | return ret; |
2507 | } | ||
2508 | } | 2539 | } |
2509 | 2540 | ||
2510 | td->urb = urb; | 2541 | td->urb = urb; |
@@ -2672,6 +2703,10 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, | |||
2672 | { | 2703 | { |
2673 | int packets_transferred; | 2704 | int packets_transferred; |
2674 | 2705 | ||
2706 | /* One TRB with a zero-length data packet. */ | ||
2707 | if (running_total == 0 && trb_buff_len == 0) | ||
2708 | return 0; | ||
2709 | |||
2675 | /* All the TRB queueing functions don't count the current TRB in | 2710 | /* All the TRB queueing functions don't count the current TRB in |
2676 | * running_total. | 2711 | * running_total. |
2677 | */ | 2712 | */ |
@@ -3113,20 +3148,15 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci, | |||
3113 | struct urb *urb, int i) | 3148 | struct urb *urb, int i) |
3114 | { | 3149 | { |
3115 | int num_trbs = 0; | 3150 | int num_trbs = 0; |
3116 | u64 addr, td_len, running_total; | 3151 | u64 addr, td_len; |
3117 | 3152 | ||
3118 | addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); | 3153 | addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); |
3119 | td_len = urb->iso_frame_desc[i].length; | 3154 | td_len = urb->iso_frame_desc[i].length; |
3120 | 3155 | ||
3121 | running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); | 3156 | num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)), |
3122 | running_total &= TRB_MAX_BUFF_SIZE - 1; | 3157 | TRB_MAX_BUFF_SIZE); |
3123 | if (running_total != 0) | 3158 | if (num_trbs == 0) |
3124 | num_trbs++; | ||
3125 | |||
3126 | while (running_total < td_len) { | ||
3127 | num_trbs++; | 3159 | num_trbs++; |
3128 | running_total += TRB_MAX_BUFF_SIZE; | ||
3129 | } | ||
3130 | 3160 | ||
3131 | return num_trbs; | 3161 | return num_trbs; |
3132 | } | 3162 | } |
@@ -3226,6 +3256,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3226 | start_trb = &ep_ring->enqueue->generic; | 3256 | start_trb = &ep_ring->enqueue->generic; |
3227 | start_cycle = ep_ring->cycle_state; | 3257 | start_cycle = ep_ring->cycle_state; |
3228 | 3258 | ||
3259 | urb_priv = urb->hcpriv; | ||
3229 | /* Queue the first TRB, even if it's zero-length */ | 3260 | /* Queue the first TRB, even if it's zero-length */ |
3230 | for (i = 0; i < num_tds; i++) { | 3261 | for (i = 0; i < num_tds; i++) { |
3231 | unsigned int total_packet_count; | 3262 | unsigned int total_packet_count; |
@@ -3237,9 +3268,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3237 | addr = start_addr + urb->iso_frame_desc[i].offset; | 3268 | addr = start_addr + urb->iso_frame_desc[i].offset; |
3238 | td_len = urb->iso_frame_desc[i].length; | 3269 | td_len = urb->iso_frame_desc[i].length; |
3239 | td_remain_len = td_len; | 3270 | td_remain_len = td_len; |
3240 | /* FIXME: Ignoring zero-length packets, can those happen? */ | ||
3241 | total_packet_count = roundup(td_len, | 3271 | total_packet_count = roundup(td_len, |
3242 | le16_to_cpu(urb->ep->desc.wMaxPacketSize)); | 3272 | le16_to_cpu(urb->ep->desc.wMaxPacketSize)); |
3273 | /* A zero-length transfer still involves at least one packet. */ | ||
3274 | if (total_packet_count == 0) | ||
3275 | total_packet_count++; | ||
3243 | burst_count = xhci_get_burst_count(xhci, urb->dev, urb, | 3276 | burst_count = xhci_get_burst_count(xhci, urb->dev, urb, |
3244 | total_packet_count); | 3277 | total_packet_count); |
3245 | residue = xhci_get_last_burst_packet_count(xhci, | 3278 | residue = xhci_get_last_burst_packet_count(xhci, |
@@ -3249,12 +3282,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3249 | 3282 | ||
3250 | ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, | 3283 | ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, |
3251 | urb->stream_id, trbs_per_td, urb, i, mem_flags); | 3284 | urb->stream_id, trbs_per_td, urb, i, mem_flags); |
3252 | if (ret < 0) | 3285 | if (ret < 0) { |
3253 | return ret; | 3286 | if (i == 0) |
3287 | return ret; | ||
3288 | goto cleanup; | ||
3289 | } | ||
3254 | 3290 | ||
3255 | urb_priv = urb->hcpriv; | ||
3256 | td = urb_priv->td[i]; | 3291 | td = urb_priv->td[i]; |
3257 | |||
3258 | for (j = 0; j < trbs_per_td; j++) { | 3292 | for (j = 0; j < trbs_per_td; j++) { |
3259 | u32 remainder = 0; | 3293 | u32 remainder = 0; |
3260 | field = TRB_TBC(burst_count) | TRB_TLBPC(residue); | 3294 | field = TRB_TBC(burst_count) | TRB_TLBPC(residue); |
@@ -3344,6 +3378,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3344 | giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, | 3378 | giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, |
3345 | start_cycle, start_trb); | 3379 | start_cycle, start_trb); |
3346 | return 0; | 3380 | return 0; |
3381 | cleanup: | ||
3382 | /* Clean up a partially enqueued isoc transfer. */ | ||
3383 | |||
3384 | for (i--; i >= 0; i--) | ||
3385 | list_del_init(&urb_priv->td[i]->td_list); | ||
3386 | |||
3387 | /* Use the first TD as a temporary variable to turn the TDs we've queued | ||
3388 | * into No-ops with a software-owned cycle bit. That way the hardware | ||
3389 | * won't accidentally start executing bogus TDs when we partially | ||
3390 | * overwrite them. td->first_trb and td->start_seg are already set. | ||
3391 | */ | ||
3392 | urb_priv->td[0]->last_trb = ep_ring->enqueue; | ||
3393 | /* Every TRB except the first & last will have its cycle bit flipped. */ | ||
3394 | td_to_noop(xhci, ep_ring, urb_priv->td[0], true); | ||
3395 | |||
3396 | /* Reset the ring enqueue back to the first TRB and its cycle bit. */ | ||
3397 | ep_ring->enqueue = urb_priv->td[0]->first_trb; | ||
3398 | ep_ring->enq_seg = urb_priv->td[0]->start_seg; | ||
3399 | ep_ring->cycle_state = start_cycle; | ||
3400 | usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); | ||
3401 | return ret; | ||
3347 | } | 3402 | } |
3348 | 3403 | ||
3349 | /* | 3404 | /* |
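
Among the xhci-ring.c changes, count_isoc_trbs_needed() is rewritten so the per-TD TRB count comes from a single DIV_ROUND_UP over the TD length plus the starting offset within a TRB buffer, with a floor of one TRB for zero-length TDs. The arithmetic is easy to check standalone; the 64 KB TRB_MAX_BUFF_SIZE below is assumed to match the usual xHCI per-TRB limit.

#include <stdio.h>
#include <stdint.h>

#define TRB_MAX_BUFF_SIZE (1 << 16)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int count_trbs(uint64_t addr, uint64_t td_len)
{
        unsigned int num_trbs;

        /* Offset into the first 64 KB chunk plus the length, rounded up. */
        num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
                                TRB_MAX_BUFF_SIZE);
        if (num_trbs == 0)
                num_trbs++;     /* zero-length packets still take one TRB */
        return num_trbs;
}

int main(void)
{
        printf("%u\n", count_trbs(0xff00, 512));        /* crosses one boundary -> 2 */
        printf("%u\n", count_trbs(0x10000, 0));         /* aligned zero-length TD -> 1 */
        return 0;
}
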
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 1c4432d8fc10..3a0f695138f4 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -1085,8 +1085,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
1085 | if (urb->dev->speed == USB_SPEED_FULL) { | 1085 | if (urb->dev->speed == USB_SPEED_FULL) { |
1086 | ret = xhci_check_maxpacket(xhci, slot_id, | 1086 | ret = xhci_check_maxpacket(xhci, slot_id, |
1087 | ep_index, urb); | 1087 | ep_index, urb); |
1088 | if (ret < 0) | 1088 | if (ret < 0) { |
1089 | xhci_urb_free_priv(xhci, urb_priv); | ||
1090 | urb->hcpriv = NULL; | ||
1089 | return ret; | 1091 | return ret; |
1092 | } | ||
1090 | } | 1093 | } |
1091 | 1094 | ||
1092 | /* We have a spinlock and interrupts disabled, so we must pass | 1095 | /* We have a spinlock and interrupts disabled, so we must pass |
@@ -1097,6 +1100,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
1097 | goto dying; | 1100 | goto dying; |
1098 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, | 1101 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, |
1099 | slot_id, ep_index); | 1102 | slot_id, ep_index); |
1103 | if (ret) | ||
1104 | goto free_priv; | ||
1100 | spin_unlock_irqrestore(&xhci->lock, flags); | 1105 | spin_unlock_irqrestore(&xhci->lock, flags); |
1101 | } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { | 1106 | } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { |
1102 | spin_lock_irqsave(&xhci->lock, flags); | 1107 | spin_lock_irqsave(&xhci->lock, flags); |
@@ -1117,6 +1122,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
1117 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, | 1122 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, |
1118 | slot_id, ep_index); | 1123 | slot_id, ep_index); |
1119 | } | 1124 | } |
1125 | if (ret) | ||
1126 | goto free_priv; | ||
1120 | spin_unlock_irqrestore(&xhci->lock, flags); | 1127 | spin_unlock_irqrestore(&xhci->lock, flags); |
1121 | } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { | 1128 | } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { |
1122 | spin_lock_irqsave(&xhci->lock, flags); | 1129 | spin_lock_irqsave(&xhci->lock, flags); |
@@ -1124,6 +1131,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
1124 | goto dying; | 1131 | goto dying; |
1125 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, | 1132 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, |
1126 | slot_id, ep_index); | 1133 | slot_id, ep_index); |
1134 | if (ret) | ||
1135 | goto free_priv; | ||
1127 | spin_unlock_irqrestore(&xhci->lock, flags); | 1136 | spin_unlock_irqrestore(&xhci->lock, flags); |
1128 | } else { | 1137 | } else { |
1129 | spin_lock_irqsave(&xhci->lock, flags); | 1138 | spin_lock_irqsave(&xhci->lock, flags); |
@@ -1131,18 +1140,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
1131 | goto dying; | 1140 | goto dying; |
1132 | ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, | 1141 | ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, |
1133 | slot_id, ep_index); | 1142 | slot_id, ep_index); |
1143 | if (ret) | ||
1144 | goto free_priv; | ||
1134 | spin_unlock_irqrestore(&xhci->lock, flags); | 1145 | spin_unlock_irqrestore(&xhci->lock, flags); |
1135 | } | 1146 | } |
1136 | exit: | 1147 | exit: |
1137 | return ret; | 1148 | return ret; |
1138 | dying: | 1149 | dying: |
1139 | xhci_urb_free_priv(xhci, urb_priv); | ||
1140 | urb->hcpriv = NULL; | ||
1141 | xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " | 1150 | xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " |
1142 | "non-responsive xHCI host.\n", | 1151 | "non-responsive xHCI host.\n", |
1143 | urb->ep->desc.bEndpointAddress, urb); | 1152 | urb->ep->desc.bEndpointAddress, urb); |
1153 | ret = -ESHUTDOWN; | ||
1154 | free_priv: | ||
1155 | xhci_urb_free_priv(xhci, urb_priv); | ||
1156 | urb->hcpriv = NULL; | ||
1144 | spin_unlock_irqrestore(&xhci->lock, flags); | 1157 | spin_unlock_irqrestore(&xhci->lock, flags); |
1145 | return -ESHUTDOWN; | 1158 | return ret; |
1146 | } | 1159 | } |
1147 | 1160 | ||
1148 | /* Get the right ring for the given URB. | 1161 | /* Get the right ring for the given URB. |
@@ -1239,6 +1252,13 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
1239 | if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { | 1252 | if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { |
1240 | xhci_dbg(xhci, "HW died, freeing TD.\n"); | 1253 | xhci_dbg(xhci, "HW died, freeing TD.\n"); |
1241 | urb_priv = urb->hcpriv; | 1254 | urb_priv = urb->hcpriv; |
1255 | for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { | ||
1256 | td = urb_priv->td[i]; | ||
1257 | if (!list_empty(&td->td_list)) | ||
1258 | list_del_init(&td->td_list); | ||
1259 | if (!list_empty(&td->cancelled_td_list)) | ||
1260 | list_del_init(&td->cancelled_td_list); | ||
1261 | } | ||
1242 | 1262 | ||
1243 | usb_hcd_unlink_urb_from_ep(hcd, urb); | 1263 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
1244 | spin_unlock_irqrestore(&xhci->lock, flags); | 1264 | spin_unlock_irqrestore(&xhci->lock, flags); |
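
The xhci.c hunks route every queueing failure in xhci_urb_enqueue() through one free_priv label, so urb_priv is freed and urb->hcpriv cleared whether the ring rejects the transfer or the host is dying, instead of only on the dying path. A tiny userspace model of that goto-unwind shape; the error numbers and the allocation stand in for the real urb_priv handling:

#include <stdio.h>
#include <stdlib.h>

static int queue_transfer(int ring_rejects)
{
        int ret = 0;
        void *urb_priv = malloc(64);    /* stand-in for the per-URB state */

        if (!urb_priv)
                return -12;             /* -ENOMEM */

        if (ring_rejects) {             /* any xhci_queue_*_tx()-style failure */
                ret = -108;             /* e.g. -ESHUTDOWN */
                goto free_priv;
        }

        free(urb_priv);  /* in the driver, ownership passes to the ring instead */
        return 0;

free_priv:
        free(urb_priv);
        return ret;
}

int main(void)
{
        printf("%d %d\n", queue_transfer(0), queue_transfer(1));
        return 0;
}
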
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index ae8c39617743..5e7cfba5b079 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
20 | #include <linux/prefetch.h> | ||
20 | 21 | ||
21 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
22 | 23 | ||
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index 149f3f310a0a..318fb4e8a885 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c | |||
@@ -226,8 +226,10 @@ static int cppi_controller_stop(struct dma_controller *c) | |||
226 | struct cppi *controller; | 226 | struct cppi *controller; |
227 | void __iomem *tibase; | 227 | void __iomem *tibase; |
228 | int i; | 228 | int i; |
229 | struct musb *musb; | ||
229 | 230 | ||
230 | controller = container_of(c, struct cppi, controller); | 231 | controller = container_of(c, struct cppi, controller); |
232 | musb = controller->musb; | ||
231 | 233 | ||
232 | tibase = controller->tibase; | 234 | tibase = controller->tibase; |
233 | /* DISABLE INDIVIDUAL CHANNEL Interrupts */ | 235 | /* DISABLE INDIVIDUAL CHANNEL Interrupts */ |
@@ -289,9 +291,11 @@ cppi_channel_allocate(struct dma_controller *c, | |||
289 | u8 index; | 291 | u8 index; |
290 | struct cppi_channel *cppi_ch; | 292 | struct cppi_channel *cppi_ch; |
291 | void __iomem *tibase; | 293 | void __iomem *tibase; |
294 | struct musb *musb; | ||
292 | 295 | ||
293 | controller = container_of(c, struct cppi, controller); | 296 | controller = container_of(c, struct cppi, controller); |
294 | tibase = controller->tibase; | 297 | tibase = controller->tibase; |
298 | musb = controller->musb; | ||
295 | 299 | ||
296 | /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */ | 300 | /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */ |
297 | index = ep->epnum - 1; | 301 | index = ep->epnum - 1; |
@@ -339,7 +343,8 @@ static void cppi_channel_release(struct dma_channel *channel) | |||
339 | c = container_of(channel, struct cppi_channel, channel); | 343 | c = container_of(channel, struct cppi_channel, channel); |
340 | tibase = c->controller->tibase; | 344 | tibase = c->controller->tibase; |
341 | if (!c->hw_ep) | 345 | if (!c->hw_ep) |
342 | dev_dbg(musb->controller, "releasing idle DMA channel %p\n", c); | 346 | dev_dbg(c->controller->musb->controller, |
347 | "releasing idle DMA channel %p\n", c); | ||
343 | else if (!c->transmit) | 348 | else if (!c->transmit) |
344 | core_rxirq_enable(tibase, c->index + 1); | 349 | core_rxirq_enable(tibase, c->index + 1); |
345 | 350 | ||
@@ -357,10 +362,11 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag) | |||
357 | 362 | ||
358 | musb_ep_select(base, c->index + 1); | 363 | musb_ep_select(base, c->index + 1); |
359 | 364 | ||
360 | DBG(level, "RX DMA%d%s: %d left, csr %04x, " | 365 | dev_dbg(c->controller->musb->controller, |
361 | "%08x H%08x S%08x C%08x, " | 366 | "RX DMA%d%s: %d left, csr %04x, " |
362 | "B%08x L%08x %08x .. %08x" | 367 | "%08x H%08x S%08x C%08x, " |
363 | "\n", | 368 | "B%08x L%08x %08x .. %08x" |
369 | "\n", | ||
364 | c->index, tag, | 370 | c->index, tag, |
365 | musb_readl(c->controller->tibase, | 371 | musb_readl(c->controller->tibase, |
366 | DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), | 372 | DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), |
@@ -387,10 +393,11 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag) | |||
387 | 393 | ||
388 | musb_ep_select(base, c->index + 1); | 394 | musb_ep_select(base, c->index + 1); |
389 | 395 | ||
390 | DBG(level, "TX DMA%d%s: csr %04x, " | 396 | dev_dbg(c->controller->musb->controller, |
391 | "H%08x S%08x C%08x %08x, " | 397 | "TX DMA%d%s: csr %04x, " |
392 | "F%08x L%08x .. %08x" | 398 | "H%08x S%08x C%08x %08x, " |
393 | "\n", | 399 | "F%08x L%08x .. %08x" |
400 | "\n", | ||
394 | c->index, tag, | 401 | c->index, tag, |
395 | musb_readw(c->hw_ep->regs, MUSB_TXCSR), | 402 | musb_readw(c->hw_ep->regs, MUSB_TXCSR), |
396 | 403 | ||
@@ -1022,6 +1029,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) | |||
1022 | int i; | 1029 | int i; |
1023 | dma_addr_t safe2ack; | 1030 | dma_addr_t safe2ack; |
1024 | void __iomem *regs = rx->hw_ep->regs; | 1031 | void __iomem *regs = rx->hw_ep->regs; |
1032 | struct musb *musb = cppi->musb; | ||
1025 | 1033 | ||
1026 | cppi_dump_rx(6, rx, "/K"); | 1034 | cppi_dump_rx(6, rx, "/K"); |
1027 | 1035 | ||
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 668eeef601ae..b3c065ab9dbc 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
@@ -172,7 +172,8 @@ enum musb_g_ep0_state { | |||
172 | #endif | 172 | #endif |
173 | 173 | ||
174 | /* TUSB mapping: "flat" plus ep0 special cases */ | 174 | /* TUSB mapping: "flat" plus ep0 special cases */ |
175 | #if defined(CONFIG_USB_MUSB_TUSB6010) | 175 | #if defined(CONFIG_USB_MUSB_TUSB6010) || \ |
176 | defined(CONFIG_USB_MUSB_TUSB6010_MODULE) | ||
176 | #define musb_ep_select(_mbase, _epnum) \ | 177 | #define musb_ep_select(_mbase, _epnum) \ |
177 | musb_writeb((_mbase), MUSB_INDEX, (_epnum)) | 178 | musb_writeb((_mbase), MUSB_INDEX, (_epnum)) |
178 | #define MUSB_EP_OFFSET MUSB_TUSB_OFFSET | 179 | #define MUSB_EP_OFFSET MUSB_TUSB_OFFSET |
@@ -241,7 +242,8 @@ struct musb_hw_ep { | |||
241 | void __iomem *fifo; | 242 | void __iomem *fifo; |
242 | void __iomem *regs; | 243 | void __iomem *regs; |
243 | 244 | ||
244 | #ifdef CONFIG_USB_MUSB_TUSB6010 | 245 | #if defined(CONFIG_USB_MUSB_TUSB6010) || \ |
246 | defined(CONFIG_USB_MUSB_TUSB6010_MODULE) | ||
245 | void __iomem *conf; | 247 | void __iomem *conf; |
246 | #endif | 248 | #endif |
247 | 249 | ||
@@ -258,7 +260,8 @@ struct musb_hw_ep { | |||
258 | struct dma_channel *tx_channel; | 260 | struct dma_channel *tx_channel; |
259 | struct dma_channel *rx_channel; | 261 | struct dma_channel *rx_channel; |
260 | 262 | ||
261 | #ifdef CONFIG_USB_MUSB_TUSB6010 | 263 | #if defined(CONFIG_USB_MUSB_TUSB6010) || \ |
264 | defined(CONFIG_USB_MUSB_TUSB6010_MODULE) | ||
262 | /* TUSB has "asynchronous" and "synchronous" dma modes */ | 265 | /* TUSB has "asynchronous" and "synchronous" dma modes */ |
263 | dma_addr_t fifo_async; | 266 | dma_addr_t fifo_async; |
264 | dma_addr_t fifo_sync; | 267 | dma_addr_t fifo_sync; |
@@ -356,7 +359,8 @@ struct musb { | |||
356 | void __iomem *ctrl_base; | 359 | void __iomem *ctrl_base; |
357 | void __iomem *mregs; | 360 | void __iomem *mregs; |
358 | 361 | ||
359 | #ifdef CONFIG_USB_MUSB_TUSB6010 | 362 | #if defined(CONFIG_USB_MUSB_TUSB6010) || \ |
363 | defined(CONFIG_USB_MUSB_TUSB6010_MODULE) | ||
360 | dma_addr_t async; | 364 | dma_addr_t async; |
361 | dma_addr_t sync; | 365 | dma_addr_t sync; |
362 | void __iomem *sync_va; | 366 | void __iomem *sync_va; |
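The musb_core.h (and musb_regs.h) hunks widen `#ifdef CONFIG_USB_MUSB_TUSB6010` because Kconfig defines CONFIG_FOO_MODULE, not CONFIG_FOO, when an option is built as =m, so the TUSB-specific fields disappeared in modular builds. A reduced sketch of the two spellings; the IS_ENABLED() alternative is provided by <linux/kconfig.h> in later kernels:

```c
/* =y defines CONFIG_USB_MUSB_TUSB6010, =m defines
 * CONFIG_USB_MUSB_TUSB6010_MODULE, so both must be tested. */
#if defined(CONFIG_USB_MUSB_TUSB6010) || \
	defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
#define HAVE_TUSB6010	1
#else
#define HAVE_TUSB6010	0
#endif

/* Equivalent where <linux/kconfig.h> is available:
 *   #if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
 */
```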
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 8c41a2e6ea77..e81820370d6f 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -1856,6 +1856,7 @@ int __init musb_gadget_setup(struct musb *musb) | |||
1856 | 1856 | ||
1857 | return 0; | 1857 | return 0; |
1858 | err: | 1858 | err: |
1859 | musb->g.dev.parent = NULL; | ||
1859 | device_unregister(&musb->g.dev); | 1860 | device_unregister(&musb->g.dev); |
1860 | return status; | 1861 | return status; |
1861 | } | 1862 | } |
@@ -1863,7 +1864,8 @@ err: | |||
1863 | void musb_gadget_cleanup(struct musb *musb) | 1864 | void musb_gadget_cleanup(struct musb *musb) |
1864 | { | 1865 | { |
1865 | usb_del_gadget_udc(&musb->g); | 1866 | usb_del_gadget_udc(&musb->g); |
1866 | device_unregister(&musb->g.dev); | 1867 | if (musb->g.dev.parent) |
1868 | device_unregister(&musb->g.dev); | ||
1867 | } | 1869 | } |
1868 | 1870 | ||
1869 | /* | 1871 | /* |
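The musb_gadget.c hunks use g.dev.parent as a marker: it is cleared when setup fails so that musb_gadget_cleanup() can tell whether the gadget device is still registered and skip a second device_unregister(). A simplified sketch of the same guard, using an explicit flag instead of the parent pointer and hypothetical my_* names:

```c
/* Sketch only: remember whether registration succeeded so cleanup
 * never unregisters the same device twice. */
#include <linux/device.h>

struct my_gadget {			/* hypothetical stand-in for musb->g */
	struct device dev;
	bool registered;
};

static void my_gadget_cleanup(struct my_gadget *g)
{
	if (g->registered) {
		device_unregister(&g->dev);
		g->registered = false;
	}
}
```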
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h index 82410703dcd3..03f2655af290 100644 --- a/drivers/usb/musb/musb_regs.h +++ b/drivers/usb/musb/musb_regs.h | |||
@@ -234,7 +234,8 @@ | |||
234 | #define MUSB_TESTMODE 0x0F /* 8 bit */ | 234 | #define MUSB_TESTMODE 0x0F /* 8 bit */ |
235 | 235 | ||
236 | /* Get offset for a given FIFO from musb->mregs */ | 236 | /* Get offset for a given FIFO from musb->mregs */ |
237 | #ifdef CONFIG_USB_MUSB_TUSB6010 | 237 | #if defined(CONFIG_USB_MUSB_TUSB6010) || \ |
238 | defined(CONFIG_USB_MUSB_TUSB6010_MODULE) | ||
238 | #define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) | 239 | #define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) |
239 | #else | 240 | #else |
240 | #define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) | 241 | #define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) |
@@ -295,7 +296,8 @@ | |||
295 | #define MUSB_FLAT_OFFSET(_epnum, _offset) \ | 296 | #define MUSB_FLAT_OFFSET(_epnum, _offset) \ |
296 | (0x100 + (0x10*(_epnum)) + (_offset)) | 297 | (0x100 + (0x10*(_epnum)) + (_offset)) |
297 | 298 | ||
298 | #ifdef CONFIG_USB_MUSB_TUSB6010 | 299 | #if defined(CONFIG_USB_MUSB_TUSB6010) || \ |
300 | defined(CONFIG_USB_MUSB_TUSB6010_MODULE) | ||
299 | /* TUSB6010 EP0 configuration register is special */ | 301 | /* TUSB6010 EP0 configuration register is special */ |
300 | #define MUSB_TUSB_OFFSET(_epnum, _offset) \ | 302 | #define MUSB_TUSB_OFFSET(_epnum, _offset) \ |
301 | (0x10 + _offset) | 303 | (0x10 + _offset) |
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 9eec41fbf3a4..ec1480191f78 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/prefetch.h> | ||
21 | #include <linux/usb.h> | 22 | #include <linux/usb.h> |
22 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
23 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c index 07c8a73dfe41..b67b4bc596c1 100644 --- a/drivers/usb/musb/tusb6010_omap.c +++ b/drivers/usb/musb/tusb6010_omap.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <plat/mux.h> | 20 | #include <plat/mux.h> |
21 | 21 | ||
22 | #include "musb_core.h" | 22 | #include "musb_core.h" |
23 | #include "tusb6010.h" | ||
23 | 24 | ||
24 | #define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data) | 25 | #define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data) |
25 | 26 | ||
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c index cecace411832..ef4333f4bbe0 100644 --- a/drivers/usb/musb/ux500_dma.c +++ b/drivers/usb/musb/ux500_dma.c | |||
@@ -65,7 +65,8 @@ static void ux500_tx_work(struct work_struct *data) | |||
65 | struct musb *musb = hw_ep->musb; | 65 | struct musb *musb = hw_ep->musb; |
66 | unsigned long flags; | 66 | unsigned long flags; |
67 | 67 | ||
68 | DBG(4, "DMA tx transfer done on hw_ep=%d\n", hw_ep->epnum); | 68 | dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n", |
69 | hw_ep->epnum); | ||
69 | 70 | ||
70 | spin_lock_irqsave(&musb->lock, flags); | 71 | spin_lock_irqsave(&musb->lock, flags); |
71 | ux500_channel->channel.actual_len = ux500_channel->cur_len; | 72 | ux500_channel->channel.actual_len = ux500_channel->cur_len; |
@@ -84,7 +85,8 @@ static void ux500_rx_work(struct work_struct *data) | |||
84 | struct musb *musb = hw_ep->musb; | 85 | struct musb *musb = hw_ep->musb; |
85 | unsigned long flags; | 86 | unsigned long flags; |
86 | 87 | ||
87 | DBG(4, "DMA rx transfer done on hw_ep=%d\n", hw_ep->epnum); | 88 | dev_dbg(musb->controller, "DMA rx transfer done on hw_ep=%d\n", |
89 | hw_ep->epnum); | ||
88 | 90 | ||
89 | spin_lock_irqsave(&musb->lock, flags); | 91 | spin_lock_irqsave(&musb->lock, flags); |
90 | ux500_channel->channel.actual_len = ux500_channel->cur_len; | 92 | ux500_channel->channel.actual_len = ux500_channel->cur_len; |
@@ -116,9 +118,11 @@ static bool ux500_configure_channel(struct dma_channel *channel, | |||
116 | enum dma_slave_buswidth addr_width; | 118 | enum dma_slave_buswidth addr_width; |
117 | dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) + | 119 | dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) + |
118 | ux500_channel->controller->phy_base); | 120 | ux500_channel->controller->phy_base); |
121 | struct musb *musb = ux500_channel->controller->private_data; | ||
119 | 122 | ||
120 | DBG(4, "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n", | 123 | dev_dbg(musb->controller, |
121 | packet_sz, mode, dma_addr, len, ux500_channel->is_tx); | 124 | "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n", |
125 | packet_sz, mode, dma_addr, len, ux500_channel->is_tx); | ||
122 | 126 | ||
123 | ux500_channel->cur_len = len; | 127 | ux500_channel->cur_len = len; |
124 | 128 | ||
@@ -133,15 +137,13 @@ static bool ux500_configure_channel(struct dma_channel *channel, | |||
133 | DMA_SLAVE_BUSWIDTH_4_BYTES; | 137 | DMA_SLAVE_BUSWIDTH_4_BYTES; |
134 | 138 | ||
135 | slave_conf.direction = direction; | 139 | slave_conf.direction = direction; |
136 | if (direction == DMA_FROM_DEVICE) { | 140 | slave_conf.src_addr = usb_fifo_addr; |
137 | slave_conf.src_addr = usb_fifo_addr; | 141 | slave_conf.src_addr_width = addr_width; |
138 | slave_conf.src_addr_width = addr_width; | 142 | slave_conf.src_maxburst = 16; |
139 | slave_conf.src_maxburst = 16; | 143 | slave_conf.dst_addr = usb_fifo_addr; |
140 | } else { | 144 | slave_conf.dst_addr_width = addr_width; |
141 | slave_conf.dst_addr = usb_fifo_addr; | 145 | slave_conf.dst_maxburst = 16; |
142 | slave_conf.dst_addr_width = addr_width; | 146 | |
143 | slave_conf.dst_maxburst = 16; | ||
144 | } | ||
145 | dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG, | 147 | dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG, |
146 | (unsigned long) &slave_conf); | 148 | (unsigned long) &slave_conf); |
147 | 149 | ||
@@ -166,6 +168,7 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c, | |||
166 | struct ux500_dma_controller *controller = container_of(c, | 168 | struct ux500_dma_controller *controller = container_of(c, |
167 | struct ux500_dma_controller, controller); | 169 | struct ux500_dma_controller, controller); |
168 | struct ux500_dma_channel *ux500_channel = NULL; | 170 | struct ux500_dma_channel *ux500_channel = NULL; |
171 | struct musb *musb = controller->private_data; | ||
169 | u8 ch_num = hw_ep->epnum - 1; | 172 | u8 ch_num = hw_ep->epnum - 1; |
170 | u32 max_ch; | 173 | u32 max_ch; |
171 | 174 | ||
@@ -192,7 +195,7 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c, | |||
192 | ux500_channel->hw_ep = hw_ep; | 195 | ux500_channel->hw_ep = hw_ep; |
193 | ux500_channel->is_allocated = 1; | 196 | ux500_channel->is_allocated = 1; |
194 | 197 | ||
195 | DBG(7, "hw_ep=%d, is_tx=0x%x, channel=%d\n", | 198 | dev_dbg(musb->controller, "hw_ep=%d, is_tx=0x%x, channel=%d\n", |
196 | hw_ep->epnum, is_tx, ch_num); | 199 | hw_ep->epnum, is_tx, ch_num); |
197 | 200 | ||
198 | return &(ux500_channel->channel); | 201 | return &(ux500_channel->channel); |
@@ -201,8 +204,9 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c, | |||
201 | static void ux500_dma_channel_release(struct dma_channel *channel) | 204 | static void ux500_dma_channel_release(struct dma_channel *channel) |
202 | { | 205 | { |
203 | struct ux500_dma_channel *ux500_channel = channel->private_data; | 206 | struct ux500_dma_channel *ux500_channel = channel->private_data; |
207 | struct musb *musb = ux500_channel->controller->private_data; | ||
204 | 208 | ||
205 | DBG(7, "channel=%d\n", ux500_channel->ch_num); | 209 | dev_dbg(musb->controller, "channel=%d\n", ux500_channel->ch_num); |
206 | 210 | ||
207 | if (ux500_channel->is_allocated) { | 211 | if (ux500_channel->is_allocated) { |
208 | ux500_channel->is_allocated = 0; | 212 | ux500_channel->is_allocated = 0; |
@@ -252,8 +256,8 @@ static int ux500_dma_channel_abort(struct dma_channel *channel) | |||
252 | void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs; | 256 | void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs; |
253 | u16 csr; | 257 | u16 csr; |
254 | 258 | ||
255 | DBG(4, "channel=%d, is_tx=%d\n", ux500_channel->ch_num, | 259 | dev_dbg(musb->controller, "channel=%d, is_tx=%d\n", |
256 | ux500_channel->is_tx); | 260 | ux500_channel->ch_num, ux500_channel->is_tx); |
257 | 261 | ||
258 | if (channel->status == MUSB_DMA_STATUS_BUSY) { | 262 | if (channel->status == MUSB_DMA_STATUS_BUSY) { |
259 | if (ux500_channel->is_tx) { | 263 | if (ux500_channel->is_tx) { |
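The ux500_dma.c hunk drops the direction-dependent branch and fills both the source and destination halves of the dma_slave_config; the DMA engine reads only the half that matches the transfer direction, so setting both is harmless and shorter. A reduced sketch of such a fill, with the burst size of 16 taken from the hunk and my_fill_slave_config a hypothetical helper:

```c
/* Sketch only: src_* is used for device-to-memory (RX), dst_* for
 * memory-to-device (TX); the unused half is simply ignored. */
#include <linux/dmaengine.h>
#include <linux/string.h>

static void my_fill_slave_config(struct dma_slave_config *cfg,
				 dma_addr_t fifo,
				 enum dma_slave_buswidth width)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->src_addr = fifo;
	cfg->src_addr_width = width;
	cfg->src_maxburst = 16;
	cfg->dst_addr = fifo;
	cfg->dst_addr_width = width;
	cfg->dst_maxburst = 16;
}
```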
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 78a2cf9551cc..5fc13e717911 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -101,6 +101,7 @@ static int ftdi_jtag_probe(struct usb_serial *serial); | |||
101 | static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); | 101 | static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); |
102 | static int ftdi_NDI_device_setup(struct usb_serial *serial); | 102 | static int ftdi_NDI_device_setup(struct usb_serial *serial); |
103 | static int ftdi_stmclite_probe(struct usb_serial *serial); | 103 | static int ftdi_stmclite_probe(struct usb_serial *serial); |
104 | static int ftdi_8u2232c_probe(struct usb_serial *serial); | ||
104 | static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); | 105 | static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); |
105 | static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); | 106 | static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); |
106 | 107 | ||
@@ -128,6 +129,10 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = { | |||
128 | .probe = ftdi_stmclite_probe, | 129 | .probe = ftdi_stmclite_probe, |
129 | }; | 130 | }; |
130 | 131 | ||
132 | static struct ftdi_sio_quirk ftdi_8u2232c_quirk = { | ||
133 | .probe = ftdi_8u2232c_probe, | ||
134 | }; | ||
135 | |||
131 | /* | 136 | /* |
132 | * The 8U232AM has the same API as the sio except for: | 137 | * The 8U232AM has the same API as the sio except for: |
133 | * - it can support MUCH higher baudrates; up to: | 138 | * - it can support MUCH higher baudrates; up to: |
@@ -178,7 +183,8 @@ static struct usb_device_id id_table_combined [] = { | |||
178 | { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) }, | 183 | { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) }, |
179 | { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) }, | 184 | { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) }, |
180 | { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, | 185 | { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, |
181 | { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) }, | 186 | { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) , |
187 | .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk }, | ||
182 | { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, | 188 | { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, |
183 | { USB_DEVICE(FTDI_VID, FTDI_232H_PID) }, | 189 | { USB_DEVICE(FTDI_VID, FTDI_232H_PID) }, |
184 | { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, | 190 | { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, |
@@ -1737,6 +1743,18 @@ static int ftdi_jtag_probe(struct usb_serial *serial) | |||
1737 | return 0; | 1743 | return 0; |
1738 | } | 1744 | } |
1739 | 1745 | ||
1746 | static int ftdi_8u2232c_probe(struct usb_serial *serial) | ||
1747 | { | ||
1748 | struct usb_device *udev = serial->dev; | ||
1749 | |||
1750 | dbg("%s", __func__); | ||
1751 | |||
1752 | if (strcmp(udev->manufacturer, "CALAO Systems") == 0) | ||
1753 | return ftdi_jtag_probe(serial); | ||
1754 | |||
1755 | return 0; | ||
1756 | } | ||
1757 | |||
1740 | /* | 1758 | /* |
1741 | * First and second port on STMCLiteadaptors is reserved for JTAG interface | 1759 | * First and second port on STMCLiteadaptors is reserved for JTAG interface |
1742 | * and the forth port for pio | 1760 | * and the forth port for pio |
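The ftdi_sio.c hunks attach a quirk through .driver_info so the FT2232C probe can divert CALAO Systems boards, whose first port carries a JTAG interface, to the existing JTAG handling. A minimal sketch of a manufacturer-string-keyed probe; my_quirk_probe is hypothetical, and the NULL check is added here because a device may not supply a manufacturer string at all:

```c
/* Sketch only: decline an interface based on the USB manufacturer string. */
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static int my_quirk_probe(struct usb_serial *serial)
{
	struct usb_device *udev = serial->dev;

	/* udev->manufacturer is NULL when no string descriptor is present */
	if (udev->manufacturer &&
	    strcmp(udev->manufacturer, "CALAO Systems") == 0)
		return -ENODEV;	/* leave this interface to other handling */

	return 0;
}
```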
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 815656198914..fe22e90bc879 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -148,6 +148,8 @@ static void option_instat_callback(struct urb *urb); | |||
148 | #define HUAWEI_PRODUCT_K4505 0x1464 | 148 | #define HUAWEI_PRODUCT_K4505 0x1464 |
149 | #define HUAWEI_PRODUCT_K3765 0x1465 | 149 | #define HUAWEI_PRODUCT_K3765 0x1465 |
150 | #define HUAWEI_PRODUCT_E14AC 0x14AC | 150 | #define HUAWEI_PRODUCT_E14AC 0x14AC |
151 | #define HUAWEI_PRODUCT_K3806 0x14AE | ||
152 | #define HUAWEI_PRODUCT_K4605 0x14C6 | ||
151 | #define HUAWEI_PRODUCT_K3770 0x14C9 | 153 | #define HUAWEI_PRODUCT_K3770 0x14C9 |
152 | #define HUAWEI_PRODUCT_K3771 0x14CA | 154 | #define HUAWEI_PRODUCT_K3771 0x14CA |
153 | #define HUAWEI_PRODUCT_K4510 0x14CB | 155 | #define HUAWEI_PRODUCT_K4510 0x14CB |
@@ -416,6 +418,56 @@ static void option_instat_callback(struct urb *urb); | |||
416 | #define SAMSUNG_VENDOR_ID 0x04e8 | 418 | #define SAMSUNG_VENDOR_ID 0x04e8 |
417 | #define SAMSUNG_PRODUCT_GT_B3730 0x6889 | 419 | #define SAMSUNG_PRODUCT_GT_B3730 0x6889 |
418 | 420 | ||
421 | /* YUGA products www.yuga-info.com*/ | ||
422 | #define YUGA_VENDOR_ID 0x257A | ||
423 | #define YUGA_PRODUCT_CEM600 0x1601 | ||
424 | #define YUGA_PRODUCT_CEM610 0x1602 | ||
425 | #define YUGA_PRODUCT_CEM500 0x1603 | ||
426 | #define YUGA_PRODUCT_CEM510 0x1604 | ||
427 | #define YUGA_PRODUCT_CEM800 0x1605 | ||
428 | #define YUGA_PRODUCT_CEM900 0x1606 | ||
429 | |||
430 | #define YUGA_PRODUCT_CEU818 0x1607 | ||
431 | #define YUGA_PRODUCT_CEU816 0x1608 | ||
432 | #define YUGA_PRODUCT_CEU828 0x1609 | ||
433 | #define YUGA_PRODUCT_CEU826 0x160A | ||
434 | #define YUGA_PRODUCT_CEU518 0x160B | ||
435 | #define YUGA_PRODUCT_CEU516 0x160C | ||
436 | #define YUGA_PRODUCT_CEU528 0x160D | ||
437 | #define YUGA_PRODUCT_CEU526 0x160F | ||
438 | |||
439 | #define YUGA_PRODUCT_CWM600 0x2601 | ||
440 | #define YUGA_PRODUCT_CWM610 0x2602 | ||
441 | #define YUGA_PRODUCT_CWM500 0x2603 | ||
442 | #define YUGA_PRODUCT_CWM510 0x2604 | ||
443 | #define YUGA_PRODUCT_CWM800 0x2605 | ||
444 | #define YUGA_PRODUCT_CWM900 0x2606 | ||
445 | |||
446 | #define YUGA_PRODUCT_CWU718 0x2607 | ||
447 | #define YUGA_PRODUCT_CWU716 0x2608 | ||
448 | #define YUGA_PRODUCT_CWU728 0x2609 | ||
449 | #define YUGA_PRODUCT_CWU726 0x260A | ||
450 | #define YUGA_PRODUCT_CWU518 0x260B | ||
451 | #define YUGA_PRODUCT_CWU516 0x260C | ||
452 | #define YUGA_PRODUCT_CWU528 0x260D | ||
453 | #define YUGA_PRODUCT_CWU526 0x260F | ||
454 | |||
455 | #define YUGA_PRODUCT_CLM600 0x2601 | ||
456 | #define YUGA_PRODUCT_CLM610 0x2602 | ||
457 | #define YUGA_PRODUCT_CLM500 0x2603 | ||
458 | #define YUGA_PRODUCT_CLM510 0x2604 | ||
459 | #define YUGA_PRODUCT_CLM800 0x2605 | ||
460 | #define YUGA_PRODUCT_CLM900 0x2606 | ||
461 | |||
462 | #define YUGA_PRODUCT_CLU718 0x2607 | ||
463 | #define YUGA_PRODUCT_CLU716 0x2608 | ||
464 | #define YUGA_PRODUCT_CLU728 0x2609 | ||
465 | #define YUGA_PRODUCT_CLU726 0x260A | ||
466 | #define YUGA_PRODUCT_CLU518 0x260B | ||
467 | #define YUGA_PRODUCT_CLU516 0x260C | ||
468 | #define YUGA_PRODUCT_CLU528 0x260D | ||
469 | #define YUGA_PRODUCT_CLU526 0x260F | ||
470 | |||
419 | /* some devices interfaces need special handling due to a number of reasons */ | 471 | /* some devices interfaces need special handling due to a number of reasons */ |
420 | enum option_blacklist_reason { | 472 | enum option_blacklist_reason { |
421 | OPTION_BLACKLIST_NONE = 0, | 473 | OPTION_BLACKLIST_NONE = 0, |
@@ -551,6 +603,8 @@ static const struct usb_device_id option_ids[] = { | |||
551 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, | 603 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, |
552 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, | 604 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, |
553 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, | 605 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, |
606 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, | ||
607 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff) }, | ||
554 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, | 608 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, |
555 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, | 609 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, |
556 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, | 610 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, |
@@ -1005,6 +1059,48 @@ static const struct usb_device_id option_ids[] = { | |||
1005 | { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ | 1059 | { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ |
1006 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ | 1060 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ |
1007 | { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ | 1061 | { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ |
1062 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, | ||
1063 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) }, | ||
1064 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) }, | ||
1065 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM510) }, | ||
1066 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM800) }, | ||
1067 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM900) }, | ||
1068 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU818) }, | ||
1069 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU816) }, | ||
1070 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU828) }, | ||
1071 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU826) }, | ||
1072 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU518) }, | ||
1073 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU516) }, | ||
1074 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU528) }, | ||
1075 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU526) }, | ||
1076 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM600) }, | ||
1077 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM610) }, | ||
1078 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM500) }, | ||
1079 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM510) }, | ||
1080 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM800) }, | ||
1081 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM900) }, | ||
1082 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU718) }, | ||
1083 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU716) }, | ||
1084 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU728) }, | ||
1085 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU726) }, | ||
1086 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU518) }, | ||
1087 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU516) }, | ||
1088 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU528) }, | ||
1089 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU526) }, | ||
1090 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM600) }, | ||
1091 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM610) }, | ||
1092 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM500) }, | ||
1093 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM510) }, | ||
1094 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM800) }, | ||
1095 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM900) }, | ||
1096 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU718) }, | ||
1097 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU716) }, | ||
1098 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU728) }, | ||
1099 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU726) }, | ||
1100 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU518) }, | ||
1101 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, | ||
1102 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, | ||
1103 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, | ||
1008 | { } /* Terminating entry */ | 1104 | { } /* Terminating entry */ |
1009 | }; | 1105 | }; |
1010 | MODULE_DEVICE_TABLE(usb, option_ids); | 1106 | MODULE_DEVICE_TABLE(usb, option_ids); |
@@ -1134,11 +1230,13 @@ static int option_probe(struct usb_serial *serial, | |||
1134 | serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) | 1230 | serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) |
1135 | return -ENODEV; | 1231 | return -ENODEV; |
1136 | 1232 | ||
1137 | /* Don't bind network interfaces on Huawei K3765 & K4505 */ | 1233 | /* Don't bind network interfaces on Huawei K3765, K4505 & K4605 */ |
1138 | if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID && | 1234 | if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID && |
1139 | (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 || | 1235 | (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 || |
1140 | serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) && | 1236 | serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505 || |
1141 | serial->interface->cur_altsetting->desc.bInterfaceNumber == 1) | 1237 | serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4605) && |
1238 | (serial->interface->cur_altsetting->desc.bInterfaceNumber == 1 || | ||
1239 | serial->interface->cur_altsetting->desc.bInterfaceNumber == 2)) | ||
1142 | return -ENODEV; | 1240 | return -ENODEV; |
1143 | 1241 | ||
1144 | /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */ | 1242 | /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */ |
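The option.c hunks add the Huawei K3806/K4605 and a block of YUGA modem IDs, and extend option_probe() so the network interfaces of the Huawei sticks stay unbound; returning -ENODEV from probe is how a usb-serial driver declines an interface. A reduced sketch of that decision, keyed on the K4605 IDs from the table above (my_option_probe is hypothetical):

```c
/* Sketch only: refuse interfaces that belong to the network function. */
#include <linux/errno.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static int my_option_probe(struct usb_serial *serial,
			   const struct usb_device_id *id)
{
	u16 vid = le16_to_cpu(serial->dev->descriptor.idVendor);
	u16 pid = le16_to_cpu(serial->dev->descriptor.idProduct);
	u8 ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;

	/* 0x12d1/0x14c6 is the Huawei K4605 from the ID table above */
	if (vid == 0x12d1 && pid == 0x14c6 && (ifnum == 1 || ifnum == 2))
		return -ENODEV;

	return 0;
}
```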
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c index 05a8832bb3eb..d06886a2bfb5 100644 --- a/drivers/video/backlight/adp8870_bl.c +++ b/drivers/video/backlight/adp8870_bl.c | |||
@@ -1009,4 +1009,4 @@ module_exit(adp8870_exit); | |||
1009 | MODULE_LICENSE("GPL v2"); | 1009 | MODULE_LICENSE("GPL v2"); |
1010 | MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); | 1010 | MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); |
1011 | MODULE_DESCRIPTION("ADP8870 Backlight driver"); | 1011 | MODULE_DESCRIPTION("ADP8870 Backlight driver"); |
1012 | MODULE_ALIAS("platform:adp8870-backlight"); | 1012 | MODULE_ALIAS("i2c:adp8870-backlight"); |
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 80d292fb92d8..7363c1b169e8 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <asm/backlight.h> | 19 | #include <asm/backlight.h> |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | static const char const *backlight_types[] = { | 22 | static const char *const backlight_types[] = { |
23 | [BACKLIGHT_RAW] = "raw", | 23 | [BACKLIGHT_RAW] = "raw", |
24 | [BACKLIGHT_PLATFORM] = "platform", | 24 | [BACKLIGHT_PLATFORM] = "platform", |
25 | [BACKLIGHT_FIRMWARE] = "firmware", | 25 | [BACKLIGHT_FIRMWARE] = "firmware", |
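The backlight.c hunk fixes a duplicated qualifier: `const char const *` applies const to the pointed-to characters twice and leaves the pointer array itself writable, while the intended `const char *const` makes the array of pointers read-only as well. A standalone illustration that compiles with any C compiler:

```c
/* Demonstrates the corrected declaration: both the strings and the
 * array of pointers are read-only. */
#include <stdio.h>

static const char *const types[] = { "raw", "platform", "firmware" };

int main(void)
{
	/* types[0] = "other";   error: the pointer elements are const */
	/* types[0][0] = 'R';    error: the characters are const too   */
	printf("%s\n", types[1]);
	return 0;
}
```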
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c index 9f1e389d51d2..b0582917f0c8 100644 --- a/drivers/video/backlight/ep93xx_bl.c +++ b/drivers/video/backlight/ep93xx_bl.c | |||
@@ -11,7 +11,7 @@ | |||
11 | * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors. | 11 | * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | 14 | #include <linux/module.h> | |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <linux/fb.h> | 17 | #include <linux/fb.h> |
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index b8f38ec6eb18..8b5b2a4124c7 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c | |||
@@ -28,6 +28,8 @@ struct pwm_bl_data { | |||
28 | unsigned int lth_brightness; | 28 | unsigned int lth_brightness; |
29 | int (*notify)(struct device *, | 29 | int (*notify)(struct device *, |
30 | int brightness); | 30 | int brightness); |
31 | void (*notify_after)(struct device *, | ||
32 | int brightness); | ||
31 | int (*check_fb)(struct device *, struct fb_info *); | 33 | int (*check_fb)(struct device *, struct fb_info *); |
32 | }; | 34 | }; |
33 | 35 | ||
@@ -55,6 +57,10 @@ static int pwm_backlight_update_status(struct backlight_device *bl) | |||
55 | pwm_config(pb->pwm, brightness, pb->period); | 57 | pwm_config(pb->pwm, brightness, pb->period); |
56 | pwm_enable(pb->pwm); | 58 | pwm_enable(pb->pwm); |
57 | } | 59 | } |
60 | |||
61 | if (pb->notify_after) | ||
62 | pb->notify_after(pb->dev, brightness); | ||
63 | |||
58 | return 0; | 64 | return 0; |
59 | } | 65 | } |
60 | 66 | ||
@@ -105,6 +111,7 @@ static int pwm_backlight_probe(struct platform_device *pdev) | |||
105 | 111 | ||
106 | pb->period = data->pwm_period_ns; | 112 | pb->period = data->pwm_period_ns; |
107 | pb->notify = data->notify; | 113 | pb->notify = data->notify; |
114 | pb->notify_after = data->notify_after; | ||
108 | pb->check_fb = data->check_fb; | 115 | pb->check_fb = data->check_fb; |
109 | pb->lth_brightness = data->lth_brightness * | 116 | pb->lth_brightness = data->lth_brightness * |
110 | (data->pwm_period_ns / data->max_brightness); | 117 | (data->pwm_period_ns / data->max_brightness); |
@@ -172,6 +179,8 @@ static int pwm_backlight_suspend(struct platform_device *pdev, | |||
172 | pb->notify(pb->dev, 0); | 179 | pb->notify(pb->dev, 0); |
173 | pwm_config(pb->pwm, 0, pb->period); | 180 | pwm_config(pb->pwm, 0, pb->period); |
174 | pwm_disable(pb->pwm); | 181 | pwm_disable(pb->pwm); |
182 | if (pb->notify_after) | ||
183 | pb->notify_after(pb->dev, 0); | ||
175 | return 0; | 184 | return 0; |
176 | } | 185 | } |
177 | 186 | ||
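The pwm_bl.c hunks add an optional notify_after() hook that runs once the PWM has been reprogrammed (the existing notify() runs before), including on the suspend path. A reduced sketch of board code supplying the hook through platform data, assuming the matching notify_after field added to struct platform_pwm_backlight_data alongside this hunk; my_board_bl_done and the numeric values are hypothetical:

```c
/* Sketch only: platform data with a post-update callback. */
#include <linux/device.h>
#include <linux/pwm_backlight.h>

static void my_board_bl_done(struct device *dev, int brightness)
{
	/* e.g. toggle a backlight-enable GPIO now that the PWM is set */
}

static struct platform_pwm_backlight_data my_backlight_data = {
	.max_brightness	= 255,
	.dft_brightness	= 128,
	.pwm_period_ns	= 78770,
	.notify_after	= my_board_bl_done,
};
```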
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c index 02bf7bf7160b..b5abaae38e97 100644 --- a/drivers/w1/masters/ds2490.c +++ b/drivers/w1/masters/ds2490.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * dscore.c | 2 | * dscore.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -1024,5 +1024,5 @@ module_init(ds_init); | |||
1024 | module_exit(ds_fini); | 1024 | module_exit(ds_fini); |
1025 | 1025 | ||
1026 | MODULE_LICENSE("GPL"); | 1026 | MODULE_LICENSE("GPL"); |
1027 | MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); | 1027 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); |
1028 | MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)"); | 1028 | MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)"); |
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c index 334d1ccf9c92..f667c26b2195 100644 --- a/drivers/w1/masters/matrox_w1.c +++ b/drivers/w1/masters/matrox_w1.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * matrox_w1.c | 2 | * matrox_w1.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -39,7 +39,7 @@ | |||
39 | #include "../w1_log.h" | 39 | #include "../w1_log.h" |
40 | 40 | ||
41 | MODULE_LICENSE("GPL"); | 41 | MODULE_LICENSE("GPL"); |
42 | MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); | 42 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); |
43 | MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio)."); | 43 | MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio)."); |
44 | 44 | ||
45 | static struct pci_device_id matrox_w1_tbl[] = { | 45 | static struct pci_device_id matrox_w1_tbl[] = { |
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c index c37781899d90..7c8cdb8aed26 100644 --- a/drivers/w1/slaves/w1_ds2408.c +++ b/drivers/w1/slaves/w1_ds2408.c | |||
@@ -373,7 +373,7 @@ static int w1_f29_add_slave(struct w1_slave *sl) | |||
373 | static void w1_f29_remove_slave(struct w1_slave *sl) | 373 | static void w1_f29_remove_slave(struct w1_slave *sl) |
374 | { | 374 | { |
375 | int i; | 375 | int i; |
376 | for (i = NB_SYSFS_BIN_FILES; i <= 0; --i) | 376 | for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i) |
377 | sysfs_remove_bin_file(&sl->dev.kobj, | 377 | sysfs_remove_bin_file(&sl->dev.kobj, |
378 | &(w1_f29_sysfs_bin_files[i])); | 378 | &(w1_f29_sysfs_bin_files[i])); |
379 | } | 379 | } |
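The w1_ds2408.c hunk fixes a loop whose condition was false on entry: with i starting at NB_SYSFS_BIN_FILES (a positive count), `i <= 0` never holds, so no sysfs files were removed. The corrected loop walks the valid indices N-1 down to 0. A standalone program showing the difference:

```c
/* Demonstrates that the original loop body never executes. */
#include <stdio.h>

#define NB_FILES 4

int main(void)
{
	int i, runs;

	runs = 0;
	for (i = NB_FILES; i <= 0; --i)		/* original: false at once */
		runs++;
	printf("broken loop iterations: %d\n", runs);	/* prints 0 */

	runs = 0;
	for (i = NB_FILES - 1; i >= 0; --i)	/* fixed: visits 3,2,1,0 */
		runs++;
	printf("fixed loop iterations:  %d\n", runs);	/* prints 4 */
	return 0;
}
```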
diff --git a/drivers/w1/slaves/w1_smem.c b/drivers/w1/slaves/w1_smem.c index cc8c02e92593..84655625c870 100644 --- a/drivers/w1/slaves/w1_smem.c +++ b/drivers/w1/slaves/w1_smem.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_smem.c | 2 | * w1_smem.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -32,7 +32,7 @@ | |||
32 | #include "../w1_family.h" | 32 | #include "../w1_family.h" |
33 | 33 | ||
34 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
35 | MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); | 35 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); |
36 | MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family."); | 36 | MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family."); |
37 | 37 | ||
38 | static struct w1_family w1_smem_family_01 = { | 38 | static struct w1_family w1_smem_family_01 = { |
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index 402928b135d1..a1ef9b5b38cf 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_therm.c | 2 | * w1_therm.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -34,7 +34,7 @@ | |||
34 | #include "../w1_family.h" | 34 | #include "../w1_family.h" |
35 | 35 | ||
36 | MODULE_LICENSE("GPL"); | 36 | MODULE_LICENSE("GPL"); |
37 | MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); | 37 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); |
38 | MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family."); | 38 | MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family."); |
39 | 39 | ||
40 | /* Allow the strong pullup to be disabled, but default to enabled. | 40 | /* Allow the strong pullup to be disabled, but default to enabled. |
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 6c136c19e982..c37497823851 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1.c | 2 | * w1.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -42,7 +42,7 @@ | |||
42 | #include "w1_netlink.h" | 42 | #include "w1_netlink.h" |
43 | 43 | ||
44 | MODULE_LICENSE("GPL"); | 44 | MODULE_LICENSE("GPL"); |
45 | MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); | 45 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); |
46 | MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); | 46 | MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); |
47 | 47 | ||
48 | static int w1_timeout = 10; | 48 | static int w1_timeout = 10; |
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h index 1ce23fc6186c..4d012ca3f32c 100644 --- a/drivers/w1/w1.h +++ b/drivers/w1/w1.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1.h | 2 | * w1.h |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c index 4a099041f28a..63359797c8b1 100644 --- a/drivers/w1/w1_family.c +++ b/drivers/w1/w1_family.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_family.c | 2 | * w1_family.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h index 98a1ac0f4693..490cda2281bc 100644 --- a/drivers/w1/w1_family.h +++ b/drivers/w1/w1_family.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_family.h | 2 | * w1_family.h |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c index b50be3f1073d..d220bce2cee4 100644 --- a/drivers/w1/w1_int.c +++ b/drivers/w1/w1_int.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_int.c | 2 | * w1_int.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h index 4274082d2262..2ad7d4414bed 100644 --- a/drivers/w1/w1_int.h +++ b/drivers/w1/w1_int.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_int.h | 2 | * w1_int.h |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c index 8e8b64cfafb6..765b37b62a4f 100644 --- a/drivers/w1/w1_io.c +++ b/drivers/w1/w1_io.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_io.c | 2 | * w1_io.c |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h index e6ab7cf08f88..9c7bd62e6bdc 100644 --- a/drivers/w1/w1_log.h +++ b/drivers/w1/w1_log.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_log.h | 2 | * w1_log.h |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c index 55aabd927c60..40788c925d1c 100644 --- a/drivers/w1/w1_netlink.c +++ b/drivers/w1/w1_netlink.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_netlink.c | 2 | * w1_netlink.c |
3 | * | 3 | * |
4 | * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h index 27e950f935b1..b0922dc29658 100644 --- a/drivers/w1/w1_netlink.h +++ b/drivers/w1/w1_netlink.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * w1_netlink.h | 2 | * w1_netlink.h |
3 | * | 3 | * |
4 | * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net> |
5 | * | 5 | * |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 410fba45378d..809cbda03d7a 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
@@ -494,15 +494,16 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason, | |||
494 | asminline_call(&cmn_regs, cru_rom_addr); | 494 | asminline_call(&cmn_regs, cru_rom_addr); |
495 | die_nmi_called = 1; | 495 | die_nmi_called = 1; |
496 | spin_unlock_irqrestore(&rom_lock, rom_pl); | 496 | spin_unlock_irqrestore(&rom_lock, rom_pl); |
497 | |||
498 | if (allow_kdump) | ||
499 | hpwdt_stop(); | ||
500 | |||
497 | if (!is_icru) { | 501 | if (!is_icru) { |
498 | if (cmn_regs.u1.ral == 0) { | 502 | if (cmn_regs.u1.ral == 0) { |
499 | printk(KERN_WARNING "hpwdt: An NMI occurred, " | 503 | panic("An NMI occurred, " |
500 | "but unable to determine source.\n"); | 504 | "but unable to determine source.\n"); |
501 | } | 505 | } |
502 | } | 506 | } |
503 | |||
504 | if (allow_kdump) | ||
505 | hpwdt_stop(); | ||
506 | panic("An NMI occurred, please see the Integrated " | 507 | panic("An NMI occurred, please see the Integrated " |
507 | "Management Log for details.\n"); | 508 | "Management Log for details.\n"); |
508 | 509 | ||
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c index 7d82adac1cb2..102aed0efbf1 100644 --- a/drivers/watchdog/lantiq_wdt.c +++ b/drivers/watchdog/lantiq_wdt.c | |||
@@ -51,16 +51,16 @@ static int ltq_wdt_ok_to_close; | |||
51 | static void | 51 | static void |
52 | ltq_wdt_enable(void) | 52 | ltq_wdt_enable(void) |
53 | { | 53 | { |
54 | ltq_wdt_timeout = ltq_wdt_timeout * | 54 | unsigned long int timeout = ltq_wdt_timeout * |
55 | (ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000; | 55 | (ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000; |
56 | if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT) | 56 | if (timeout > LTQ_MAX_TIMEOUT) |
57 | ltq_wdt_timeout = LTQ_MAX_TIMEOUT; | 57 | timeout = LTQ_MAX_TIMEOUT; |
58 | 58 | ||
59 | /* write the first password magic */ | 59 | /* write the first password magic */ |
60 | ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); | 60 | ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); |
61 | /* write the second magic plus the configuration and new timeout */ | 61 | /* write the second magic plus the configuration and new timeout */ |
62 | ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV | | 62 | ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV | |
63 | LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR); | 63 | LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR); |
64 | } | 64 | } |
65 | 65 | ||
66 | static void | 66 | static void |
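The lantiq_wdt.c hunk stops rescaling the module-global ltq_wdt_timeout in place: ltq_wdt_enable() runs on every keepalive, so converting the setting to clock ticks compounded it on each call; the fix computes into a local and clamps that instead. A standalone illustration of why in-place scaling of a persistent setting is wrong (the clock constant is made up):

```c
/* Demonstrates compounding when a stored setting is scaled in place. */
#include <stdio.h>

#define CLK_PER_SEC 1000UL		/* hypothetical divider result */

static unsigned long timeout_s = 30;	/* user-visible setting */

static unsigned long broken_enable(void)
{
	timeout_s = timeout_s * CLK_PER_SEC;	/* corrupts the setting */
	return timeout_s;
}

static unsigned long fixed_enable(void)
{
	unsigned long ticks = timeout_s * CLK_PER_SEC;	/* local only */
	return ticks;
}

int main(void)
{
	unsigned long a = broken_enable();
	unsigned long b = broken_enable();

	printf("broken: %lu then %lu (setting now %lu)\n", a, b, timeout_s);

	timeout_s = 30;
	a = fixed_enable();
	b = fixed_enable();
	printf("fixed:  %lu then %lu (setting still %lu)\n", a, b, timeout_s);
	return 0;
}
```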
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c index 3066a5127ca8..eaca366b7234 100644 --- a/drivers/watchdog/sbc_epx_c3.c +++ b/drivers/watchdog/sbc_epx_c3.c | |||
@@ -173,7 +173,7 @@ static struct notifier_block epx_c3_notifier = { | |||
173 | .notifier_call = epx_c3_notify_sys, | 173 | .notifier_call = epx_c3_notify_sys, |
174 | }; | 174 | }; |
175 | 175 | ||
176 | static const char banner[] __initdata = KERN_INFO PFX | 176 | static const char banner[] __initconst = KERN_INFO PFX |
177 | "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n"; | 177 | "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n"; |
178 | 178 | ||
179 | static int __init watchdog_init(void) | 179 | static int __init watchdog_init(void) |
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index d33520d0b4c9..1199da0f98cf 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c | |||
@@ -59,7 +59,7 @@ static struct watchdog_device *wdd; | |||
59 | 59 | ||
60 | static int watchdog_ping(struct watchdog_device *wddev) | 60 | static int watchdog_ping(struct watchdog_device *wddev) |
61 | { | 61 | { |
62 | if (test_bit(WDOG_ACTIVE, &wdd->status)) { | 62 | if (test_bit(WDOG_ACTIVE, &wddev->status)) { |
63 | if (wddev->ops->ping) | 63 | if (wddev->ops->ping) |
64 | return wddev->ops->ping(wddev); /* ping the watchdog */ | 64 | return wddev->ops->ping(wddev); /* ping the watchdog */ |
65 | else | 65 | else |
@@ -81,12 +81,12 @@ static int watchdog_start(struct watchdog_device *wddev) | |||
81 | { | 81 | { |
82 | int err; | 82 | int err; |
83 | 83 | ||
84 | if (!test_bit(WDOG_ACTIVE, &wdd->status)) { | 84 | if (!test_bit(WDOG_ACTIVE, &wddev->status)) { |
85 | err = wddev->ops->start(wddev); | 85 | err = wddev->ops->start(wddev); |
86 | if (err < 0) | 86 | if (err < 0) |
87 | return err; | 87 | return err; |
88 | 88 | ||
89 | set_bit(WDOG_ACTIVE, &wdd->status); | 89 | set_bit(WDOG_ACTIVE, &wddev->status); |
90 | } | 90 | } |
91 | return 0; | 91 | return 0; |
92 | } | 92 | } |
@@ -105,18 +105,18 @@ static int watchdog_stop(struct watchdog_device *wddev) | |||
105 | { | 105 | { |
106 | int err = -EBUSY; | 106 | int err = -EBUSY; |
107 | 107 | ||
108 | if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) { | 108 | if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) { |
109 | pr_info("%s: nowayout prevents watchdog to be stopped!\n", | 109 | pr_info("%s: nowayout prevents watchdog to be stopped!\n", |
110 | wdd->info->identity); | 110 | wddev->info->identity); |
111 | return err; | 111 | return err; |
112 | } | 112 | } |
113 | 113 | ||
114 | if (test_bit(WDOG_ACTIVE, &wdd->status)) { | 114 | if (test_bit(WDOG_ACTIVE, &wddev->status)) { |
115 | err = wddev->ops->stop(wddev); | 115 | err = wddev->ops->stop(wddev); |
116 | if (err < 0) | 116 | if (err < 0) |
117 | return err; | 117 | return err; |
118 | 118 | ||
119 | clear_bit(WDOG_ACTIVE, &wdd->status); | 119 | clear_bit(WDOG_ACTIVE, &wddev->status); |
120 | } | 120 | } |
121 | return 0; | 121 | return 0; |
122 | } | 122 | } |
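The watchdog_dev.c hunks fix a copy-paste slip: the helpers take a wddev parameter but were testing and updating the file-scope wdd, so they only behaved correctly for that one global device. A standalone sketch of the bug class, with struct wd and the helper names made up:

```c
/* Demonstrates why a helper must use its parameter, not a lookalike global. */
#include <stdbool.h>
#include <stdio.h>

struct wd { bool active; };

static struct wd global_wd = { true };

static bool broken_is_active(struct wd *dev)
{
	(void)dev;
	return global_wd.active;	/* consults the wrong object */
}

static bool fixed_is_active(struct wd *dev)
{
	return dev->active;		/* operates on the argument */
}

int main(void)
{
	struct wd other = { false };

	printf("broken: %d (expected 0)\n", broken_is_active(&other));
	printf("fixed:  %d (expected 0)\n", fixed_is_active(&other));
	return 0;
}
```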
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index da70f5c32eb9..7523719bf8a4 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -54,7 +54,7 @@ | |||
54 | * This lock protects updates to the following mapping and reference-count | 54 | * This lock protects updates to the following mapping and reference-count |
55 | * arrays. The lock does not need to be acquired to read the mapping tables. | 55 | * arrays. The lock does not need to be acquired to read the mapping tables. |
56 | */ | 56 | */ |
57 | static DEFINE_SPINLOCK(irq_mapping_update_lock); | 57 | static DEFINE_MUTEX(irq_mapping_update_lock); |
58 | 58 | ||
59 | static LIST_HEAD(xen_irq_list_head); | 59 | static LIST_HEAD(xen_irq_list_head); |
60 | 60 | ||
@@ -631,7 +631,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
631 | int irq = -1; | 631 | int irq = -1; |
632 | struct physdev_irq irq_op; | 632 | struct physdev_irq irq_op; |
633 | 633 | ||
634 | spin_lock(&irq_mapping_update_lock); | 634 | mutex_lock(&irq_mapping_update_lock); |
635 | 635 | ||
636 | irq = find_irq_by_gsi(gsi); | 636 | irq = find_irq_by_gsi(gsi); |
637 | if (irq != -1) { | 637 | if (irq != -1) { |
@@ -684,7 +684,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
684 | handle_edge_irq, name); | 684 | handle_edge_irq, name); |
685 | 685 | ||
686 | out: | 686 | out: |
687 | spin_unlock(&irq_mapping_update_lock); | 687 | mutex_unlock(&irq_mapping_update_lock); |
688 | 688 | ||
689 | return irq; | 689 | return irq; |
690 | } | 690 | } |
@@ -710,7 +710,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | |||
710 | { | 710 | { |
711 | int irq, ret; | 711 | int irq, ret; |
712 | 712 | ||
713 | spin_lock(&irq_mapping_update_lock); | 713 | mutex_lock(&irq_mapping_update_lock); |
714 | 714 | ||
715 | irq = xen_allocate_irq_dynamic(); | 715 | irq = xen_allocate_irq_dynamic(); |
716 | if (irq == -1) | 716 | if (irq == -1) |
@@ -724,10 +724,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | |||
724 | if (ret < 0) | 724 | if (ret < 0) |
725 | goto error_irq; | 725 | goto error_irq; |
726 | out: | 726 | out: |
727 | spin_unlock(&irq_mapping_update_lock); | 727 | mutex_unlock(&irq_mapping_update_lock); |
728 | return irq; | 728 | return irq; |
729 | error_irq: | 729 | error_irq: |
730 | spin_unlock(&irq_mapping_update_lock); | 730 | mutex_unlock(&irq_mapping_update_lock); |
731 | xen_free_irq(irq); | 731 | xen_free_irq(irq); |
732 | return -1; | 732 | return -1; |
733 | } | 733 | } |
@@ -740,7 +740,7 @@ int xen_destroy_irq(int irq) | |||
740 | struct irq_info *info = info_for_irq(irq); | 740 | struct irq_info *info = info_for_irq(irq); |
741 | int rc = -ENOENT; | 741 | int rc = -ENOENT; |
742 | 742 | ||
743 | spin_lock(&irq_mapping_update_lock); | 743 | mutex_lock(&irq_mapping_update_lock); |
744 | 744 | ||
745 | desc = irq_to_desc(irq); | 745 | desc = irq_to_desc(irq); |
746 | if (!desc) | 746 | if (!desc) |
@@ -766,7 +766,7 @@ int xen_destroy_irq(int irq) | |||
766 | xen_free_irq(irq); | 766 | xen_free_irq(irq); |
767 | 767 | ||
768 | out: | 768 | out: |
769 | spin_unlock(&irq_mapping_update_lock); | 769 | mutex_unlock(&irq_mapping_update_lock); |
770 | return rc; | 770 | return rc; |
771 | } | 771 | } |
772 | 772 | ||
@@ -776,7 +776,7 @@ int xen_irq_from_pirq(unsigned pirq) | |||
776 | 776 | ||
777 | struct irq_info *info; | 777 | struct irq_info *info; |
778 | 778 | ||
779 | spin_lock(&irq_mapping_update_lock); | 779 | mutex_lock(&irq_mapping_update_lock); |
780 | 780 | ||
781 | list_for_each_entry(info, &xen_irq_list_head, list) { | 781 | list_for_each_entry(info, &xen_irq_list_head, list) { |
782 | if (info == NULL || info->type != IRQT_PIRQ) | 782 | if (info == NULL || info->type != IRQT_PIRQ) |
@@ -787,7 +787,7 @@ int xen_irq_from_pirq(unsigned pirq) | |||
787 | } | 787 | } |
788 | irq = -1; | 788 | irq = -1; |
789 | out: | 789 | out: |
790 | spin_unlock(&irq_mapping_update_lock); | 790 | mutex_unlock(&irq_mapping_update_lock); |
791 | 791 | ||
792 | return irq; | 792 | return irq; |
793 | } | 793 | } |
@@ -802,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
802 | { | 802 | { |
803 | int irq; | 803 | int irq; |
804 | 804 | ||
805 | spin_lock(&irq_mapping_update_lock); | 805 | mutex_lock(&irq_mapping_update_lock); |
806 | 806 | ||
807 | irq = evtchn_to_irq[evtchn]; | 807 | irq = evtchn_to_irq[evtchn]; |
808 | 808 | ||
@@ -818,7 +818,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
818 | } | 818 | } |
819 | 819 | ||
820 | out: | 820 | out: |
821 | spin_unlock(&irq_mapping_update_lock); | 821 | mutex_unlock(&irq_mapping_update_lock); |
822 | 822 | ||
823 | return irq; | 823 | return irq; |
824 | } | 824 | } |
@@ -829,7 +829,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
829 | struct evtchn_bind_ipi bind_ipi; | 829 | struct evtchn_bind_ipi bind_ipi; |
830 | int evtchn, irq; | 830 | int evtchn, irq; |
831 | 831 | ||
832 | spin_lock(&irq_mapping_update_lock); | 832 | mutex_lock(&irq_mapping_update_lock); |
833 | 833 | ||
834 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; | 834 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; |
835 | 835 | ||
@@ -853,7 +853,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
853 | } | 853 | } |
854 | 854 | ||
855 | out: | 855 | out: |
856 | spin_unlock(&irq_mapping_update_lock); | 856 | mutex_unlock(&irq_mapping_update_lock); |
857 | return irq; | 857 | return irq; |
858 | } | 858 | } |
859 | 859 | ||
@@ -878,7 +878,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
878 | struct evtchn_bind_virq bind_virq; | 878 | struct evtchn_bind_virq bind_virq; |
879 | int evtchn, irq; | 879 | int evtchn, irq; |
880 | 880 | ||
881 | spin_lock(&irq_mapping_update_lock); | 881 | mutex_lock(&irq_mapping_update_lock); |
882 | 882 | ||
883 | irq = per_cpu(virq_to_irq, cpu)[virq]; | 883 | irq = per_cpu(virq_to_irq, cpu)[virq]; |
884 | 884 | ||
@@ -903,7 +903,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
903 | } | 903 | } |
904 | 904 | ||
905 | out: | 905 | out: |
906 | spin_unlock(&irq_mapping_update_lock); | 906 | mutex_unlock(&irq_mapping_update_lock); |
907 | 907 | ||
908 | return irq; | 908 | return irq; |
909 | } | 909 | } |
@@ -913,7 +913,7 @@ static void unbind_from_irq(unsigned int irq) | |||
913 | struct evtchn_close close; | 913 | struct evtchn_close close; |
914 | int evtchn = evtchn_from_irq(irq); | 914 | int evtchn = evtchn_from_irq(irq); |
915 | 915 | ||
916 | spin_lock(&irq_mapping_update_lock); | 916 | mutex_lock(&irq_mapping_update_lock); |
917 | 917 | ||
918 | if (VALID_EVTCHN(evtchn)) { | 918 | if (VALID_EVTCHN(evtchn)) { |
919 | close.port = evtchn; | 919 | close.port = evtchn; |
@@ -943,7 +943,7 @@ static void unbind_from_irq(unsigned int irq) | |||
943 | 943 | ||
944 | xen_free_irq(irq); | 944 | xen_free_irq(irq); |
945 | 945 | ||
946 | spin_unlock(&irq_mapping_update_lock); | 946 | mutex_unlock(&irq_mapping_update_lock); |
947 | } | 947 | } |
948 | 948 | ||
949 | int bind_evtchn_to_irqhandler(unsigned int evtchn, | 949 | int bind_evtchn_to_irqhandler(unsigned int evtchn, |
@@ -1279,7 +1279,7 @@ void rebind_evtchn_irq(int evtchn, int irq) | |||
1279 | will also be masked. */ | 1279 | will also be masked. */ |
1280 | disable_irq(irq); | 1280 | disable_irq(irq); |
1281 | 1281 | ||
1282 | spin_lock(&irq_mapping_update_lock); | 1282 | mutex_lock(&irq_mapping_update_lock); |
1283 | 1283 | ||
1284 | /* After resume the irq<->evtchn mappings are all cleared out */ | 1284 | /* After resume the irq<->evtchn mappings are all cleared out */ |
1285 | BUG_ON(evtchn_to_irq[evtchn] != -1); | 1285 | BUG_ON(evtchn_to_irq[evtchn] != -1); |
@@ -1289,7 +1289,7 @@ void rebind_evtchn_irq(int evtchn, int irq) | |||
1289 | 1289 | ||
1290 | xen_irq_info_evtchn_init(irq, evtchn); | 1290 | xen_irq_info_evtchn_init(irq, evtchn); |
1291 | 1291 | ||
1292 | spin_unlock(&irq_mapping_update_lock); | 1292 | mutex_unlock(&irq_mapping_update_lock); |
1293 | 1293 | ||
1294 | /* new event channels are always bound to cpu 0 */ | 1294 | /* new event channels are always bound to cpu 0 */ |
1295 | irq_set_affinity(irq, cpumask_of(0)); | 1295 | irq_set_affinity(irq, cpumask_of(0)); |
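The xen/events.c hunks convert irq_mapping_update_lock from a spinlock to a mutex: the sections it protects can sleep (IRQ descriptor allocation, hypercalls), which is forbidden while holding a spinlock, and every caller runs in process context. A reduced sketch of the sleeping-lock pattern with a hypothetical mapping table and helper:

```c
/* Sketch only (kernel code): a mutex lets the critical section sleep. */
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(my_mapping_lock);
static int *my_mapping;			/* hypothetical evtchn -> irq table */

static int my_mapping_resize(size_t entries)
{
	int *table;

	mutex_lock(&my_mapping_lock);
	/* GFP_KERNEL may sleep: fine under a mutex, not under a spinlock */
	table = krealloc(my_mapping, entries * sizeof(*table), GFP_KERNEL);
	if (!table) {
		mutex_unlock(&my_mapping_lock);
		return -ENOMEM;
	}
	my_mapping = table;
	mutex_unlock(&my_mapping_lock);
	return 0;
}
```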
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index 1b4afd81f872..6ea852e25162 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c | |||
@@ -70,6 +70,7 @@ | |||
70 | #include <linux/kernel.h> | 70 | #include <linux/kernel.h> |
71 | #include <linux/mm.h> | 71 | #include <linux/mm.h> |
72 | #include <linux/mman.h> | 72 | #include <linux/mman.h> |
73 | #include <linux/module.h> | ||
73 | #include <linux/workqueue.h> | 74 | #include <linux/workqueue.h> |
74 | #include <xen/balloon.h> | 75 | #include <xen/balloon.h> |
75 | #include <xen/tmem.h> | 76 | #include <xen/tmem.h> |