diff options
665 files changed, 14581 insertions, 6698 deletions
diff --git a/Documentation/DocBook/sh.tmpl b/Documentation/DocBook/sh.tmpl index d858d92cf6d9..4a38f604fa66 100644 --- a/Documentation/DocBook/sh.tmpl +++ b/Documentation/DocBook/sh.tmpl | |||
@@ -79,10 +79,6 @@ | |||
79 | </sect2> | 79 | </sect2> |
80 | </sect1> | 80 | </sect1> |
81 | </chapter> | 81 | </chapter> |
82 | <chapter id="clk"> | ||
83 | <title>Clock Framework Extensions</title> | ||
84 | !Iinclude/linux/sh_clk.h | ||
85 | </chapter> | ||
86 | <chapter id="mach"> | 82 | <chapter id="mach"> |
87 | <title>Machine Specific Interfaces</title> | 83 | <title>Machine Specific Interfaces</title> |
88 | <sect1 id="dreamcast"> | 84 | <sect1 id="dreamcast"> |
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl index 4d4ce0e61e42..b4665b9c40b0 100644 --- a/Documentation/DocBook/uio-howto.tmpl +++ b/Documentation/DocBook/uio-howto.tmpl | |||
@@ -16,7 +16,7 @@ | |||
16 | </orgname> | 16 | </orgname> |
17 | 17 | ||
18 | <address> | 18 | <address> |
19 | <email>hjk@linutronix.de</email> | 19 | <email>hjk@hansjkoch.de</email> |
20 | </address> | 20 | </address> |
21 | </affiliation> | 21 | </affiliation> |
22 | </author> | 22 | </author> |
@@ -114,7 +114,7 @@ GPL version 2. | |||
114 | 114 | ||
115 | <para>If you know of any translations for this document, or you are | 115 | <para>If you know of any translations for this document, or you are |
116 | interested in translating it, please email me | 116 | interested in translating it, please email me |
117 | <email>hjk@linutronix.de</email>. | 117 | <email>hjk@hansjkoch.de</email>. |
118 | </para> | 118 | </para> |
119 | </sect1> | 119 | </sect1> |
120 | 120 | ||
@@ -171,7 +171,7 @@ interested in translating it, please email me | |||
171 | <title>Feedback</title> | 171 | <title>Feedback</title> |
172 | <para>Find something wrong with this document? (Or perhaps something | 172 | <para>Find something wrong with this document? (Or perhaps something |
173 | right?) I would love to hear from you. Please email me at | 173 | right?) I would love to hear from you. Please email me at |
174 | <email>hjk@linutronix.de</email>.</para> | 174 | <email>hjk@hansjkoch.de</email>.</para> |
175 | </sect1> | 175 | </sect1> |
176 | </chapter> | 176 | </chapter> |
177 | 177 | ||
diff --git a/Documentation/arm/OMAP/DSS b/Documentation/arm/OMAP/DSS index 0af0e9eed5d6..888ae7b83ae4 100644 --- a/Documentation/arm/OMAP/DSS +++ b/Documentation/arm/OMAP/DSS | |||
@@ -255,9 +255,10 @@ framebuffer parameters. | |||
255 | Kernel boot arguments | 255 | Kernel boot arguments |
256 | --------------------- | 256 | --------------------- |
257 | 257 | ||
258 | vram=<size> | 258 | vram=<size>[,<physaddr>] |
259 | - Amount of total VRAM to preallocate. For example, "10M". omapfb | 259 | - Amount of total VRAM to preallocate and optionally a physical start |
260 | allocates memory for framebuffers from VRAM. | 260 | memory address. For example, "10M". omapfb allocates memory for |
261 | framebuffers from VRAM. | ||
261 | 262 | ||
262 | omapfb.mode=<display>:<mode>[,...] | 263 | omapfb.mode=<display>:<mode>[,...] |
263 | - Default video mode for specified displays. For example, | 264 | - Default video mode for specified displays. For example, |
diff --git a/Documentation/development-process/2.Process b/Documentation/development-process/2.Process index 97726eba6102..911a45186340 100644 --- a/Documentation/development-process/2.Process +++ b/Documentation/development-process/2.Process | |||
@@ -154,7 +154,7 @@ The stages that a patch goes through are, generally: | |||
154 | inclusion, it should be accepted by a relevant subsystem maintainer - | 154 | inclusion, it should be accepted by a relevant subsystem maintainer - |
155 | though this acceptance is not a guarantee that the patch will make it | 155 | though this acceptance is not a guarantee that the patch will make it |
156 | all the way to the mainline. The patch will show up in the maintainer's | 156 | all the way to the mainline. The patch will show up in the maintainer's |
157 | subsystem tree and into the staging trees (described below). When the | 157 | subsystem tree and into the -next trees (described below). When the |
158 | process works, this step leads to more extensive review of the patch and | 158 | process works, this step leads to more extensive review of the patch and |
159 | the discovery of any problems resulting from the integration of this | 159 | the discovery of any problems resulting from the integration of this |
160 | patch with work being done by others. | 160 | patch with work being done by others. |
@@ -236,7 +236,7 @@ finding the right maintainer. Sending patches directly to Linus is not | |||
236 | normally the right way to go. | 236 | normally the right way to go. |
237 | 237 | ||
238 | 238 | ||
239 | 2.4: STAGING TREES | 239 | 2.4: NEXT TREES |
240 | 240 | ||
241 | The chain of subsystem trees guides the flow of patches into the kernel, | 241 | The chain of subsystem trees guides the flow of patches into the kernel, |
242 | but it also raises an interesting question: what if somebody wants to look | 242 | but it also raises an interesting question: what if somebody wants to look |
@@ -250,7 +250,7 @@ changes land in the mainline kernel. One could pull changes from all of | |||
250 | the interesting subsystem trees, but that would be a big and error-prone | 250 | the interesting subsystem trees, but that would be a big and error-prone |
251 | job. | 251 | job. |
252 | 252 | ||
253 | The answer comes in the form of staging trees, where subsystem trees are | 253 | The answer comes in the form of -next trees, where subsystem trees are |
254 | collected for testing and review. The older of these trees, maintained by | 254 | collected for testing and review. The older of these trees, maintained by |
255 | Andrew Morton, is called "-mm" (for memory management, which is how it got | 255 | Andrew Morton, is called "-mm" (for memory management, which is how it got |
256 | started). The -mm tree integrates patches from a long list of subsystem | 256 | started). The -mm tree integrates patches from a long list of subsystem |
@@ -275,7 +275,7 @@ directory at: | |||
275 | Use of the MMOTM tree is likely to be a frustrating experience, though; | 275 | Use of the MMOTM tree is likely to be a frustrating experience, though; |
276 | there is a definite chance that it will not even compile. | 276 | there is a definite chance that it will not even compile. |
277 | 277 | ||
278 | The other staging tree, started more recently, is linux-next, maintained by | 278 | The other -next tree, started more recently, is linux-next, maintained by |
279 | Stephen Rothwell. The linux-next tree is, by design, a snapshot of what | 279 | Stephen Rothwell. The linux-next tree is, by design, a snapshot of what |
280 | the mainline is expected to look like after the next merge window closes. | 280 | the mainline is expected to look like after the next merge window closes. |
281 | Linux-next trees are announced on the linux-kernel and linux-next mailing | 281 | Linux-next trees are announced on the linux-kernel and linux-next mailing |
@@ -303,12 +303,25 @@ volatility of linux-next tends to make it a difficult development target. | |||
303 | See http://lwn.net/Articles/289013/ for more information on this topic, and | 303 | See http://lwn.net/Articles/289013/ for more information on this topic, and |
304 | stay tuned; much is still in flux where linux-next is involved. | 304 | stay tuned; much is still in flux where linux-next is involved. |
305 | 305 | ||
306 | Besides the mmotm and linux-next trees, the kernel source tree now contains | 306 | 2.4.1: STAGING TREES |
307 | the drivers/staging/ directory and many sub-directories for drivers or | 307 | |
308 | filesystems that are on their way to being added to the kernel tree | 308 | The kernel source tree now contains the drivers/staging/ directory, where |
309 | proper, but they remain in drivers/staging/ while they still need more | 309 | many sub-directories for drivers or filesystems that are on their way to |
310 | work. | 310 | being added to the kernel tree live. They remain in drivers/staging while |
311 | 311 | they still need more work; once complete, they can be moved into the | |
312 | kernel proper. This is a way to keep track of drivers that aren't | ||
313 | up to Linux kernel coding or quality standards, but people may want to use | ||
314 | them and track development. | ||
315 | |||
316 | Greg Kroah-Hartman currently (as of 2.6.36) maintains the staging tree. | ||
317 | Drivers that still need work are sent to him, with each driver having | ||
318 | its own subdirectory in drivers/staging/. Along with the driver source | ||
319 | files, a TODO file should be present in the directory as well. The TODO | ||
320 | file lists the pending work that the driver needs for acceptance into | ||
321 | the kernel proper, as well as a list of people that should be Cc'd for any | ||
322 | patches to the driver. Staging drivers that don't currently build should | ||
323 | have their config entries depend upon CONFIG_BROKEN. Once they can | ||
324 | be successfully built without outside patches, CONFIG_BROKEN can be removed. | ||
312 | 325 | ||
313 | 2.5: TOOLS | 326 | 2.5: TOOLS |
314 | 327 | ||
diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX index a618fd99c9f0..30a70542e823 100644 --- a/Documentation/fb/00-INDEX +++ b/Documentation/fb/00-INDEX | |||
@@ -4,33 +4,41 @@ please mail me. | |||
4 | Geert Uytterhoeven <geert@linux-m68k.org> | 4 | Geert Uytterhoeven <geert@linux-m68k.org> |
5 | 5 | ||
6 | 00-INDEX | 6 | 00-INDEX |
7 | - this file | 7 | - this file. |
8 | arkfb.txt | 8 | arkfb.txt |
9 | - info on the fbdev driver for ARK Logic chips. | 9 | - info on the fbdev driver for ARK Logic chips. |
10 | aty128fb.txt | 10 | aty128fb.txt |
11 | - info on the ATI Rage128 frame buffer driver. | 11 | - info on the ATI Rage128 frame buffer driver. |
12 | cirrusfb.txt | 12 | cirrusfb.txt |
13 | - info on the driver for Cirrus Logic chipsets. | 13 | - info on the driver for Cirrus Logic chipsets. |
14 | cmap_xfbdev.txt | ||
15 | - an introduction to fbdev's cmap structures. | ||
14 | deferred_io.txt | 16 | deferred_io.txt |
15 | - an introduction to deferred IO. | 17 | - an introduction to deferred IO. |
18 | efifb.txt | ||
19 | - info on the EFI platform driver for Intel based Apple computers. | ||
20 | ep93xx-fb.txt | ||
21 | - info on the driver for EP93xx LCD controller. | ||
16 | fbcon.txt | 22 | fbcon.txt |
17 | - intro to and usage guide for the framebuffer console (fbcon). | 23 | - intro to and usage guide for the framebuffer console (fbcon). |
18 | framebuffer.txt | 24 | framebuffer.txt |
19 | - introduction to frame buffer devices. | 25 | - introduction to frame buffer devices. |
20 | imacfb.txt | 26 | gxfb.txt |
21 | - info on the generic EFI platform driver for Intel based Macs. | 27 | - info on the framebuffer driver for AMD Geode GX2 based processors. |
22 | intel810.txt | 28 | intel810.txt |
23 | - documentation for the Intel 810/815 framebuffer driver. | 29 | - documentation for the Intel 810/815 framebuffer driver. |
24 | intelfb.txt | 30 | intelfb.txt |
25 | - docs for Intel 830M/845G/852GM/855GM/865G/915G/945G fb driver. | 31 | - docs for Intel 830M/845G/852GM/855GM/865G/915G/945G fb driver. |
26 | internals.txt | 32 | internals.txt |
27 | - quick overview of frame buffer device internals. | 33 | - quick overview of frame buffer device internals. |
34 | lxfb.txt | ||
35 | - info on the framebuffer driver for AMD Geode LX based processors. | ||
28 | matroxfb.txt | 36 | matroxfb.txt |
29 | - info on the Matrox framebuffer driver for Alpha, Intel and PPC. | 37 | - info on the Matrox framebuffer driver for Alpha, Intel and PPC. |
38 | metronomefb.txt | ||
39 | - info on the driver for the Metronome display controller. | ||
30 | modedb.txt | 40 | modedb.txt |
31 | - info on the video mode database. | 41 | - info on the video mode database. |
32 | matroxfb.txt | ||
33 | - info on the Matrox frame buffer driver. | ||
34 | pvr2fb.txt | 42 | pvr2fb.txt |
35 | - info on the PowerVR 2 frame buffer driver. | 43 | - info on the PowerVR 2 frame buffer driver. |
36 | pxafb.txt | 44 | pxafb.txt |
@@ -39,13 +47,23 @@ s3fb.txt | |||
39 | - info on the fbdev driver for S3 Trio/Virge chips. | 47 | - info on the fbdev driver for S3 Trio/Virge chips. |
40 | sa1100fb.txt | 48 | sa1100fb.txt |
41 | - information about the driver for the SA-1100 LCD controller. | 49 | - information about the driver for the SA-1100 LCD controller. |
50 | sh7760fb.txt | ||
51 | - info on the SH7760/SH7763 integrated LCDC Framebuffer driver. | ||
42 | sisfb.txt | 52 | sisfb.txt |
43 | - info on the framebuffer device driver for various SiS chips. | 53 | - info on the framebuffer device driver for various SiS chips. |
44 | sstfb.txt | 54 | sstfb.txt |
45 | - info on the frame buffer driver for 3dfx' Voodoo Graphics boards. | 55 | - info on the frame buffer driver for 3dfx' Voodoo Graphics boards. |
46 | tgafb.txt | 56 | tgafb.txt |
47 | - info on the TGA (DECChip 21030) frame buffer driver | 57 | - info on the TGA (DECChip 21030) frame buffer driver. |
58 | tridentfb.txt | ||
59 | info on the framebuffer driver for some Trident chip based cards. | ||
60 | uvesafb.txt | ||
61 | - info on the userspace VESA (VBE2+ compliant) frame buffer device. | ||
48 | vesafb.txt | 62 | vesafb.txt |
49 | - info on the VESA frame buffer device | 63 | - info on the VESA frame buffer device. |
64 | viafb.modes | ||
65 | - list of modes for VIA Integration Graphic Chip. | ||
66 | viafb.txt | ||
67 | - info on the VIA Integration Graphic Chip console framebuffer driver. | ||
50 | vt8623fb.txt | 68 | vt8623fb.txt |
51 | - info on the fb driver for the graphics core in VIA VT8623 chipsets. | 69 | - info on the fb driver for the graphics core in VIA VT8623 chipsets. |
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index d8f36f984faa..6c2f55e05f13 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
@@ -554,3 +554,13 @@ Why: This is a legacy interface which have been replaced by a more | |||
554 | Who: NeilBrown <neilb@suse.de> | 554 | Who: NeilBrown <neilb@suse.de> |
555 | 555 | ||
556 | ---------------------------- | 556 | ---------------------------- |
557 | |||
558 | What: i2c_adapter.id | ||
559 | When: June 2011 | ||
560 | Why: This field is deprecated. I2C device drivers shouldn't change their | ||
561 | behavior based on the underlying I2C adapter. Instead, the I2C | ||
562 | adapter driver should instantiate the I2C devices and provide the | ||
563 | needed platform-specific information. | ||
564 | Who: Jean Delvare <khali@linux-fr.org> | ||
565 | |||
566 | ---------------------------- | ||
diff --git a/Documentation/filesystems/configfs/configfs_example_explicit.c b/Documentation/filesystems/configfs/configfs_example_explicit.c index d428cc9f07f3..fd53869f5633 100644 --- a/Documentation/filesystems/configfs/configfs_example_explicit.c +++ b/Documentation/filesystems/configfs/configfs_example_explicit.c | |||
@@ -89,7 +89,7 @@ static ssize_t childless_storeme_write(struct childless *childless, | |||
89 | char *p = (char *) page; | 89 | char *p = (char *) page; |
90 | 90 | ||
91 | tmp = simple_strtoul(p, &p, 10); | 91 | tmp = simple_strtoul(p, &p, 10); |
92 | if (!p || (*p && (*p != '\n'))) | 92 | if ((*p != '\0') && (*p != '\n')) |
93 | return -EINVAL; | 93 | return -EINVAL; |
94 | 94 | ||
95 | if (tmp > INT_MAX) | 95 | if (tmp > INT_MAX) |
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt index 9633da01ff46..792faa3c06cf 100644 --- a/Documentation/gpio.txt +++ b/Documentation/gpio.txt | |||
@@ -617,6 +617,16 @@ and have the following read/write attributes: | |||
617 | is configured as an output, this value may be written; | 617 | is configured as an output, this value may be written; |
618 | any nonzero value is treated as high. | 618 | any nonzero value is treated as high. |
619 | 619 | ||
620 | If the pin can be configured as interrupt-generating interrupt | ||
621 | and if it has been configured to generate interrupts (see the | ||
622 | description of "edge"), you can poll(2) on that file and | ||
623 | poll(2) will return whenever the interrupt was triggered. If | ||
624 | you use poll(2), set the events POLLPRI and POLLERR. If you | ||
625 | use select(2), set the file descriptor in exceptfds. After | ||
626 | poll(2) returns, either lseek(2) to the beginning of the sysfs | ||
627 | file and read the new value or close the file and re-open it | ||
628 | to read the value. | ||
629 | |||
620 | "edge" ... reads as either "none", "rising", "falling", or | 630 | "edge" ... reads as either "none", "rising", "falling", or |
621 | "both". Write these strings to select the signal edge(s) | 631 | "both". Write these strings to select the signal edge(s) |
622 | that will make poll(2) on the "value" file return. | 632 | that will make poll(2) on the "value" file return. |
diff --git a/Documentation/hwmon/lm93 b/Documentation/hwmon/lm93 index ac711f357faf..7a10616d0b44 100644 --- a/Documentation/hwmon/lm93 +++ b/Documentation/hwmon/lm93 | |||
@@ -11,7 +11,7 @@ Authors: | |||
11 | Mark M. Hoffman <mhoffman@lightlink.com> | 11 | Mark M. Hoffman <mhoffman@lightlink.com> |
12 | Ported to 2.6 by Eric J. Bowersox <ericb@aspsys.com> | 12 | Ported to 2.6 by Eric J. Bowersox <ericb@aspsys.com> |
13 | Adapted to 2.6.20 by Carsten Emde <ce@osadl.org> | 13 | Adapted to 2.6.20 by Carsten Emde <ce@osadl.org> |
14 | Modified for mainline integration by Hans J. Koch <hjk@linutronix.de> | 14 | Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de> |
15 | 15 | ||
16 | Module Parameters | 16 | Module Parameters |
17 | ----------------- | 17 | ----------------- |
diff --git a/Documentation/hwmon/max6650 b/Documentation/hwmon/max6650 index 8be7beb9e3e8..c565650fcfc6 100644 --- a/Documentation/hwmon/max6650 +++ b/Documentation/hwmon/max6650 | |||
@@ -8,7 +8,7 @@ Supported chips: | |||
8 | Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf | 8 | Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf |
9 | 9 | ||
10 | Authors: | 10 | Authors: |
11 | Hans J. Koch <hjk@linutronix.de> | 11 | Hans J. Koch <hjk@hansjkoch.de> |
12 | John Morris <john.morris@spirentcom.com> | 12 | John Morris <john.morris@spirentcom.com> |
13 | Claus Gindhart <claus.gindhart@kontron.com> | 13 | Claus Gindhart <claus.gindhart@kontron.com> |
14 | 14 | ||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 92e83e53148f..cdd2a6e8a3b7 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -2385,6 +2385,11 @@ and is between 256 and 4096 characters. It is defined in the file | |||
2385 | improve throughput, but will also increase the | 2385 | improve throughput, but will also increase the |
2386 | amount of memory reserved for use by the client. | 2386 | amount of memory reserved for use by the client. |
2387 | 2387 | ||
2388 | swapaccount[=0|1] | ||
2389 | [KNL] Enable accounting of swap in memory resource | ||
2390 | controller if no parameter or 1 is given or disable | ||
2391 | it if 0 is given (See Documentation/cgroups/memory.txt) | ||
2392 | |||
2388 | swiotlb= [IA-64] Number of I/O TLB slabs | 2393 | swiotlb= [IA-64] Number of I/O TLB slabs |
2389 | 2394 | ||
2390 | switches= [HW,M68k] | 2395 | switches= [HW,M68k] |
diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt index 44d87ad3cea9..cd445582d1f8 100644 --- a/Documentation/power/opp.txt +++ b/Documentation/power/opp.txt | |||
@@ -37,6 +37,9 @@ Typical usage of the OPP library is as follows: | |||
37 | SoC framework -> modifies on required cases certain OPPs -> OPP layer | 37 | SoC framework -> modifies on required cases certain OPPs -> OPP layer |
38 | -> queries to search/retrieve information -> | 38 | -> queries to search/retrieve information -> |
39 | 39 | ||
40 | Architectures that provide a SoC framework for OPP should select ARCH_HAS_OPP | ||
41 | to make the OPP layer available. | ||
42 | |||
40 | OPP layer expects each domain to be represented by a unique device pointer. SoC | 43 | OPP layer expects each domain to be represented by a unique device pointer. SoC |
41 | framework registers a set of initial OPPs per device with the OPP layer. This | 44 | framework registers a set of initial OPPs per device with the OPP layer. This |
42 | list is expected to be an optimally small number typically around 5 per device. | 45 | list is expected to be an optimally small number typically around 5 per device. |
diff --git a/Documentation/sh/clk.txt b/Documentation/sh/clk.txt deleted file mode 100644 index 114b595cfa97..000000000000 --- a/Documentation/sh/clk.txt +++ /dev/null | |||
@@ -1,32 +0,0 @@ | |||
1 | Clock framework on SuperH architecture | ||
2 | |||
3 | The framework on SH extends existing API by the function clk_set_rate_ex, | ||
4 | which prototype is as follows: | ||
5 | |||
6 | clk_set_rate_ex (struct clk *clk, unsigned long rate, int algo_id) | ||
7 | |||
8 | The algo_id parameter is used to specify algorithm used to recalculate clocks, | ||
9 | adjanced to clock, specified as first argument. It is assumed that algo_id==0 | ||
10 | means no changes to adjanced clock | ||
11 | |||
12 | Internally, the clk_set_rate_ex forwards request to clk->ops->set_rate method, | ||
13 | if it is present in ops structure. The method should set the clock rate and adjust | ||
14 | all needed clocks according to the passed algo_id. | ||
15 | Exact values for algo_id are machine-dependent. For the sh7722, the following | ||
16 | values are defined: | ||
17 | |||
18 | NO_CHANGE = 0, | ||
19 | IUS_N1_N1, /* I:U = N:1, U:Sh = N:1 */ | ||
20 | IUS_322, /* I:U:Sh = 3:2:2 */ | ||
21 | IUS_522, /* I:U:Sh = 5:2:2 */ | ||
22 | IUS_N11, /* I:U:Sh = N:1:1 */ | ||
23 | SB_N1, /* Sh:B = N:1 */ | ||
24 | SB3_N1, /* Sh:B3 = N:1 */ | ||
25 | SB3_32, /* Sh:B3 = 3:2 */ | ||
26 | SB3_43, /* Sh:B3 = 4:3 */ | ||
27 | SB3_54, /* Sh:B3 = 5:4 */ | ||
28 | BP_N1, /* B:P = N:1 */ | ||
29 | IP_N1 /* I:P = N:1 */ | ||
30 | |||
31 | Each of these constants means relation between clocks that can be set via the FRQCR | ||
32 | register | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 88b74a75d932..b3be8b3d0437 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -945,7 +945,7 @@ M: Magnus Damm <magnus.damm@gmail.com> | |||
945 | L: linux-sh@vger.kernel.org | 945 | L: linux-sh@vger.kernel.org |
946 | W: http://oss.renesas.com | 946 | W: http://oss.renesas.com |
947 | Q: http://patchwork.kernel.org/project/linux-sh/list/ | 947 | Q: http://patchwork.kernel.org/project/linux-sh/list/ |
948 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/genesis-2.6.git | 948 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git rmobile-latest |
949 | S: Supported | 949 | S: Supported |
950 | F: arch/arm/mach-shmobile/ | 950 | F: arch/arm/mach-shmobile/ |
951 | F: drivers/sh/ | 951 | F: drivers/sh/ |
@@ -1359,7 +1359,7 @@ F: include/net/bluetooth/ | |||
1359 | 1359 | ||
1360 | BONDING DRIVER | 1360 | BONDING DRIVER |
1361 | M: Jay Vosburgh <fubar@us.ibm.com> | 1361 | M: Jay Vosburgh <fubar@us.ibm.com> |
1362 | L: bonding-devel@lists.sourceforge.net | 1362 | L: netdev@vger.kernel.org |
1363 | W: http://sourceforge.net/projects/bonding/ | 1363 | W: http://sourceforge.net/projects/bonding/ |
1364 | S: Supported | 1364 | S: Supported |
1365 | F: drivers/net/bonding/ | 1365 | F: drivers/net/bonding/ |
@@ -1829,6 +1829,13 @@ W: http://www.chelsio.com | |||
1829 | S: Supported | 1829 | S: Supported |
1830 | F: drivers/net/cxgb4vf/ | 1830 | F: drivers/net/cxgb4vf/ |
1831 | 1831 | ||
1832 | STMMAC ETHERNET DRIVER | ||
1833 | M: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
1834 | L: netdev@vger.kernel.org | ||
1835 | W: http://www.stlinux.com | ||
1836 | S: Supported | ||
1837 | F: drivers/net/stmmac/ | ||
1838 | |||
1832 | CYBERPRO FB DRIVER | 1839 | CYBERPRO FB DRIVER |
1833 | M: Russell King <linux@arm.linux.org.uk> | 1840 | M: Russell King <linux@arm.linux.org.uk> |
1834 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1841 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -2008,6 +2015,7 @@ F: drivers/hwmon/dme1737.c | |||
2008 | DOCBOOK FOR DOCUMENTATION | 2015 | DOCBOOK FOR DOCUMENTATION |
2009 | M: Randy Dunlap <rdunlap@xenotime.net> | 2016 | M: Randy Dunlap <rdunlap@xenotime.net> |
2010 | S: Maintained | 2017 | S: Maintained |
2018 | F: scripts/kernel-doc | ||
2011 | 2019 | ||
2012 | DOCKING STATION DRIVER | 2020 | DOCKING STATION DRIVER |
2013 | M: Shaohua Li <shaohua.li@intel.com> | 2021 | M: Shaohua Li <shaohua.li@intel.com> |
@@ -2018,6 +2026,7 @@ F: drivers/acpi/dock.c | |||
2018 | DOCUMENTATION | 2026 | DOCUMENTATION |
2019 | M: Randy Dunlap <rdunlap@xenotime.net> | 2027 | M: Randy Dunlap <rdunlap@xenotime.net> |
2020 | L: linux-doc@vger.kernel.org | 2028 | L: linux-doc@vger.kernel.org |
2029 | T: quilt oss.oracle.com/~rdunlap/kernel-doc-patches/current/ | ||
2021 | S: Maintained | 2030 | S: Maintained |
2022 | F: Documentation/ | 2031 | F: Documentation/ |
2023 | 2032 | ||
@@ -2435,9 +2444,12 @@ F: drivers/net/wan/sdla.c | |||
2435 | FRAMEBUFFER LAYER | 2444 | FRAMEBUFFER LAYER |
2436 | L: linux-fbdev@vger.kernel.org | 2445 | L: linux-fbdev@vger.kernel.org |
2437 | W: http://linux-fbdev.sourceforge.net/ | 2446 | W: http://linux-fbdev.sourceforge.net/ |
2447 | Q: http://patchwork.kernel.org/project/linux-fbdev/list/ | ||
2448 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6.git | ||
2438 | S: Orphan | 2449 | S: Orphan |
2439 | F: Documentation/fb/ | 2450 | F: Documentation/fb/ |
2440 | F: drivers/video/fb* | 2451 | F: drivers/video/ |
2452 | F: include/video/ | ||
2441 | F: include/linux/fb.h | 2453 | F: include/linux/fb.h |
2442 | 2454 | ||
2443 | FREESCALE DMA DRIVER | 2455 | FREESCALE DMA DRIVER |
@@ -5705,7 +5717,7 @@ M: Paul Mundt <lethal@linux-sh.org> | |||
5705 | L: linux-sh@vger.kernel.org | 5717 | L: linux-sh@vger.kernel.org |
5706 | W: http://www.linux-sh.org | 5718 | W: http://www.linux-sh.org |
5707 | Q: http://patchwork.kernel.org/project/linux-sh/list/ | 5719 | Q: http://patchwork.kernel.org/project/linux-sh/list/ |
5708 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git | 5720 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git sh-latest |
5709 | S: Supported | 5721 | S: Supported |
5710 | F: Documentation/sh/ | 5722 | F: Documentation/sh/ |
5711 | F: arch/sh/ | 5723 | F: arch/sh/ |
@@ -5827,6 +5839,8 @@ M: Chris Metcalf <cmetcalf@tilera.com> | |||
5827 | W: http://www.tilera.com/scm/ | 5839 | W: http://www.tilera.com/scm/ |
5828 | S: Supported | 5840 | S: Supported |
5829 | F: arch/tile/ | 5841 | F: arch/tile/ |
5842 | F: drivers/char/hvc_tile.c | ||
5843 | F: drivers/net/tile/ | ||
5830 | 5844 | ||
5831 | TLAN NETWORK DRIVER | 5845 | TLAN NETWORK DRIVER |
5832 | M: Samuel Chessman <chessman@tux.org> | 5846 | M: Samuel Chessman <chessman@tux.org> |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 37 | 3 | SUBLEVEL = 37 |
4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc3 |
5 | NAME = Flesh-Eating Bats with Fangs | 5 | NAME = Flesh-Eating Bats with Fangs |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index bd620a481bee..49778bb43782 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -647,7 +647,7 @@ config ARCH_S3C2410 | |||
647 | select ARCH_HAS_CPUFREQ | 647 | select ARCH_HAS_CPUFREQ |
648 | select HAVE_CLK | 648 | select HAVE_CLK |
649 | select ARCH_USES_GETTIMEOFFSET | 649 | select ARCH_USES_GETTIMEOFFSET |
650 | select HAVE_S3C2410_I2C | 650 | select HAVE_S3C2410_I2C if I2C |
651 | help | 651 | help |
652 | Samsung S3C2410X CPU based systems, such as the Simtec Electronics | 652 | Samsung S3C2410X CPU based systems, such as the Simtec Electronics |
653 | BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or | 653 | BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or |
@@ -677,8 +677,8 @@ config ARCH_S3C64XX | |||
677 | select S3C_DEV_NAND | 677 | select S3C_DEV_NAND |
678 | select USB_ARCH_HAS_OHCI | 678 | select USB_ARCH_HAS_OHCI |
679 | select SAMSUNG_GPIOLIB_4BIT | 679 | select SAMSUNG_GPIOLIB_4BIT |
680 | select HAVE_S3C2410_I2C | 680 | select HAVE_S3C2410_I2C if I2C |
681 | select HAVE_S3C2410_WATCHDOG | 681 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
682 | help | 682 | help |
683 | Samsung S3C64XX series based systems | 683 | Samsung S3C64XX series based systems |
684 | 684 | ||
@@ -687,10 +687,10 @@ config ARCH_S5P64X0 | |||
687 | select CPU_V6 | 687 | select CPU_V6 |
688 | select GENERIC_GPIO | 688 | select GENERIC_GPIO |
689 | select HAVE_CLK | 689 | select HAVE_CLK |
690 | select HAVE_S3C2410_WATCHDOG | 690 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
691 | select ARCH_USES_GETTIMEOFFSET | 691 | select ARCH_USES_GETTIMEOFFSET |
692 | select HAVE_S3C2410_I2C | 692 | select HAVE_S3C2410_I2C if I2C |
693 | select HAVE_S3C_RTC | 693 | select HAVE_S3C_RTC if RTC_CLASS |
694 | help | 694 | help |
695 | Samsung S5P64X0 CPU based systems, such as the Samsung SMDK6440, | 695 | Samsung S5P64X0 CPU based systems, such as the Samsung SMDK6440, |
696 | SMDK6450. | 696 | SMDK6450. |
@@ -701,7 +701,7 @@ config ARCH_S5P6442 | |||
701 | select GENERIC_GPIO | 701 | select GENERIC_GPIO |
702 | select HAVE_CLK | 702 | select HAVE_CLK |
703 | select ARCH_USES_GETTIMEOFFSET | 703 | select ARCH_USES_GETTIMEOFFSET |
704 | select HAVE_S3C2410_WATCHDOG | 704 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
705 | help | 705 | help |
706 | Samsung S5P6442 CPU based systems | 706 | Samsung S5P6442 CPU based systems |
707 | 707 | ||
@@ -712,9 +712,9 @@ config ARCH_S5PC100 | |||
712 | select CPU_V7 | 712 | select CPU_V7 |
713 | select ARM_L1_CACHE_SHIFT_6 | 713 | select ARM_L1_CACHE_SHIFT_6 |
714 | select ARCH_USES_GETTIMEOFFSET | 714 | select ARCH_USES_GETTIMEOFFSET |
715 | select HAVE_S3C2410_I2C | 715 | select HAVE_S3C2410_I2C if I2C |
716 | select HAVE_S3C_RTC | 716 | select HAVE_S3C_RTC if RTC_CLASS |
717 | select HAVE_S3C2410_WATCHDOG | 717 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
718 | help | 718 | help |
719 | Samsung S5PC100 series based systems | 719 | Samsung S5PC100 series based systems |
720 | 720 | ||
@@ -727,9 +727,9 @@ config ARCH_S5PV210 | |||
727 | select ARM_L1_CACHE_SHIFT_6 | 727 | select ARM_L1_CACHE_SHIFT_6 |
728 | select ARCH_HAS_CPUFREQ | 728 | select ARCH_HAS_CPUFREQ |
729 | select ARCH_USES_GETTIMEOFFSET | 729 | select ARCH_USES_GETTIMEOFFSET |
730 | select HAVE_S3C2410_I2C | 730 | select HAVE_S3C2410_I2C if I2C |
731 | select HAVE_S3C_RTC | 731 | select HAVE_S3C_RTC if RTC_CLASS |
732 | select HAVE_S3C2410_WATCHDOG | 732 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
733 | help | 733 | help |
734 | Samsung S5PV210/S5PC110 series based systems | 734 | Samsung S5PV210/S5PC110 series based systems |
735 | 735 | ||
@@ -740,9 +740,9 @@ config ARCH_S5PV310 | |||
740 | select GENERIC_GPIO | 740 | select GENERIC_GPIO |
741 | select HAVE_CLK | 741 | select HAVE_CLK |
742 | select GENERIC_CLOCKEVENTS | 742 | select GENERIC_CLOCKEVENTS |
743 | select HAVE_S3C_RTC | 743 | select HAVE_S3C_RTC if RTC_CLASS |
744 | select HAVE_S3C2410_I2C | 744 | select HAVE_S3C2410_I2C if I2C |
745 | select HAVE_S3C2410_WATCHDOG | 745 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
746 | help | 746 | help |
747 | Samsung S5PV310 series based systems | 747 | Samsung S5PV310 series based systems |
748 | 748 | ||
@@ -1206,10 +1206,11 @@ config SMP | |||
1206 | depends on EXPERIMENTAL | 1206 | depends on EXPERIMENTAL |
1207 | depends on GENERIC_CLOCKEVENTS | 1207 | depends on GENERIC_CLOCKEVENTS |
1208 | depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \ | 1208 | depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \ |
1209 | MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 ||\ | 1209 | MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \ |
1210 | ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 | 1210 | ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \ |
1211 | ARCH_MSM_SCORPIONMP | ||
1211 | select USE_GENERIC_SMP_HELPERS | 1212 | select USE_GENERIC_SMP_HELPERS |
1212 | select HAVE_ARM_SCU | 1213 | select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP |
1213 | help | 1214 | help |
1214 | This enables support for systems with more than one CPU. If you have | 1215 | This enables support for systems with more than one CPU. If you have |
1215 | a system with only one CPU, like most personal computers, say N. If | 1216 | a system with only one CPU, like most personal computers, say N. If |
@@ -1284,6 +1285,7 @@ config NR_CPUS | |||
1284 | config HOTPLUG_CPU | 1285 | config HOTPLUG_CPU |
1285 | bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" | 1286 | bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" |
1286 | depends on SMP && HOTPLUG && EXPERIMENTAL | 1287 | depends on SMP && HOTPLUG && EXPERIMENTAL |
1288 | depends on !ARCH_MSM | ||
1287 | help | 1289 | help |
1288 | Say Y here to experiment with turning CPUs off and on. CPUs | 1290 | Say Y here to experiment with turning CPUs off and on. CPUs |
1289 | can be controlled through /sys/devices/system/cpu. | 1291 | can be controlled through /sys/devices/system/cpu. |
@@ -1292,7 +1294,7 @@ config LOCAL_TIMERS | |||
1292 | bool "Use local timer interrupts" | 1294 | bool "Use local timer interrupts" |
1293 | depends on SMP | 1295 | depends on SMP |
1294 | default y | 1296 | default y |
1295 | select HAVE_ARM_TWD | 1297 | select HAVE_ARM_TWD if !ARCH_MSM_SCORPIONMP |
1296 | help | 1298 | help |
1297 | Enable support for local timers on SMP platforms, rather then the | 1299 | Enable support for local timers on SMP platforms, rather then the |
1298 | legacy IPI broadcast method. Local timers allows the system | 1300 | legacy IPI broadcast method. Local timers allows the system |
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 6825c34646d4..9be21ba648cd 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S | |||
@@ -1084,6 +1084,6 @@ memdump: mov r12, r0 | |||
1084 | reloc_end: | 1084 | reloc_end: |
1085 | 1085 | ||
1086 | .align | 1086 | .align |
1087 | .section ".stack", "w" | 1087 | .section ".stack", "aw", %nobits |
1088 | user_stack: .space 4096 | 1088 | user_stack: .space 4096 |
1089 | user_stack_end: | 1089 | user_stack_end: |
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in index d08168941bd6..366a924019ac 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.in +++ b/arch/arm/boot/compressed/vmlinux.lds.in | |||
@@ -57,7 +57,7 @@ SECTIONS | |||
57 | .bss : { *(.bss) } | 57 | .bss : { *(.bss) } |
58 | _end = .; | 58 | _end = .; |
59 | 59 | ||
60 | .stack (NOLOAD) : { *(.stack) } | 60 | .stack : { *(.stack) } |
61 | 61 | ||
62 | .stab 0 : { *(.stab) } | 62 | .stab 0 : { *(.stab) } |
63 | .stabstr 0 : { *(.stabstr) } | 63 | .stabstr 0 : { *(.stabstr) } |
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 062b58c029ab..749bb6622404 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h | |||
@@ -238,7 +238,7 @@ | |||
238 | @ Slightly optimised to avoid incrementing the pointer twice | 238 | @ Slightly optimised to avoid incrementing the pointer twice |
239 | usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort | 239 | usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort |
240 | .if \rept == 2 | 240 | .if \rept == 2 |
241 | usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort | 241 | usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort |
242 | .endif | 242 | .endif |
243 | 243 | ||
244 | add\cond \ptr, #\rept * \inc | 244 | add\cond \ptr, #\rept * \inc |
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index 68870c776671..b4ffe9d5b526 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h | |||
@@ -13,6 +13,10 @@ typedef struct { | |||
13 | 13 | ||
14 | #ifdef CONFIG_CPU_HAS_ASID | 14 | #ifdef CONFIG_CPU_HAS_ASID |
15 | #define ASID(mm) ((mm)->context.id & 255) | 15 | #define ASID(mm) ((mm)->context.id & 255) |
16 | |||
17 | /* init_mm.context.id_lock should be initialized. */ | ||
18 | #define INIT_MM_CONTEXT(name) \ | ||
19 | .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock), | ||
16 | #else | 20 | #else |
17 | #define ASID(mm) (0) | 21 | #define ASID(mm) (0) |
18 | #endif | 22 | #endif |
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index b155414192da..53d1d5deb111 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h | |||
@@ -374,6 +374,9 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) | |||
374 | 374 | ||
375 | #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) | 375 | #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) |
376 | 376 | ||
377 | /* we don't need complex calculations here as the pmd is folded into the pgd */ | ||
378 | #define pmd_addr_end(addr,end) (end) | ||
379 | |||
377 | /* | 380 | /* |
378 | * Conversion functions: convert a page and protection to a page entry, | 381 | * Conversion functions: convert a page and protection to a page entry, |
379 | * and a page entry and page directory to the page they refer to. | 382 | * and a page entry and page directory to the page they refer to. |
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 07a50357492a..421a4bb88fed 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -4,9 +4,7 @@ | |||
4 | * ARM performance counter support. | 4 | * ARM performance counter support. |
5 | * | 5 | * |
6 | * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles | 6 | * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles |
7 | * | 7 | * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com> |
8 | * ARMv7 support: Jean Pihet <jpihet@mvista.com> | ||
9 | * 2010 (c) MontaVista Software, LLC. | ||
10 | * | 8 | * |
11 | * This code is based on the sparc64 perf event code, which is in turn based | 9 | * This code is based on the sparc64 perf event code, which is in turn based |
12 | * on the x86 code. Callchain code is based on the ARM OProfile backtrace | 10 | * on the x86 code. Callchain code is based on the ARM OProfile backtrace |
@@ -69,29 +67,23 @@ struct cpu_hw_events { | |||
69 | }; | 67 | }; |
70 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | 68 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); |
71 | 69 | ||
72 | /* PMU names. */ | ||
73 | static const char *arm_pmu_names[] = { | ||
74 | [ARM_PERF_PMU_ID_XSCALE1] = "xscale1", | ||
75 | [ARM_PERF_PMU_ID_XSCALE2] = "xscale2", | ||
76 | [ARM_PERF_PMU_ID_V6] = "v6", | ||
77 | [ARM_PERF_PMU_ID_V6MP] = "v6mpcore", | ||
78 | [ARM_PERF_PMU_ID_CA8] = "ARMv7 Cortex-A8", | ||
79 | [ARM_PERF_PMU_ID_CA9] = "ARMv7 Cortex-A9", | ||
80 | }; | ||
81 | |||
82 | struct arm_pmu { | 70 | struct arm_pmu { |
83 | enum arm_perf_pmu_ids id; | 71 | enum arm_perf_pmu_ids id; |
72 | const char *name; | ||
84 | irqreturn_t (*handle_irq)(int irq_num, void *dev); | 73 | irqreturn_t (*handle_irq)(int irq_num, void *dev); |
85 | void (*enable)(struct hw_perf_event *evt, int idx); | 74 | void (*enable)(struct hw_perf_event *evt, int idx); |
86 | void (*disable)(struct hw_perf_event *evt, int idx); | 75 | void (*disable)(struct hw_perf_event *evt, int idx); |
87 | int (*event_map)(int evt); | ||
88 | u64 (*raw_event)(u64); | ||
89 | int (*get_event_idx)(struct cpu_hw_events *cpuc, | 76 | int (*get_event_idx)(struct cpu_hw_events *cpuc, |
90 | struct hw_perf_event *hwc); | 77 | struct hw_perf_event *hwc); |
91 | u32 (*read_counter)(int idx); | 78 | u32 (*read_counter)(int idx); |
92 | void (*write_counter)(int idx, u32 val); | 79 | void (*write_counter)(int idx, u32 val); |
93 | void (*start)(void); | 80 | void (*start)(void); |
94 | void (*stop)(void); | 81 | void (*stop)(void); |
82 | const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX] | ||
83 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
84 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
85 | const unsigned (*event_map)[PERF_COUNT_HW_MAX]; | ||
86 | u32 raw_event_mask; | ||
95 | int num_events; | 87 | int num_events; |
96 | u64 max_period; | 88 | u64 max_period; |
97 | }; | 89 | }; |
@@ -136,10 +128,6 @@ EXPORT_SYMBOL_GPL(perf_num_counters); | |||
136 | 128 | ||
137 | #define CACHE_OP_UNSUPPORTED 0xFFFF | 129 | #define CACHE_OP_UNSUPPORTED 0xFFFF |
138 | 130 | ||
139 | static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
140 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
141 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
142 | |||
143 | static int | 131 | static int |
144 | armpmu_map_cache_event(u64 config) | 132 | armpmu_map_cache_event(u64 config) |
145 | { | 133 | { |
@@ -157,7 +145,7 @@ armpmu_map_cache_event(u64 config) | |||
157 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | 145 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) |
158 | return -EINVAL; | 146 | return -EINVAL; |
159 | 147 | ||
160 | ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result]; | 148 | ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result]; |
161 | 149 | ||
162 | if (ret == CACHE_OP_UNSUPPORTED) | 150 | if (ret == CACHE_OP_UNSUPPORTED) |
163 | return -ENOENT; | 151 | return -ENOENT; |
@@ -166,6 +154,19 @@ armpmu_map_cache_event(u64 config) | |||
166 | } | 154 | } |
167 | 155 | ||
168 | static int | 156 | static int |
157 | armpmu_map_event(u64 config) | ||
158 | { | ||
159 | int mapping = (*armpmu->event_map)[config]; | ||
160 | return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping; | ||
161 | } | ||
162 | |||
163 | static int | ||
164 | armpmu_map_raw_event(u64 config) | ||
165 | { | ||
166 | return (int)(config & armpmu->raw_event_mask); | ||
167 | } | ||
168 | |||
169 | static int | ||
169 | armpmu_event_set_period(struct perf_event *event, | 170 | armpmu_event_set_period(struct perf_event *event, |
170 | struct hw_perf_event *hwc, | 171 | struct hw_perf_event *hwc, |
171 | int idx) | 172 | int idx) |
@@ -458,11 +459,11 @@ __hw_perf_event_init(struct perf_event *event) | |||
458 | 459 | ||
459 | /* Decode the generic type into an ARM event identifier. */ | 460 | /* Decode the generic type into an ARM event identifier. */ |
460 | if (PERF_TYPE_HARDWARE == event->attr.type) { | 461 | if (PERF_TYPE_HARDWARE == event->attr.type) { |
461 | mapping = armpmu->event_map(event->attr.config); | 462 | mapping = armpmu_map_event(event->attr.config); |
462 | } else if (PERF_TYPE_HW_CACHE == event->attr.type) { | 463 | } else if (PERF_TYPE_HW_CACHE == event->attr.type) { |
463 | mapping = armpmu_map_cache_event(event->attr.config); | 464 | mapping = armpmu_map_cache_event(event->attr.config); |
464 | } else if (PERF_TYPE_RAW == event->attr.type) { | 465 | } else if (PERF_TYPE_RAW == event->attr.type) { |
465 | mapping = armpmu->raw_event(event->attr.config); | 466 | mapping = armpmu_map_raw_event(event->attr.config); |
466 | } else { | 467 | } else { |
467 | pr_debug("event type %x not supported\n", event->attr.type); | 468 | pr_debug("event type %x not supported\n", event->attr.type); |
468 | return -EOPNOTSUPP; | 469 | return -EOPNOTSUPP; |
@@ -603,2366 +604,10 @@ static struct pmu pmu = { | |||
603 | .read = armpmu_read, | 604 | .read = armpmu_read, |
604 | }; | 605 | }; |
605 | 606 | ||
606 | /* | 607 | /* Include the PMU-specific implementations. */ |
607 | * ARMv6 Performance counter handling code. | 608 | #include "perf_event_xscale.c" |
608 | * | 609 | #include "perf_event_v6.c" |
609 | * ARMv6 has 2 configurable performance counters and a single cycle counter. | 610 | #include "perf_event_v7.c" |
610 | * They all share a single reset bit but can be written to zero so we can use | ||
611 | * that for a reset. | ||
612 | * | ||
613 | * The counters can't be individually enabled or disabled so when we remove | ||
614 | * one event and replace it with another we could get spurious counts from the | ||
615 | * wrong event. However, we can take advantage of the fact that the | ||
616 | * performance counters can export events to the event bus, and the event bus | ||
617 | * itself can be monitored. This requires that we *don't* export the events to | ||
618 | * the event bus. The procedure for disabling a configurable counter is: | ||
619 | * - change the counter to count the ETMEXTOUT[0] signal (0x20). This | ||
620 | * effectively stops the counter from counting. | ||
621 | * - disable the counter's interrupt generation (each counter has it's | ||
622 | * own interrupt enable bit). | ||
623 | * Once stopped, the counter value can be written as 0 to reset. | ||
624 | * | ||
625 | * To enable a counter: | ||
626 | * - enable the counter's interrupt generation. | ||
627 | * - set the new event type. | ||
628 | * | ||
629 | * Note: the dedicated cycle counter only counts cycles and can't be | ||
630 | * enabled/disabled independently of the others. When we want to disable the | ||
631 | * cycle counter, we have to just disable the interrupt reporting and start | ||
632 | * ignoring that counter. When re-enabling, we have to reset the value and | ||
633 | * enable the interrupt. | ||
634 | */ | ||
635 | |||
636 | enum armv6_perf_types { | ||
637 | ARMV6_PERFCTR_ICACHE_MISS = 0x0, | ||
638 | ARMV6_PERFCTR_IBUF_STALL = 0x1, | ||
639 | ARMV6_PERFCTR_DDEP_STALL = 0x2, | ||
640 | ARMV6_PERFCTR_ITLB_MISS = 0x3, | ||
641 | ARMV6_PERFCTR_DTLB_MISS = 0x4, | ||
642 | ARMV6_PERFCTR_BR_EXEC = 0x5, | ||
643 | ARMV6_PERFCTR_BR_MISPREDICT = 0x6, | ||
644 | ARMV6_PERFCTR_INSTR_EXEC = 0x7, | ||
645 | ARMV6_PERFCTR_DCACHE_HIT = 0x9, | ||
646 | ARMV6_PERFCTR_DCACHE_ACCESS = 0xA, | ||
647 | ARMV6_PERFCTR_DCACHE_MISS = 0xB, | ||
648 | ARMV6_PERFCTR_DCACHE_WBACK = 0xC, | ||
649 | ARMV6_PERFCTR_SW_PC_CHANGE = 0xD, | ||
650 | ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF, | ||
651 | ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10, | ||
652 | ARMV6_PERFCTR_LSU_FULL_STALL = 0x11, | ||
653 | ARMV6_PERFCTR_WBUF_DRAINED = 0x12, | ||
654 | ARMV6_PERFCTR_CPU_CYCLES = 0xFF, | ||
655 | ARMV6_PERFCTR_NOP = 0x20, | ||
656 | }; | ||
657 | |||
658 | enum armv6_counters { | ||
659 | ARMV6_CYCLE_COUNTER = 1, | ||
660 | ARMV6_COUNTER0, | ||
661 | ARMV6_COUNTER1, | ||
662 | }; | ||
663 | |||
664 | /* | ||
665 | * The hardware events that we support. We do support cache operations but | ||
666 | * we have harvard caches and no way to combine instruction and data | ||
667 | * accesses/misses in hardware. | ||
668 | */ | ||
669 | static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = { | ||
670 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, | ||
671 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, | ||
672 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
673 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
674 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, | ||
675 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, | ||
676 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | ||
677 | }; | ||
678 | |||
679 | static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
680 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
681 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
682 | [C(L1D)] = { | ||
683 | /* | ||
684 | * The performance counters don't differentiate between read | ||
685 | * and write accesses/misses so this isn't strictly correct, | ||
686 | * but it's the best we can do. Writes and reads get | ||
687 | * combined. | ||
688 | */ | ||
689 | [C(OP_READ)] = { | ||
690 | [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, | ||
691 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, | ||
692 | }, | ||
693 | [C(OP_WRITE)] = { | ||
694 | [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, | ||
695 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, | ||
696 | }, | ||
697 | [C(OP_PREFETCH)] = { | ||
698 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
699 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
700 | }, | ||
701 | }, | ||
702 | [C(L1I)] = { | ||
703 | [C(OP_READ)] = { | ||
704 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
705 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, | ||
706 | }, | ||
707 | [C(OP_WRITE)] = { | ||
708 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
709 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, | ||
710 | }, | ||
711 | [C(OP_PREFETCH)] = { | ||
712 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
713 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
714 | }, | ||
715 | }, | ||
716 | [C(LL)] = { | ||
717 | [C(OP_READ)] = { | ||
718 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
719 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
720 | }, | ||
721 | [C(OP_WRITE)] = { | ||
722 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
723 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
724 | }, | ||
725 | [C(OP_PREFETCH)] = { | ||
726 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
727 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
728 | }, | ||
729 | }, | ||
730 | [C(DTLB)] = { | ||
731 | /* | ||
732 | * The ARM performance counters can count micro DTLB misses, | ||
733 | * micro ITLB misses and main TLB misses. There isn't an event | ||
734 | * for TLB misses, so use the micro misses here and if users | ||
735 | * want the main TLB misses they can use a raw counter. | ||
736 | */ | ||
737 | [C(OP_READ)] = { | ||
738 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
739 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, | ||
740 | }, | ||
741 | [C(OP_WRITE)] = { | ||
742 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
743 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, | ||
744 | }, | ||
745 | [C(OP_PREFETCH)] = { | ||
746 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
747 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
748 | }, | ||
749 | }, | ||
750 | [C(ITLB)] = { | ||
751 | [C(OP_READ)] = { | ||
752 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
753 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, | ||
754 | }, | ||
755 | [C(OP_WRITE)] = { | ||
756 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
757 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, | ||
758 | }, | ||
759 | [C(OP_PREFETCH)] = { | ||
760 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
761 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
762 | }, | ||
763 | }, | ||
764 | [C(BPU)] = { | ||
765 | [C(OP_READ)] = { | ||
766 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
767 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
768 | }, | ||
769 | [C(OP_WRITE)] = { | ||
770 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
771 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
772 | }, | ||
773 | [C(OP_PREFETCH)] = { | ||
774 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
775 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
776 | }, | ||
777 | }, | ||
778 | }; | ||
779 | |||
780 | enum armv6mpcore_perf_types { | ||
781 | ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0, | ||
782 | ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1, | ||
783 | ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2, | ||
784 | ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3, | ||
785 | ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4, | ||
786 | ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5, | ||
787 | ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6, | ||
788 | ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7, | ||
789 | ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8, | ||
790 | ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA, | ||
791 | ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB, | ||
792 | ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC, | ||
793 | ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD, | ||
794 | ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE, | ||
795 | ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF, | ||
796 | ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10, | ||
797 | ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11, | ||
798 | ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12, | ||
799 | ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13, | ||
800 | ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF, | ||
801 | }; | ||
802 | |||
803 | /* | ||
804 | * The hardware events that we support. We do support cache operations but | ||
805 | * we have harvard caches and no way to combine instruction and data | ||
806 | * accesses/misses in hardware. | ||
807 | */ | ||
808 | static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = { | ||
809 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, | ||
810 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, | ||
811 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
812 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
813 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, | ||
814 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, | ||
815 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | ||
816 | }; | ||
817 | |||
818 | static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
819 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
820 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
821 | [C(L1D)] = { | ||
822 | [C(OP_READ)] = { | ||
823 | [C(RESULT_ACCESS)] = | ||
824 | ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS, | ||
825 | [C(RESULT_MISS)] = | ||
826 | ARMV6MPCORE_PERFCTR_DCACHE_RDMISS, | ||
827 | }, | ||
828 | [C(OP_WRITE)] = { | ||
829 | [C(RESULT_ACCESS)] = | ||
830 | ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS, | ||
831 | [C(RESULT_MISS)] = | ||
832 | ARMV6MPCORE_PERFCTR_DCACHE_WRMISS, | ||
833 | }, | ||
834 | [C(OP_PREFETCH)] = { | ||
835 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
836 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
837 | }, | ||
838 | }, | ||
839 | [C(L1I)] = { | ||
840 | [C(OP_READ)] = { | ||
841 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
842 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, | ||
843 | }, | ||
844 | [C(OP_WRITE)] = { | ||
845 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
846 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, | ||
847 | }, | ||
848 | [C(OP_PREFETCH)] = { | ||
849 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
850 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
851 | }, | ||
852 | }, | ||
853 | [C(LL)] = { | ||
854 | [C(OP_READ)] = { | ||
855 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
856 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
857 | }, | ||
858 | [C(OP_WRITE)] = { | ||
859 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
860 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
861 | }, | ||
862 | [C(OP_PREFETCH)] = { | ||
863 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
864 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
865 | }, | ||
866 | }, | ||
867 | [C(DTLB)] = { | ||
868 | /* | ||
869 | * The ARM performance counters can count micro DTLB misses, | ||
870 | * micro ITLB misses and main TLB misses. There isn't an event | ||
871 | * for TLB misses, so use the micro misses here and if users | ||
872 | * want the main TLB misses they can use a raw counter. | ||
873 | */ | ||
874 | [C(OP_READ)] = { | ||
875 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
876 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, | ||
877 | }, | ||
878 | [C(OP_WRITE)] = { | ||
879 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
880 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, | ||
881 | }, | ||
882 | [C(OP_PREFETCH)] = { | ||
883 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
884 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
885 | }, | ||
886 | }, | ||
887 | [C(ITLB)] = { | ||
888 | [C(OP_READ)] = { | ||
889 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
890 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, | ||
891 | }, | ||
892 | [C(OP_WRITE)] = { | ||
893 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
894 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, | ||
895 | }, | ||
896 | [C(OP_PREFETCH)] = { | ||
897 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
898 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
899 | }, | ||
900 | }, | ||
901 | [C(BPU)] = { | ||
902 | [C(OP_READ)] = { | ||
903 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
904 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
905 | }, | ||
906 | [C(OP_WRITE)] = { | ||
907 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
908 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
909 | }, | ||
910 | [C(OP_PREFETCH)] = { | ||
911 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
912 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
913 | }, | ||
914 | }, | ||
915 | }; | ||
916 | |||
917 | static inline unsigned long | ||
918 | armv6_pmcr_read(void) | ||
919 | { | ||
920 | u32 val; | ||
921 | asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val)); | ||
922 | return val; | ||
923 | } | ||
924 | |||
925 | static inline void | ||
926 | armv6_pmcr_write(unsigned long val) | ||
927 | { | ||
928 | asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val)); | ||
929 | } | ||
930 | |||
931 | #define ARMV6_PMCR_ENABLE (1 << 0) | ||
932 | #define ARMV6_PMCR_CTR01_RESET (1 << 1) | ||
933 | #define ARMV6_PMCR_CCOUNT_RESET (1 << 2) | ||
934 | #define ARMV6_PMCR_CCOUNT_DIV (1 << 3) | ||
935 | #define ARMV6_PMCR_COUNT0_IEN (1 << 4) | ||
936 | #define ARMV6_PMCR_COUNT1_IEN (1 << 5) | ||
937 | #define ARMV6_PMCR_CCOUNT_IEN (1 << 6) | ||
938 | #define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8) | ||
939 | #define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9) | ||
940 | #define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10) | ||
941 | #define ARMV6_PMCR_EVT_COUNT0_SHIFT 20 | ||
942 | #define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT) | ||
943 | #define ARMV6_PMCR_EVT_COUNT1_SHIFT 12 | ||
944 | #define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT) | ||
945 | |||
946 | #define ARMV6_PMCR_OVERFLOWED_MASK \ | ||
947 | (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \ | ||
948 | ARMV6_PMCR_CCOUNT_OVERFLOW) | ||
949 | |||
950 | static inline int | ||
951 | armv6_pmcr_has_overflowed(unsigned long pmcr) | ||
952 | { | ||
953 | return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK); | ||
954 | } | ||
955 | |||
956 | static inline int | ||
957 | armv6_pmcr_counter_has_overflowed(unsigned long pmcr, | ||
958 | enum armv6_counters counter) | ||
959 | { | ||
960 | int ret = 0; | ||
961 | |||
962 | if (ARMV6_CYCLE_COUNTER == counter) | ||
963 | ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW; | ||
964 | else if (ARMV6_COUNTER0 == counter) | ||
965 | ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW; | ||
966 | else if (ARMV6_COUNTER1 == counter) | ||
967 | ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW; | ||
968 | else | ||
969 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
970 | |||
971 | return ret; | ||
972 | } | ||
973 | |||
974 | static inline u32 | ||
975 | armv6pmu_read_counter(int counter) | ||
976 | { | ||
977 | unsigned long value = 0; | ||
978 | |||
979 | if (ARMV6_CYCLE_COUNTER == counter) | ||
980 | asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value)); | ||
981 | else if (ARMV6_COUNTER0 == counter) | ||
982 | asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value)); | ||
983 | else if (ARMV6_COUNTER1 == counter) | ||
984 | asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value)); | ||
985 | else | ||
986 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
987 | |||
988 | return value; | ||
989 | } | ||
990 | |||
991 | static inline void | ||
992 | armv6pmu_write_counter(int counter, | ||
993 | u32 value) | ||
994 | { | ||
995 | if (ARMV6_CYCLE_COUNTER == counter) | ||
996 | asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); | ||
997 | else if (ARMV6_COUNTER0 == counter) | ||
998 | asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value)); | ||
999 | else if (ARMV6_COUNTER1 == counter) | ||
1000 | asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value)); | ||
1001 | else | ||
1002 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
1003 | } | ||
1004 | |||
1005 | void | ||
1006 | armv6pmu_enable_event(struct hw_perf_event *hwc, | ||
1007 | int idx) | ||
1008 | { | ||
1009 | unsigned long val, mask, evt, flags; | ||
1010 | |||
1011 | if (ARMV6_CYCLE_COUNTER == idx) { | ||
1012 | mask = 0; | ||
1013 | evt = ARMV6_PMCR_CCOUNT_IEN; | ||
1014 | } else if (ARMV6_COUNTER0 == idx) { | ||
1015 | mask = ARMV6_PMCR_EVT_COUNT0_MASK; | ||
1016 | evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) | | ||
1017 | ARMV6_PMCR_COUNT0_IEN; | ||
1018 | } else if (ARMV6_COUNTER1 == idx) { | ||
1019 | mask = ARMV6_PMCR_EVT_COUNT1_MASK; | ||
1020 | evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) | | ||
1021 | ARMV6_PMCR_COUNT1_IEN; | ||
1022 | } else { | ||
1023 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
1024 | return; | ||
1025 | } | ||
1026 | |||
1027 | /* | ||
1028 | * Mask out the current event and set the counter to count the event | ||
1029 | * that we're interested in. | ||
1030 | */ | ||
1031 | spin_lock_irqsave(&pmu_lock, flags); | ||
1032 | val = armv6_pmcr_read(); | ||
1033 | val &= ~mask; | ||
1034 | val |= evt; | ||
1035 | armv6_pmcr_write(val); | ||
1036 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1037 | } | ||
1038 | |||
1039 | static irqreturn_t | ||
1040 | armv6pmu_handle_irq(int irq_num, | ||
1041 | void *dev) | ||
1042 | { | ||
1043 | unsigned long pmcr = armv6_pmcr_read(); | ||
1044 | struct perf_sample_data data; | ||
1045 | struct cpu_hw_events *cpuc; | ||
1046 | struct pt_regs *regs; | ||
1047 | int idx; | ||
1048 | |||
1049 | if (!armv6_pmcr_has_overflowed(pmcr)) | ||
1050 | return IRQ_NONE; | ||
1051 | |||
1052 | regs = get_irq_regs(); | ||
1053 | |||
1054 | /* | ||
1055 | * The interrupts are cleared by writing the overflow flags back to | ||
1056 | * the control register. All of the other bits don't have any effect | ||
1057 | * if they are rewritten, so write the whole value back. | ||
1058 | */ | ||
1059 | armv6_pmcr_write(pmcr); | ||
1060 | |||
1061 | perf_sample_data_init(&data, 0); | ||
1062 | |||
1063 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
1064 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
1065 | struct perf_event *event = cpuc->events[idx]; | ||
1066 | struct hw_perf_event *hwc; | ||
1067 | |||
1068 | if (!test_bit(idx, cpuc->active_mask)) | ||
1069 | continue; | ||
1070 | |||
1071 | /* | ||
1072 | * We have a single interrupt for all counters. Check that | ||
1073 | * each counter has overflowed before we process it. | ||
1074 | */ | ||
1075 | if (!armv6_pmcr_counter_has_overflowed(pmcr, idx)) | ||
1076 | continue; | ||
1077 | |||
1078 | hwc = &event->hw; | ||
1079 | armpmu_event_update(event, hwc, idx); | ||
1080 | data.period = event->hw.last_period; | ||
1081 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
1082 | continue; | ||
1083 | |||
1084 | if (perf_event_overflow(event, 0, &data, regs)) | ||
1085 | armpmu->disable(hwc, idx); | ||
1086 | } | ||
1087 | |||
1088 | /* | ||
1089 | * Handle the pending perf events. | ||
1090 | * | ||
1091 | * Note: this call *must* be run with interrupts disabled. For | ||
1092 | * platforms that can have the PMU interrupts raised as an NMI, this | ||
1093 | * will not work. | ||
1094 | */ | ||
1095 | irq_work_run(); | ||
1096 | |||
1097 | return IRQ_HANDLED; | ||
1098 | } | ||
1099 | |||
1100 | static void | ||
1101 | armv6pmu_start(void) | ||
1102 | { | ||
1103 | unsigned long flags, val; | ||
1104 | |||
1105 | spin_lock_irqsave(&pmu_lock, flags); | ||
1106 | val = armv6_pmcr_read(); | ||
1107 | val |= ARMV6_PMCR_ENABLE; | ||
1108 | armv6_pmcr_write(val); | ||
1109 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1110 | } | ||
1111 | |||
1112 | void | ||
1113 | armv6pmu_stop(void) | ||
1114 | { | ||
1115 | unsigned long flags, val; | ||
1116 | |||
1117 | spin_lock_irqsave(&pmu_lock, flags); | ||
1118 | val = armv6_pmcr_read(); | ||
1119 | val &= ~ARMV6_PMCR_ENABLE; | ||
1120 | armv6_pmcr_write(val); | ||
1121 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1122 | } | ||
1123 | |||
1124 | static inline int | ||
1125 | armv6pmu_event_map(int config) | ||
1126 | { | ||
1127 | int mapping = armv6_perf_map[config]; | ||
1128 | if (HW_OP_UNSUPPORTED == mapping) | ||
1129 | mapping = -EOPNOTSUPP; | ||
1130 | return mapping; | ||
1131 | } | ||
1132 | |||
1133 | static inline int | ||
1134 | armv6mpcore_pmu_event_map(int config) | ||
1135 | { | ||
1136 | int mapping = armv6mpcore_perf_map[config]; | ||
1137 | if (HW_OP_UNSUPPORTED == mapping) | ||
1138 | mapping = -EOPNOTSUPP; | ||
1139 | return mapping; | ||
1140 | } | ||
1141 | |||
1142 | static u64 | ||
1143 | armv6pmu_raw_event(u64 config) | ||
1144 | { | ||
1145 | return config & 0xff; | ||
1146 | } | ||
1147 | |||
1148 | static int | ||
1149 | armv6pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
1150 | struct hw_perf_event *event) | ||
1151 | { | ||
1152 | 	/* Always place a cycle-count event on the cycle counter. */ | ||
1153 | if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) { | ||
1154 | if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) | ||
1155 | return -EAGAIN; | ||
1156 | |||
1157 | return ARMV6_CYCLE_COUNTER; | ||
1158 | } else { | ||
1159 | /* | ||
1160 | * For anything other than a cycle counter, try and use | ||
1161 | * counter0 and counter1. | ||
1162 | */ | ||
1163 | if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) { | ||
1164 | return ARMV6_COUNTER1; | ||
1165 | } | ||
1166 | |||
1167 | if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) { | ||
1168 | return ARMV6_COUNTER0; | ||
1169 | } | ||
1170 | |||
1171 | /* The counters are all in use. */ | ||
1172 | return -EAGAIN; | ||
1173 | } | ||
1174 | } | ||
1175 | |||
1176 | static void | ||
1177 | armv6pmu_disable_event(struct hw_perf_event *hwc, | ||
1178 | int idx) | ||
1179 | { | ||
1180 | unsigned long val, mask, evt, flags; | ||
1181 | |||
1182 | if (ARMV6_CYCLE_COUNTER == idx) { | ||
1183 | mask = ARMV6_PMCR_CCOUNT_IEN; | ||
1184 | evt = 0; | ||
1185 | } else if (ARMV6_COUNTER0 == idx) { | ||
1186 | mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK; | ||
1187 | evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT; | ||
1188 | } else if (ARMV6_COUNTER1 == idx) { | ||
1189 | mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK; | ||
1190 | evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT; | ||
1191 | } else { | ||
1192 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
1193 | return; | ||
1194 | } | ||
1195 | |||
1196 | /* | ||
1197 | * Mask out the current event and set the counter to count the number | ||
1198 | * of ETM bus signal assertion cycles. The external reporting should | ||
1199 | * be disabled and so this should never increment. | ||
1200 | */ | ||
1201 | spin_lock_irqsave(&pmu_lock, flags); | ||
1202 | val = armv6_pmcr_read(); | ||
1203 | val &= ~mask; | ||
1204 | val |= evt; | ||
1205 | armv6_pmcr_write(val); | ||
1206 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1207 | } | ||
1208 | |||
1209 | static void | ||
1210 | armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, | ||
1211 | int idx) | ||
1212 | { | ||
1213 | unsigned long val, mask, flags, evt = 0; | ||
1214 | |||
1215 | if (ARMV6_CYCLE_COUNTER == idx) { | ||
1216 | mask = ARMV6_PMCR_CCOUNT_IEN; | ||
1217 | } else if (ARMV6_COUNTER0 == idx) { | ||
1218 | mask = ARMV6_PMCR_COUNT0_IEN; | ||
1219 | } else if (ARMV6_COUNTER1 == idx) { | ||
1220 | mask = ARMV6_PMCR_COUNT1_IEN; | ||
1221 | } else { | ||
1222 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
1223 | return; | ||
1224 | } | ||
1225 | |||
1226 | /* | ||
1227 | * Unlike UP ARMv6, we don't have a way of stopping the counters. We | ||
1228 | * simply disable the interrupt reporting. | ||
1229 | */ | ||
1230 | spin_lock_irqsave(&pmu_lock, flags); | ||
1231 | val = armv6_pmcr_read(); | ||
1232 | val &= ~mask; | ||
1233 | val |= evt; | ||
1234 | armv6_pmcr_write(val); | ||
1235 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1236 | } | ||
1237 | |||
1238 | static const struct arm_pmu armv6pmu = { | ||
1239 | .id = ARM_PERF_PMU_ID_V6, | ||
1240 | .handle_irq = armv6pmu_handle_irq, | ||
1241 | .enable = armv6pmu_enable_event, | ||
1242 | .disable = armv6pmu_disable_event, | ||
1243 | .event_map = armv6pmu_event_map, | ||
1244 | .raw_event = armv6pmu_raw_event, | ||
1245 | .read_counter = armv6pmu_read_counter, | ||
1246 | .write_counter = armv6pmu_write_counter, | ||
1247 | .get_event_idx = armv6pmu_get_event_idx, | ||
1248 | .start = armv6pmu_start, | ||
1249 | .stop = armv6pmu_stop, | ||
1250 | .num_events = 3, | ||
1251 | .max_period = (1LLU << 32) - 1, | ||
1252 | }; | ||
1253 | |||
1254 | /* | ||
1255 | * ARMv6mpcore is almost identical to single core ARMv6 with the exception | ||
1256 | * that some of the events have different enumerations and that there is no | ||
1257 | * *hack* to stop the programmable counters. To stop the counters we simply | ||
1258 | * disable the interrupt reporting and update the event. When unthrottling we | ||
1259 | * reset the period and enable the interrupt reporting. | ||
1260 | */ | ||
1261 | static const struct arm_pmu armv6mpcore_pmu = { | ||
1262 | .id = ARM_PERF_PMU_ID_V6MP, | ||
1263 | .handle_irq = armv6pmu_handle_irq, | ||
1264 | .enable = armv6pmu_enable_event, | ||
1265 | .disable = armv6mpcore_pmu_disable_event, | ||
1266 | .event_map = armv6mpcore_pmu_event_map, | ||
1267 | .raw_event = armv6pmu_raw_event, | ||
1268 | .read_counter = armv6pmu_read_counter, | ||
1269 | .write_counter = armv6pmu_write_counter, | ||
1270 | .get_event_idx = armv6pmu_get_event_idx, | ||
1271 | .start = armv6pmu_start, | ||
1272 | .stop = armv6pmu_stop, | ||
1273 | .num_events = 3, | ||
1274 | .max_period = (1LLU << 32) - 1, | ||
1275 | }; | ||
1276 | |||
1277 | /* | ||
1278 | * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code. | ||
1279 | * | ||
1280 | * Copied from ARMv6 code, with the low level code inspired | ||
1281 | * by the ARMv7 Oprofile code. | ||
1282 | * | ||
1283 | * Cortex-A8 has up to 4 configurable performance counters and | ||
1284 | * a single cycle counter. | ||
1285 | * Cortex-A9 has up to 31 configurable performance counters and | ||
1286 | * a single cycle counter. | ||
1287 | * | ||
1288 | * All counters can be enabled/disabled and IRQ masked separately. The cycle | ||
1289 | * counter can be reset on its own; the event counters can be reset as a group. | ||
1290 | */ | ||
1291 | |||
1292 | /* Common ARMv7 event types */ | ||
1293 | enum armv7_perf_types { | ||
1294 | ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, | ||
1295 | ARMV7_PERFCTR_IFETCH_MISS = 0x01, | ||
1296 | ARMV7_PERFCTR_ITLB_MISS = 0x02, | ||
1297 | ARMV7_PERFCTR_DCACHE_REFILL = 0x03, | ||
1298 | ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, | ||
1299 | ARMV7_PERFCTR_DTLB_REFILL = 0x05, | ||
1300 | ARMV7_PERFCTR_DREAD = 0x06, | ||
1301 | ARMV7_PERFCTR_DWRITE = 0x07, | ||
1302 | |||
1303 | ARMV7_PERFCTR_EXC_TAKEN = 0x09, | ||
1304 | ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, | ||
1305 | ARMV7_PERFCTR_CID_WRITE = 0x0B, | ||
1306 | /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. | ||
1307 | * It counts: | ||
1308 | * - all branch instructions, | ||
1309 | * - instructions that explicitly write the PC, | ||
1310 | * - exception generating instructions. | ||
1311 | */ | ||
1312 | ARMV7_PERFCTR_PC_WRITE = 0x0C, | ||
1313 | ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, | ||
1314 | ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, | ||
1315 | ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, | ||
1316 | ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, | ||
1317 | |||
1318 | ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12, | ||
1319 | |||
1320 | ARMV7_PERFCTR_CPU_CYCLES = 0xFF | ||
1321 | }; | ||
1322 | |||
1323 | /* ARMv7 Cortex-A8 specific event types */ | ||
1324 | enum armv7_a8_perf_types { | ||
1325 | ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, | ||
1326 | |||
1327 | ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, | ||
1328 | |||
1329 | ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, | ||
1330 | ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, | ||
1331 | ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, | ||
1332 | ARMV7_PERFCTR_L2_ACCESS = 0x43, | ||
1333 | ARMV7_PERFCTR_L2_CACH_MISS = 0x44, | ||
1334 | ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45, | ||
1335 | ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46, | ||
1336 | ARMV7_PERFCTR_MEMORY_REPLAY = 0x47, | ||
1337 | ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48, | ||
1338 | ARMV7_PERFCTR_L1_DATA_MISS = 0x49, | ||
1339 | ARMV7_PERFCTR_L1_INST_MISS = 0x4A, | ||
1340 | ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B, | ||
1341 | ARMV7_PERFCTR_L1_NEON_DATA = 0x4C, | ||
1342 | ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D, | ||
1343 | ARMV7_PERFCTR_L2_NEON = 0x4E, | ||
1344 | ARMV7_PERFCTR_L2_NEON_HIT = 0x4F, | ||
1345 | ARMV7_PERFCTR_L1_INST = 0x50, | ||
1346 | ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51, | ||
1347 | ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52, | ||
1348 | ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53, | ||
1349 | ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54, | ||
1350 | ARMV7_PERFCTR_OP_EXECUTED = 0x55, | ||
1351 | ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56, | ||
1352 | ARMV7_PERFCTR_CYCLES_INST = 0x57, | ||
1353 | ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58, | ||
1354 | ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59, | ||
1355 | ARMV7_PERFCTR_NEON_CYCLES = 0x5A, | ||
1356 | |||
1357 | ARMV7_PERFCTR_PMU0_EVENTS = 0x70, | ||
1358 | ARMV7_PERFCTR_PMU1_EVENTS = 0x71, | ||
1359 | ARMV7_PERFCTR_PMU_EVENTS = 0x72, | ||
1360 | }; | ||
1361 | |||
1362 | /* ARMv7 Cortex-A9 specific event types */ | ||
1363 | enum armv7_a9_perf_types { | ||
1364 | ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40, | ||
1365 | ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41, | ||
1366 | ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42, | ||
1367 | |||
1368 | ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50, | ||
1369 | ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51, | ||
1370 | |||
1371 | ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60, | ||
1372 | ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61, | ||
1373 | ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62, | ||
1374 | ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63, | ||
1375 | ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64, | ||
1376 | ARMV7_PERFCTR_DATA_EVICTION = 0x65, | ||
1377 | ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66, | ||
1378 | ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67, | ||
1379 | ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68, | ||
1380 | |||
1381 | ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E, | ||
1382 | |||
1383 | ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70, | ||
1384 | ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71, | ||
1385 | ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72, | ||
1386 | ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73, | ||
1387 | ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74, | ||
1388 | |||
1389 | ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80, | ||
1390 | ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81, | ||
1391 | ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82, | ||
1392 | ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83, | ||
1393 | ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84, | ||
1394 | ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85, | ||
1395 | ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86, | ||
1396 | |||
1397 | ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A, | ||
1398 | ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B, | ||
1399 | |||
1400 | ARMV7_PERFCTR_ISB_INST = 0x90, | ||
1401 | ARMV7_PERFCTR_DSB_INST = 0x91, | ||
1402 | ARMV7_PERFCTR_DMB_INST = 0x92, | ||
1403 | ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93, | ||
1404 | |||
1405 | ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0, | ||
1406 | ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1, | ||
1407 | ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2, | ||
1408 | ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3, | ||
1409 | ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4, | ||
1410 | ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 | ||
1411 | }; | ||
1412 | |||
1413 | /* | ||
1414 | * Cortex-A8 HW events mapping | ||
1415 | * | ||
1416 | * The hardware events that we support. We do support cache operations but | ||
1417 | * we have Harvard caches and no way to combine instruction and data | ||
1418 | * accesses/misses in hardware. | ||
1419 | */ | ||
1420 | static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { | ||
1421 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | ||
1422 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | ||
1423 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
1424 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
1425 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | ||
1426 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1427 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, | ||
1428 | }; | ||
1429 | |||
1430 | static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
1431 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1432 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1433 | [C(L1D)] = { | ||
1434 | /* | ||
1435 | * The performance counters don't differentiate between read | ||
1436 | 		 * and write accesses/misses, so this isn't strictly correct, | ||
1437 | 		 * but it's the best we can do: writes and reads get combined | ||
1438 | 		 * (see the cache-event sketch below this map). | ||
1439 | */ | ||
1440 | [C(OP_READ)] = { | ||
1441 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
1442 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
1443 | }, | ||
1444 | [C(OP_WRITE)] = { | ||
1445 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
1446 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
1447 | }, | ||
1448 | [C(OP_PREFETCH)] = { | ||
1449 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1450 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1451 | }, | ||
1452 | }, | ||
1453 | [C(L1I)] = { | ||
1454 | [C(OP_READ)] = { | ||
1455 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, | ||
1456 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, | ||
1457 | }, | ||
1458 | [C(OP_WRITE)] = { | ||
1459 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, | ||
1460 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, | ||
1461 | }, | ||
1462 | [C(OP_PREFETCH)] = { | ||
1463 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1464 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1465 | }, | ||
1466 | }, | ||
1467 | [C(LL)] = { | ||
1468 | [C(OP_READ)] = { | ||
1469 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, | ||
1470 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, | ||
1471 | }, | ||
1472 | [C(OP_WRITE)] = { | ||
1473 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, | ||
1474 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, | ||
1475 | }, | ||
1476 | [C(OP_PREFETCH)] = { | ||
1477 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1478 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1479 | }, | ||
1480 | }, | ||
1481 | [C(DTLB)] = { | ||
1482 | /* | ||
1483 | * Only ITLB misses and DTLB refills are supported. | ||
1484 | 		 * Anything else has to be requested with a raw | ||
1485 | 		 * counter. | ||
1486 | */ | ||
1487 | [C(OP_READ)] = { | ||
1488 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1489 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
1490 | }, | ||
1491 | [C(OP_WRITE)] = { | ||
1492 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1493 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
1494 | }, | ||
1495 | [C(OP_PREFETCH)] = { | ||
1496 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1497 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1498 | }, | ||
1499 | }, | ||
1500 | [C(ITLB)] = { | ||
1501 | [C(OP_READ)] = { | ||
1502 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1503 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
1504 | }, | ||
1505 | [C(OP_WRITE)] = { | ||
1506 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1507 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
1508 | }, | ||
1509 | [C(OP_PREFETCH)] = { | ||
1510 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1511 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1512 | }, | ||
1513 | }, | ||
1514 | [C(BPU)] = { | ||
1515 | [C(OP_READ)] = { | ||
1516 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
1517 | [C(RESULT_MISS)] | ||
1518 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1519 | }, | ||
1520 | [C(OP_WRITE)] = { | ||
1521 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
1522 | [C(RESULT_MISS)] | ||
1523 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1524 | }, | ||
1525 | [C(OP_PREFETCH)] = { | ||
1526 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1527 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1528 | }, | ||
1529 | }, | ||
1530 | }; | ||
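/*
 * Illustrative user-space sketch of how the cache map above is reached.
 * A PERF_TYPE_HW_CACHE config packs the cache id, the op and the result
 * into one value (id | op << 8 | result << 16). Since the A8 map programs
 * ARMV7_PERFCTR_DCACHE_REFILL for both L1D read misses and L1D write
 * misses, the two requests shown at the end count the same hardware event.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_cache_event(unsigned int cache, unsigned int op,
			    unsigned int result)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HW_CACHE;
	attr.config = cache | (op << 8) | (result << 16);
	attr.disabled = 1;

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

/*
 * Both of these end up on ARMV7_PERFCTR_DCACHE_REFILL via the map above:
 *
 *	open_cache_event(PERF_COUNT_HW_CACHE_L1D,
 *			 PERF_COUNT_HW_CACHE_OP_READ,
 *			 PERF_COUNT_HW_CACHE_RESULT_MISS);
 *	open_cache_event(PERF_COUNT_HW_CACHE_L1D,
 *			 PERF_COUNT_HW_CACHE_OP_WRITE,
 *			 PERF_COUNT_HW_CACHE_RESULT_MISS);
 */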
1531 | |||
1532 | /* | ||
1533 | * Cortex-A9 HW events mapping | ||
1534 | */ | ||
1535 | static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { | ||
1536 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | ||
1537 | [PERF_COUNT_HW_INSTRUCTIONS] = | ||
1538 | ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE, | ||
1539 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT, | ||
1540 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS, | ||
1541 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | ||
1542 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1543 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, | ||
1544 | }; | ||
1545 | |||
1546 | static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
1547 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1548 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1549 | [C(L1D)] = { | ||
1550 | /* | ||
1551 | * The performance counters don't differentiate between read | ||
1552 | * and write accesses/misses so this isn't strictly correct, | ||
1553 | * but it's the best we can do. Writes and reads get | ||
1554 | * combined. | ||
1555 | */ | ||
1556 | [C(OP_READ)] = { | ||
1557 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
1558 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
1559 | }, | ||
1560 | [C(OP_WRITE)] = { | ||
1561 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
1562 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
1563 | }, | ||
1564 | [C(OP_PREFETCH)] = { | ||
1565 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1566 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1567 | }, | ||
1568 | }, | ||
1569 | [C(L1I)] = { | ||
1570 | [C(OP_READ)] = { | ||
1571 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1572 | [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, | ||
1573 | }, | ||
1574 | [C(OP_WRITE)] = { | ||
1575 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1576 | [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, | ||
1577 | }, | ||
1578 | [C(OP_PREFETCH)] = { | ||
1579 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1580 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1581 | }, | ||
1582 | }, | ||
1583 | [C(LL)] = { | ||
1584 | [C(OP_READ)] = { | ||
1585 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1586 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1587 | }, | ||
1588 | [C(OP_WRITE)] = { | ||
1589 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1590 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1591 | }, | ||
1592 | [C(OP_PREFETCH)] = { | ||
1593 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1594 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1595 | }, | ||
1596 | }, | ||
1597 | [C(DTLB)] = { | ||
1598 | /* | ||
1599 | * Only ITLB misses and DTLB refills are supported. | ||
1600 | 		 * Anything else has to be requested with a raw | ||
1601 | 		 * counter. | ||
1602 | */ | ||
1603 | [C(OP_READ)] = { | ||
1604 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1605 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
1606 | }, | ||
1607 | [C(OP_WRITE)] = { | ||
1608 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1609 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
1610 | }, | ||
1611 | [C(OP_PREFETCH)] = { | ||
1612 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1613 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1614 | }, | ||
1615 | }, | ||
1616 | [C(ITLB)] = { | ||
1617 | [C(OP_READ)] = { | ||
1618 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1619 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
1620 | }, | ||
1621 | [C(OP_WRITE)] = { | ||
1622 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1623 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
1624 | }, | ||
1625 | [C(OP_PREFETCH)] = { | ||
1626 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1627 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1628 | }, | ||
1629 | }, | ||
1630 | [C(BPU)] = { | ||
1631 | [C(OP_READ)] = { | ||
1632 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
1633 | [C(RESULT_MISS)] | ||
1634 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1635 | }, | ||
1636 | [C(OP_WRITE)] = { | ||
1637 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
1638 | [C(RESULT_MISS)] | ||
1639 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
1640 | }, | ||
1641 | [C(OP_PREFETCH)] = { | ||
1642 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
1643 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
1644 | }, | ||
1645 | }, | ||
1646 | }; | ||
1647 | |||
1648 | /* | ||
1649 | * Perf Events counters | ||
1650 | */ | ||
1651 | enum armv7_counters { | ||
1652 | ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */ | ||
1653 | ARMV7_COUNTER0 = 2, /* First event counter */ | ||
1654 | }; | ||
1655 | |||
1656 | /* | ||
1657 | * The cycle counter is ARMV7_CYCLE_COUNTER. | ||
1658 | * The first event counter is ARMV7_COUNTER0. | ||
1659 | * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1). | ||
1660 | */ | ||
1661 | #define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1) | ||
1662 | |||
1663 | /* | ||
1664 | * ARMv7 low level PMNC access | ||
1665 | */ | ||
1666 | |||
1667 | /* | ||
1668 | * Per-CPU PMNC: config reg | ||
1669 | */ | ||
1670 | #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */ | ||
1671 | #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */ | ||
1672 | #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */ | ||
1673 | #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */ | ||
1674 | #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */ | ||
1675 | #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */ | ||
1676 | #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */ | ||
1677 | #define ARMV7_PMNC_N_MASK 0x1f | ||
1678 | #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ | ||
1679 | |||
1680 | /* | ||
1681 | * Available counters | ||
1682 | */ | ||
1683 | #define ARMV7_CNT0 0 /* First event counter */ | ||
1684 | #define ARMV7_CCNT 31 /* Cycle counter */ | ||
1685 | |||
1686 | /* Perf Event to low level counters mapping */ | ||
1687 | #define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0) | ||
1688 | |||
1689 | /* | ||
1690 | * CNTENS: counters enable reg | ||
1691 | */ | ||
1692 | #define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1693 | #define ARMV7_CNTENS_C (1 << ARMV7_CCNT) | ||
1694 | |||
1695 | /* | ||
1696 | * CNTENC: counters disable reg | ||
1697 | */ | ||
1698 | #define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1699 | #define ARMV7_CNTENC_C (1 << ARMV7_CCNT) | ||
1700 | |||
1701 | /* | ||
1702 | * INTENS: counters overflow interrupt enable reg | ||
1703 | */ | ||
1704 | #define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1705 | #define ARMV7_INTENS_C (1 << ARMV7_CCNT) | ||
1706 | |||
1707 | /* | ||
1708 | * INTENC: counters overflow interrupt disable reg | ||
1709 | */ | ||
1710 | #define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1711 | #define ARMV7_INTENC_C (1 << ARMV7_CCNT) | ||
1712 | |||
1713 | /* | ||
1714 | * EVTSEL: Event selection reg | ||
1715 | */ | ||
1716 | #define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */ | ||
1717 | |||
1718 | /* | ||
1719 | * SELECT: Counter selection reg | ||
1720 | */ | ||
1721 | #define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */ | ||
1722 | |||
1723 | /* | ||
1724 | * FLAG: counters overflow flag status reg | ||
1725 | */ | ||
1726 | #define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
1727 | #define ARMV7_FLAG_C (1 << ARMV7_CCNT) | ||
1728 | #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ | ||
1729 | #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK | ||
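/*
 * Standalone illustration of the idx translation used by the *_P() macros
 * above: perf counter indices start at ARMV7_COUNTER0 (2) while hardware
 * event counters start at CNT0 (0), so each per-counter bit is
 * (idx - ARMV7_EVENT_CNT_TO_CNTx); the cycle counter always uses bit 31.
 * The EX_* names simply mirror those macros so the snippet compiles on
 * its own.
 */
#include <stdio.h>

#define EX_COUNTER0		2			/* ARMV7_COUNTER0 */
#define EX_CNT_TO_CNTx		(EX_COUNTER0 - 0)	/* ARMV7_EVENT_CNT_TO_CNTx */
#define EX_CNTENS_P(idx)	(1u << ((idx) - EX_CNT_TO_CNTx))
#define EX_CNTENS_C		(1u << 31)		/* ARMV7_CNTENS_C */

int main(void)
{
	/* perf idx 2 -> CNT0 -> bit 0; perf idx 5 -> CNT3 -> bit 3 */
	printf("idx 2 -> %#x, idx 5 -> %#x, cycle counter -> %#x\n",
	       EX_CNTENS_P(2), EX_CNTENS_P(5), EX_CNTENS_C);
	return 0;
}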
1730 | |||
1731 | static inline unsigned long armv7_pmnc_read(void) | ||
1732 | { | ||
1733 | u32 val; | ||
1734 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); | ||
1735 | return val; | ||
1736 | } | ||
1737 | |||
1738 | static inline void armv7_pmnc_write(unsigned long val) | ||
1739 | { | ||
1740 | val &= ARMV7_PMNC_MASK; | ||
1741 | asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); | ||
1742 | } | ||
1743 | |||
1744 | static inline int armv7_pmnc_has_overflowed(unsigned long pmnc) | ||
1745 | { | ||
1746 | return pmnc & ARMV7_OVERFLOWED_MASK; | ||
1747 | } | ||
1748 | |||
1749 | static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, | ||
1750 | enum armv7_counters counter) | ||
1751 | { | ||
1752 | int ret = 0; | ||
1753 | |||
1754 | if (counter == ARMV7_CYCLE_COUNTER) | ||
1755 | ret = pmnc & ARMV7_FLAG_C; | ||
1756 | else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST)) | ||
1757 | ret = pmnc & ARMV7_FLAG_P(counter); | ||
1758 | else | ||
1759 | pr_err("CPU%u checking wrong counter %d overflow status\n", | ||
1760 | smp_processor_id(), counter); | ||
1761 | |||
1762 | return ret; | ||
1763 | } | ||
1764 | |||
1765 | static inline int armv7_pmnc_select_counter(unsigned int idx) | ||
1766 | { | ||
1767 | u32 val; | ||
1768 | |||
1769 | if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) { | ||
1770 | pr_err("CPU%u selecting wrong PMNC counter" | ||
1771 | " %d\n", smp_processor_id(), idx); | ||
1772 | return -1; | ||
1773 | } | ||
1774 | |||
1775 | val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK; | ||
1776 | asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val)); | ||
1777 | |||
1778 | return idx; | ||
1779 | } | ||
1780 | |||
1781 | static inline u32 armv7pmu_read_counter(int idx) | ||
1782 | { | ||
1783 | unsigned long value = 0; | ||
1784 | |||
1785 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1786 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); | ||
1787 | else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { | ||
1788 | if (armv7_pmnc_select_counter(idx) == idx) | ||
1789 | asm volatile("mrc p15, 0, %0, c9, c13, 2" | ||
1790 | : "=r" (value)); | ||
1791 | } else | ||
1792 | pr_err("CPU%u reading wrong counter %d\n", | ||
1793 | smp_processor_id(), idx); | ||
1794 | |||
1795 | return value; | ||
1796 | } | ||
1797 | |||
1798 | static inline void armv7pmu_write_counter(int idx, u32 value) | ||
1799 | { | ||
1800 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1801 | asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); | ||
1802 | else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { | ||
1803 | if (armv7_pmnc_select_counter(idx) == idx) | ||
1804 | asm volatile("mcr p15, 0, %0, c9, c13, 2" | ||
1805 | : : "r" (value)); | ||
1806 | } else | ||
1807 | pr_err("CPU%u writing wrong counter %d\n", | ||
1808 | smp_processor_id(), idx); | ||
1809 | } | ||
1810 | |||
1811 | static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val) | ||
1812 | { | ||
1813 | if (armv7_pmnc_select_counter(idx) == idx) { | ||
1814 | val &= ARMV7_EVTSEL_MASK; | ||
1815 | asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); | ||
1816 | } | ||
1817 | } | ||
1818 | |||
1819 | static inline u32 armv7_pmnc_enable_counter(unsigned int idx) | ||
1820 | { | ||
1821 | u32 val; | ||
1822 | |||
1823 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
1824 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
1825 | pr_err("CPU%u enabling wrong PMNC counter" | ||
1826 | " %d\n", smp_processor_id(), idx); | ||
1827 | return -1; | ||
1828 | } | ||
1829 | |||
1830 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1831 | val = ARMV7_CNTENS_C; | ||
1832 | else | ||
1833 | val = ARMV7_CNTENS_P(idx); | ||
1834 | |||
1835 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val)); | ||
1836 | |||
1837 | return idx; | ||
1838 | } | ||
1839 | |||
1840 | static inline u32 armv7_pmnc_disable_counter(unsigned int idx) | ||
1841 | { | ||
1842 | u32 val; | ||
1843 | |||
1844 | |||
1845 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
1846 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
1847 | pr_err("CPU%u disabling wrong PMNC counter" | ||
1848 | " %d\n", smp_processor_id(), idx); | ||
1849 | return -1; | ||
1850 | } | ||
1851 | |||
1852 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1853 | val = ARMV7_CNTENC_C; | ||
1854 | else | ||
1855 | val = ARMV7_CNTENC_P(idx); | ||
1856 | |||
1857 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val)); | ||
1858 | |||
1859 | return idx; | ||
1860 | } | ||
1861 | |||
1862 | static inline u32 armv7_pmnc_enable_intens(unsigned int idx) | ||
1863 | { | ||
1864 | u32 val; | ||
1865 | |||
1866 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
1867 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
1868 | pr_err("CPU%u enabling wrong PMNC counter" | ||
1869 | " interrupt enable %d\n", smp_processor_id(), idx); | ||
1870 | return -1; | ||
1871 | } | ||
1872 | |||
1873 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1874 | val = ARMV7_INTENS_C; | ||
1875 | else | ||
1876 | val = ARMV7_INTENS_P(idx); | ||
1877 | |||
1878 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val)); | ||
1879 | |||
1880 | return idx; | ||
1881 | } | ||
1882 | |||
1883 | static inline u32 armv7_pmnc_disable_intens(unsigned int idx) | ||
1884 | { | ||
1885 | u32 val; | ||
1886 | |||
1887 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
1888 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
1889 | pr_err("CPU%u disabling wrong PMNC counter" | ||
1890 | " interrupt enable %d\n", smp_processor_id(), idx); | ||
1891 | return -1; | ||
1892 | } | ||
1893 | |||
1894 | if (idx == ARMV7_CYCLE_COUNTER) | ||
1895 | val = ARMV7_INTENC_C; | ||
1896 | else | ||
1897 | val = ARMV7_INTENC_P(idx); | ||
1898 | |||
1899 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val)); | ||
1900 | |||
1901 | return idx; | ||
1902 | } | ||
1903 | |||
1904 | static inline u32 armv7_pmnc_getreset_flags(void) | ||
1905 | { | ||
1906 | u32 val; | ||
1907 | |||
1908 | /* Read */ | ||
1909 | asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); | ||
1910 | |||
1911 | /* Write to clear flags */ | ||
1912 | val &= ARMV7_FLAG_MASK; | ||
1913 | asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val)); | ||
1914 | |||
1915 | return val; | ||
1916 | } | ||
1917 | |||
1918 | #ifdef DEBUG | ||
1919 | static void armv7_pmnc_dump_regs(void) | ||
1920 | { | ||
1921 | u32 val; | ||
1922 | unsigned int cnt; | ||
1923 | |||
1924 | printk(KERN_INFO "PMNC registers dump:\n"); | ||
1925 | |||
1926 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); | ||
1927 | printk(KERN_INFO "PMNC =0x%08x\n", val); | ||
1928 | |||
1929 | asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val)); | ||
1930 | printk(KERN_INFO "CNTENS=0x%08x\n", val); | ||
1931 | |||
1932 | asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val)); | ||
1933 | printk(KERN_INFO "INTENS=0x%08x\n", val); | ||
1934 | |||
1935 | asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); | ||
1936 | printk(KERN_INFO "FLAGS =0x%08x\n", val); | ||
1937 | |||
1938 | asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val)); | ||
1939 | printk(KERN_INFO "SELECT=0x%08x\n", val); | ||
1940 | |||
1941 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); | ||
1942 | printk(KERN_INFO "CCNT =0x%08x\n", val); | ||
1943 | |||
1944 | for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) { | ||
1945 | armv7_pmnc_select_counter(cnt); | ||
1946 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); | ||
1947 | printk(KERN_INFO "CNT[%d] count =0x%08x\n", | ||
1948 | cnt-ARMV7_EVENT_CNT_TO_CNTx, val); | ||
1949 | asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); | ||
1950 | printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", | ||
1951 | cnt-ARMV7_EVENT_CNT_TO_CNTx, val); | ||
1952 | } | ||
1953 | } | ||
1954 | #endif | ||
1955 | |||
1956 | void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
1957 | { | ||
1958 | unsigned long flags; | ||
1959 | |||
1960 | /* | ||
1961 | * Enable counter and interrupt, and set the counter to count | ||
1962 | * the event that we're interested in. | ||
1963 | */ | ||
1964 | spin_lock_irqsave(&pmu_lock, flags); | ||
1965 | |||
1966 | /* | ||
1967 | * Disable counter | ||
1968 | */ | ||
1969 | armv7_pmnc_disable_counter(idx); | ||
1970 | |||
1971 | /* | ||
1972 | * Set event (if destined for PMNx counters) | ||
1973 | * We don't need to set the event if it's a cycle count | ||
1974 | */ | ||
1975 | if (idx != ARMV7_CYCLE_COUNTER) | ||
1976 | armv7_pmnc_write_evtsel(idx, hwc->config_base); | ||
1977 | |||
1978 | /* | ||
1979 | * Enable interrupt for this counter | ||
1980 | */ | ||
1981 | armv7_pmnc_enable_intens(idx); | ||
1982 | |||
1983 | /* | ||
1984 | * Enable counter | ||
1985 | */ | ||
1986 | armv7_pmnc_enable_counter(idx); | ||
1987 | |||
1988 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
1989 | } | ||
1990 | |||
1991 | static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
1992 | { | ||
1993 | unsigned long flags; | ||
1994 | |||
1995 | /* | ||
1996 | * Disable counter and interrupt | ||
1997 | */ | ||
1998 | spin_lock_irqsave(&pmu_lock, flags); | ||
1999 | |||
2000 | /* | ||
2001 | * Disable counter | ||
2002 | */ | ||
2003 | armv7_pmnc_disable_counter(idx); | ||
2004 | |||
2005 | /* | ||
2006 | * Disable interrupt for this counter | ||
2007 | */ | ||
2008 | armv7_pmnc_disable_intens(idx); | ||
2009 | |||
2010 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2011 | } | ||
2012 | |||
2013 | static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | ||
2014 | { | ||
2015 | unsigned long pmnc; | ||
2016 | struct perf_sample_data data; | ||
2017 | struct cpu_hw_events *cpuc; | ||
2018 | struct pt_regs *regs; | ||
2019 | int idx; | ||
2020 | |||
2021 | /* | ||
2022 | * Get and reset the IRQ flags | ||
2023 | */ | ||
2024 | pmnc = armv7_pmnc_getreset_flags(); | ||
2025 | |||
2026 | /* | ||
2027 | * Did an overflow occur? | ||
2028 | */ | ||
2029 | if (!armv7_pmnc_has_overflowed(pmnc)) | ||
2030 | return IRQ_NONE; | ||
2031 | |||
2032 | /* | ||
2033 | * Handle the counter(s) overflow(s) | ||
2034 | */ | ||
2035 | regs = get_irq_regs(); | ||
2036 | |||
2037 | perf_sample_data_init(&data, 0); | ||
2038 | |||
2039 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
2040 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
2041 | struct perf_event *event = cpuc->events[idx]; | ||
2042 | struct hw_perf_event *hwc; | ||
2043 | |||
2044 | if (!test_bit(idx, cpuc->active_mask)) | ||
2045 | continue; | ||
2046 | |||
2047 | /* | ||
2048 | * We have a single interrupt for all counters. Check that | ||
2049 | * each counter has overflowed before we process it. | ||
2050 | */ | ||
2051 | if (!armv7_pmnc_counter_has_overflowed(pmnc, idx)) | ||
2052 | continue; | ||
2053 | |||
2054 | hwc = &event->hw; | ||
2055 | armpmu_event_update(event, hwc, idx); | ||
2056 | data.period = event->hw.last_period; | ||
2057 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
2058 | continue; | ||
2059 | |||
2060 | if (perf_event_overflow(event, 0, &data, regs)) | ||
2061 | armpmu->disable(hwc, idx); | ||
2062 | } | ||
2063 | |||
2064 | /* | ||
2065 | * Handle the pending perf events. | ||
2066 | * | ||
2067 | * Note: this call *must* be run with interrupts disabled. For | ||
2068 | * platforms that can have the PMU interrupts raised as an NMI, this | ||
2069 | * will not work. | ||
2070 | */ | ||
2071 | irq_work_run(); | ||
2072 | |||
2073 | return IRQ_HANDLED; | ||
2074 | } | ||
2075 | |||
2076 | static void armv7pmu_start(void) | ||
2077 | { | ||
2078 | unsigned long flags; | ||
2079 | |||
2080 | spin_lock_irqsave(&pmu_lock, flags); | ||
2081 | /* Enable all counters */ | ||
2082 | armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); | ||
2083 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2084 | } | ||
2085 | |||
2086 | static void armv7pmu_stop(void) | ||
2087 | { | ||
2088 | unsigned long flags; | ||
2089 | |||
2090 | spin_lock_irqsave(&pmu_lock, flags); | ||
2091 | /* Disable all counters */ | ||
2092 | armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); | ||
2093 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2094 | } | ||
2095 | |||
2096 | static inline int armv7_a8_pmu_event_map(int config) | ||
2097 | { | ||
2098 | int mapping = armv7_a8_perf_map[config]; | ||
2099 | if (HW_OP_UNSUPPORTED == mapping) | ||
2100 | mapping = -EOPNOTSUPP; | ||
2101 | return mapping; | ||
2102 | } | ||
2103 | |||
2104 | static inline int armv7_a9_pmu_event_map(int config) | ||
2105 | { | ||
2106 | int mapping = armv7_a9_perf_map[config]; | ||
2107 | if (HW_OP_UNSUPPORTED == mapping) | ||
2108 | mapping = -EOPNOTSUPP; | ||
2109 | return mapping; | ||
2110 | } | ||
2111 | |||
2112 | static u64 armv7pmu_raw_event(u64 config) | ||
2113 | { | ||
2114 | return config & 0xff; | ||
2115 | } | ||
2116 | |||
2117 | static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
2118 | struct hw_perf_event *event) | ||
2119 | { | ||
2120 | int idx; | ||
2121 | |||
2122 | 	/* Always place a cycle-count event on the cycle counter. */ | ||
2123 | if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) { | ||
2124 | if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask)) | ||
2125 | return -EAGAIN; | ||
2126 | |||
2127 | return ARMV7_CYCLE_COUNTER; | ||
2128 | } else { | ||
2129 | /* | ||
2130 | 		 * For anything other than a cycle counter, try to use | ||
2131 | 		 * the event counters. | ||
2132 | */ | ||
2133 | for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) { | ||
2134 | if (!test_and_set_bit(idx, cpuc->used_mask)) | ||
2135 | return idx; | ||
2136 | } | ||
2137 | |||
2138 | /* The counters are all in use. */ | ||
2139 | return -EAGAIN; | ||
2140 | } | ||
2141 | } | ||
2142 | |||
2143 | static struct arm_pmu armv7pmu = { | ||
2144 | .handle_irq = armv7pmu_handle_irq, | ||
2145 | .enable = armv7pmu_enable_event, | ||
2146 | .disable = armv7pmu_disable_event, | ||
2147 | .raw_event = armv7pmu_raw_event, | ||
2148 | .read_counter = armv7pmu_read_counter, | ||
2149 | .write_counter = armv7pmu_write_counter, | ||
2150 | .get_event_idx = armv7pmu_get_event_idx, | ||
2151 | .start = armv7pmu_start, | ||
2152 | .stop = armv7pmu_stop, | ||
2153 | .max_period = (1LLU << 32) - 1, | ||
2154 | }; | ||
2155 | |||
2156 | static u32 __init armv7_reset_read_pmnc(void) | ||
2157 | { | ||
2158 | u32 nb_cnt; | ||
2159 | |||
2160 | /* Initialize & Reset PMNC: C and P bits */ | ||
2161 | armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); | ||
2162 | |||
2163 | /* Read the nb of CNTx counters supported from PMNC */ | ||
2164 | nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; | ||
2165 | |||
2166 | /* Add the CPU cycles counter and return */ | ||
2167 | return nb_cnt + 1; | ||
2168 | } | ||
2169 | |||
2170 | /* | ||
2171 | * ARMv5 [xscale] Performance counter handling code. | ||
2172 | * | ||
2173 | * Based on xscale OProfile code. | ||
2174 | * | ||
2175 | * There are two variants of the xscale PMU that we support: | ||
2176 | * - xscale1pmu: 2 event counters and a cycle counter | ||
2177 | * - xscale2pmu: 4 event counters and a cycle counter | ||
2178 | * The two variants share event definitions, but have different | ||
2179 | * PMU structures. | ||
2180 | */ | ||
2181 | |||
2182 | enum xscale_perf_types { | ||
2183 | XSCALE_PERFCTR_ICACHE_MISS = 0x00, | ||
2184 | XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01, | ||
2185 | XSCALE_PERFCTR_DATA_STALL = 0x02, | ||
2186 | XSCALE_PERFCTR_ITLB_MISS = 0x03, | ||
2187 | XSCALE_PERFCTR_DTLB_MISS = 0x04, | ||
2188 | XSCALE_PERFCTR_BRANCH = 0x05, | ||
2189 | XSCALE_PERFCTR_BRANCH_MISS = 0x06, | ||
2190 | XSCALE_PERFCTR_INSTRUCTION = 0x07, | ||
2191 | XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08, | ||
2192 | XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09, | ||
2193 | XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A, | ||
2194 | XSCALE_PERFCTR_DCACHE_MISS = 0x0B, | ||
2195 | XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C, | ||
2196 | XSCALE_PERFCTR_PC_CHANGED = 0x0D, | ||
2197 | XSCALE_PERFCTR_BCU_REQUEST = 0x10, | ||
2198 | XSCALE_PERFCTR_BCU_FULL = 0x11, | ||
2199 | XSCALE_PERFCTR_BCU_DRAIN = 0x12, | ||
2200 | XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14, | ||
2201 | XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15, | ||
2202 | XSCALE_PERFCTR_RMW = 0x16, | ||
2203 | /* XSCALE_PERFCTR_CCNT is not hardware defined */ | ||
2204 | XSCALE_PERFCTR_CCNT = 0xFE, | ||
2205 | XSCALE_PERFCTR_UNUSED = 0xFF, | ||
2206 | }; | ||
2207 | |||
2208 | enum xscale_counters { | ||
2209 | XSCALE_CYCLE_COUNTER = 1, | ||
2210 | XSCALE_COUNTER0, | ||
2211 | XSCALE_COUNTER1, | ||
2212 | XSCALE_COUNTER2, | ||
2213 | XSCALE_COUNTER3, | ||
2214 | }; | ||
2215 | |||
2216 | static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = { | ||
2217 | [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT, | ||
2218 | [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION, | ||
2219 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
2220 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
2221 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH, | ||
2222 | [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS, | ||
2223 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | ||
2224 | }; | ||
2225 | |||
2226 | static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
2227 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
2228 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
2229 | [C(L1D)] = { | ||
2230 | [C(OP_READ)] = { | ||
2231 | [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS, | ||
2232 | [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS, | ||
2233 | }, | ||
2234 | [C(OP_WRITE)] = { | ||
2235 | [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS, | ||
2236 | [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS, | ||
2237 | }, | ||
2238 | [C(OP_PREFETCH)] = { | ||
2239 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2240 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
2241 | }, | ||
2242 | }, | ||
2243 | [C(L1I)] = { | ||
2244 | [C(OP_READ)] = { | ||
2245 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2246 | [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, | ||
2247 | }, | ||
2248 | [C(OP_WRITE)] = { | ||
2249 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2250 | [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, | ||
2251 | }, | ||
2252 | [C(OP_PREFETCH)] = { | ||
2253 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2254 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
2255 | }, | ||
2256 | }, | ||
2257 | [C(LL)] = { | ||
2258 | [C(OP_READ)] = { | ||
2259 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2260 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
2261 | }, | ||
2262 | [C(OP_WRITE)] = { | ||
2263 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2264 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
2265 | }, | ||
2266 | [C(OP_PREFETCH)] = { | ||
2267 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2268 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
2269 | }, | ||
2270 | }, | ||
2271 | [C(DTLB)] = { | ||
2272 | [C(OP_READ)] = { | ||
2273 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2274 | [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS, | ||
2275 | }, | ||
2276 | [C(OP_WRITE)] = { | ||
2277 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2278 | [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS, | ||
2279 | }, | ||
2280 | [C(OP_PREFETCH)] = { | ||
2281 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2282 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
2283 | }, | ||
2284 | }, | ||
2285 | [C(ITLB)] = { | ||
2286 | [C(OP_READ)] = { | ||
2287 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2288 | [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS, | ||
2289 | }, | ||
2290 | [C(OP_WRITE)] = { | ||
2291 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2292 | [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS, | ||
2293 | }, | ||
2294 | [C(OP_PREFETCH)] = { | ||
2295 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2296 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
2297 | }, | ||
2298 | }, | ||
2299 | [C(BPU)] = { | ||
2300 | [C(OP_READ)] = { | ||
2301 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2302 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
2303 | }, | ||
2304 | [C(OP_WRITE)] = { | ||
2305 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2306 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
2307 | }, | ||
2308 | [C(OP_PREFETCH)] = { | ||
2309 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
2310 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
2311 | }, | ||
2312 | }, | ||
2313 | }; | ||
2314 | |||
2315 | #define XSCALE_PMU_ENABLE 0x001 | ||
2316 | #define XSCALE_PMN_RESET 0x002 | ||
2317 | #define XSCALE_CCNT_RESET 0x004 | ||
2318 | #define XSCALE_PMU_RESET (XSCALE_CCNT_RESET | XSCALE_PMN_RESET) | ||
2319 | #define XSCALE_PMU_CNT64 0x008 | ||
2320 | |||
2321 | static inline int | ||
2322 | xscalepmu_event_map(int config) | ||
2323 | { | ||
2324 | int mapping = xscale_perf_map[config]; | ||
2325 | if (HW_OP_UNSUPPORTED == mapping) | ||
2326 | mapping = -EOPNOTSUPP; | ||
2327 | return mapping; | ||
2328 | } | ||
2329 | |||
2330 | static u64 | ||
2331 | xscalepmu_raw_event(u64 config) | ||
2332 | { | ||
2333 | return config & 0xff; | ||
2334 | } | ||
2335 | |||
2336 | #define XSCALE1_OVERFLOWED_MASK 0x700 | ||
2337 | #define XSCALE1_CCOUNT_OVERFLOW 0x400 | ||
2338 | #define XSCALE1_COUNT0_OVERFLOW 0x100 | ||
2339 | #define XSCALE1_COUNT1_OVERFLOW 0x200 | ||
2340 | #define XSCALE1_CCOUNT_INT_EN 0x040 | ||
2341 | #define XSCALE1_COUNT0_INT_EN 0x010 | ||
2342 | #define XSCALE1_COUNT1_INT_EN 0x020 | ||
2343 | #define XSCALE1_COUNT0_EVT_SHFT 12 | ||
2344 | #define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT) | ||
2345 | #define XSCALE1_COUNT1_EVT_SHFT 20 | ||
2346 | #define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT) | ||
2347 | |||
2348 | static inline u32 | ||
2349 | xscale1pmu_read_pmnc(void) | ||
2350 | { | ||
2351 | u32 val; | ||
2352 | asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val)); | ||
2353 | return val; | ||
2354 | } | ||
2355 | |||
2356 | static inline void | ||
2357 | xscale1pmu_write_pmnc(u32 val) | ||
2358 | { | ||
2359 | 	/* the upper 4 bits and bits 7 and 11 are write-as-0 */ | ||
2360 | val &= 0xffff77f; | ||
2361 | asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val)); | ||
2362 | } | ||
2363 | |||
2364 | static inline int | ||
2365 | xscale1_pmnc_counter_has_overflowed(unsigned long pmnc, | ||
2366 | enum xscale_counters counter) | ||
2367 | { | ||
2368 | int ret = 0; | ||
2369 | |||
2370 | switch (counter) { | ||
2371 | case XSCALE_CYCLE_COUNTER: | ||
2372 | ret = pmnc & XSCALE1_CCOUNT_OVERFLOW; | ||
2373 | break; | ||
2374 | case XSCALE_COUNTER0: | ||
2375 | ret = pmnc & XSCALE1_COUNT0_OVERFLOW; | ||
2376 | break; | ||
2377 | case XSCALE_COUNTER1: | ||
2378 | ret = pmnc & XSCALE1_COUNT1_OVERFLOW; | ||
2379 | break; | ||
2380 | default: | ||
2381 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
2382 | } | ||
2383 | |||
2384 | return ret; | ||
2385 | } | ||
2386 | |||
2387 | static irqreturn_t | ||
2388 | xscale1pmu_handle_irq(int irq_num, void *dev) | ||
2389 | { | ||
2390 | unsigned long pmnc; | ||
2391 | struct perf_sample_data data; | ||
2392 | struct cpu_hw_events *cpuc; | ||
2393 | struct pt_regs *regs; | ||
2394 | int idx; | ||
2395 | |||
2396 | /* | ||
2397 | * NOTE: there's an A stepping erratum that states if an overflow | ||
2398 | 	 *       bit is already set and another overflow occurs, the | ||
2399 | 	 *       previously set bit gets cleared. There's no workaround. | ||
2400 | * Fixed in B stepping or later. | ||
2401 | */ | ||
2402 | pmnc = xscale1pmu_read_pmnc(); | ||
2403 | |||
2404 | /* | ||
2405 | * Write the value back to clear the overflow flags. Overflow | ||
2406 | * flags remain in pmnc for use below. We also disable the PMU | ||
2407 | * while we process the interrupt. | ||
2408 | */ | ||
2409 | xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE); | ||
2410 | |||
2411 | if (!(pmnc & XSCALE1_OVERFLOWED_MASK)) | ||
2412 | return IRQ_NONE; | ||
2413 | |||
2414 | regs = get_irq_regs(); | ||
2415 | |||
2416 | perf_sample_data_init(&data, 0); | ||
2417 | |||
2418 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
2419 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
2420 | struct perf_event *event = cpuc->events[idx]; | ||
2421 | struct hw_perf_event *hwc; | ||
2422 | |||
2423 | if (!test_bit(idx, cpuc->active_mask)) | ||
2424 | continue; | ||
2425 | |||
2426 | if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) | ||
2427 | continue; | ||
2428 | |||
2429 | hwc = &event->hw; | ||
2430 | armpmu_event_update(event, hwc, idx); | ||
2431 | data.period = event->hw.last_period; | ||
2432 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
2433 | continue; | ||
2434 | |||
2435 | if (perf_event_overflow(event, 0, &data, regs)) | ||
2436 | armpmu->disable(hwc, idx); | ||
2437 | } | ||
2438 | |||
2439 | irq_work_run(); | ||
2440 | |||
2441 | /* | ||
2442 | * Re-enable the PMU. | ||
2443 | */ | ||
2444 | pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE; | ||
2445 | xscale1pmu_write_pmnc(pmnc); | ||
2446 | |||
2447 | return IRQ_HANDLED; | ||
2448 | } | ||
2449 | |||
2450 | static void | ||
2451 | xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
2452 | { | ||
2453 | unsigned long val, mask, evt, flags; | ||
2454 | |||
2455 | switch (idx) { | ||
2456 | case XSCALE_CYCLE_COUNTER: | ||
2457 | mask = 0; | ||
2458 | evt = XSCALE1_CCOUNT_INT_EN; | ||
2459 | break; | ||
2460 | case XSCALE_COUNTER0: | ||
2461 | mask = XSCALE1_COUNT0_EVT_MASK; | ||
2462 | evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) | | ||
2463 | XSCALE1_COUNT0_INT_EN; | ||
2464 | break; | ||
2465 | case XSCALE_COUNTER1: | ||
2466 | mask = XSCALE1_COUNT1_EVT_MASK; | ||
2467 | evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) | | ||
2468 | XSCALE1_COUNT1_INT_EN; | ||
2469 | break; | ||
2470 | default: | ||
2471 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
2472 | return; | ||
2473 | } | ||
2474 | |||
2475 | spin_lock_irqsave(&pmu_lock, flags); | ||
2476 | val = xscale1pmu_read_pmnc(); | ||
2477 | val &= ~mask; | ||
2478 | val |= evt; | ||
2479 | xscale1pmu_write_pmnc(val); | ||
2480 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2481 | } | ||
2482 | |||
2483 | static void | ||
2484 | xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
2485 | { | ||
2486 | unsigned long val, mask, evt, flags; | ||
2487 | |||
2488 | switch (idx) { | ||
2489 | case XSCALE_CYCLE_COUNTER: | ||
2490 | mask = XSCALE1_CCOUNT_INT_EN; | ||
2491 | evt = 0; | ||
2492 | break; | ||
2493 | case XSCALE_COUNTER0: | ||
2494 | mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK; | ||
2495 | evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT; | ||
2496 | break; | ||
2497 | case XSCALE_COUNTER1: | ||
2498 | mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK; | ||
2499 | evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT; | ||
2500 | break; | ||
2501 | default: | ||
2502 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
2503 | return; | ||
2504 | } | ||
2505 | |||
2506 | spin_lock_irqsave(&pmu_lock, flags); | ||
2507 | val = xscale1pmu_read_pmnc(); | ||
2508 | val &= ~mask; | ||
2509 | val |= evt; | ||
2510 | xscale1pmu_write_pmnc(val); | ||
2511 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2512 | } | ||
2513 | |||
2514 | static int | ||
2515 | xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
2516 | struct hw_perf_event *event) | ||
2517 | { | ||
2518 | if (XSCALE_PERFCTR_CCNT == event->config_base) { | ||
2519 | if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask)) | ||
2520 | return -EAGAIN; | ||
2521 | |||
2522 | return XSCALE_CYCLE_COUNTER; | ||
2523 | } else { | ||
2524 | if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) { | ||
2525 | return XSCALE_COUNTER1; | ||
2526 | } | ||
2527 | |||
2528 | if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) { | ||
2529 | return XSCALE_COUNTER0; | ||
2530 | } | ||
2531 | |||
2532 | return -EAGAIN; | ||
2533 | } | ||
2534 | } | ||
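The allocator above always pins a cycle-count event to the dedicated cycle counter and tries counter 1 before counter 0 for anything else. A minimal sketch of the resulting placement order, assuming cycles->config_base is XSCALE_PERFCTR_CCNT while insns uses any other event; the helper name and the (void) casts are illustrative only and not part of the driver:

static void xscale1_placement_sketch(struct cpu_hw_events *cpuc,
				     struct hw_perf_event *cycles,
				     struct hw_perf_event *insns)
{
	/* Starting from an empty used_mask: */
	int a = xscale1pmu_get_event_idx(cpuc, cycles); /* XSCALE_CYCLE_COUNTER */
	int b = xscale1pmu_get_event_idx(cpuc, insns);  /* XSCALE_COUNTER1 */
	int c = xscale1pmu_get_event_idx(cpuc, insns);  /* XSCALE_COUNTER0 */
	int d = xscale1pmu_get_event_idx(cpuc, insns);  /* -EAGAIN: all in use */

	(void)a; (void)b; (void)c; (void)d;
}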
2535 | |||
2536 | static void | ||
2537 | xscale1pmu_start(void) | ||
2538 | { | ||
2539 | unsigned long flags, val; | ||
2540 | |||
2541 | spin_lock_irqsave(&pmu_lock, flags); | ||
2542 | val = xscale1pmu_read_pmnc(); | ||
2543 | val |= XSCALE_PMU_ENABLE; | ||
2544 | xscale1pmu_write_pmnc(val); | ||
2545 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2546 | } | ||
2547 | |||
2548 | static void | ||
2549 | xscale1pmu_stop(void) | ||
2550 | { | ||
2551 | unsigned long flags, val; | ||
2552 | |||
2553 | spin_lock_irqsave(&pmu_lock, flags); | ||
2554 | val = xscale1pmu_read_pmnc(); | ||
2555 | val &= ~XSCALE_PMU_ENABLE; | ||
2556 | xscale1pmu_write_pmnc(val); | ||
2557 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2558 | } | ||
2559 | |||
2560 | static inline u32 | ||
2561 | xscale1pmu_read_counter(int counter) | ||
2562 | { | ||
2563 | u32 val = 0; | ||
2564 | |||
2565 | switch (counter) { | ||
2566 | case XSCALE_CYCLE_COUNTER: | ||
2567 | asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val)); | ||
2568 | break; | ||
2569 | case XSCALE_COUNTER0: | ||
2570 | asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val)); | ||
2571 | break; | ||
2572 | case XSCALE_COUNTER1: | ||
2573 | asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val)); | ||
2574 | break; | ||
2575 | } | ||
2576 | |||
2577 | return val; | ||
2578 | } | ||
2579 | |||
2580 | static inline void | ||
2581 | xscale1pmu_write_counter(int counter, u32 val) | ||
2582 | { | ||
2583 | switch (counter) { | ||
2584 | case XSCALE_CYCLE_COUNTER: | ||
2585 | asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val)); | ||
2586 | break; | ||
2587 | case XSCALE_COUNTER0: | ||
2588 | asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val)); | ||
2589 | break; | ||
2590 | case XSCALE_COUNTER1: | ||
2591 | asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val)); | ||
2592 | break; | ||
2593 | } | ||
2594 | } | ||
2595 | |||
2596 | static const struct arm_pmu xscale1pmu = { | ||
2597 | .id = ARM_PERF_PMU_ID_XSCALE1, | ||
2598 | .handle_irq = xscale1pmu_handle_irq, | ||
2599 | .enable = xscale1pmu_enable_event, | ||
2600 | .disable = xscale1pmu_disable_event, | ||
2601 | .event_map = xscalepmu_event_map, | ||
2602 | .raw_event = xscalepmu_raw_event, | ||
2603 | .read_counter = xscale1pmu_read_counter, | ||
2604 | .write_counter = xscale1pmu_write_counter, | ||
2605 | .get_event_idx = xscale1pmu_get_event_idx, | ||
2606 | .start = xscale1pmu_start, | ||
2607 | .stop = xscale1pmu_stop, | ||
2608 | .num_events = 3, | ||
2609 | .max_period = (1LLU << 32) - 1, | ||
2610 | }; | ||
2611 | |||
2612 | #define XSCALE2_OVERFLOWED_MASK 0x01f | ||
2613 | #define XSCALE2_CCOUNT_OVERFLOW 0x001 | ||
2614 | #define XSCALE2_COUNT0_OVERFLOW 0x002 | ||
2615 | #define XSCALE2_COUNT1_OVERFLOW 0x004 | ||
2616 | #define XSCALE2_COUNT2_OVERFLOW 0x008 | ||
2617 | #define XSCALE2_COUNT3_OVERFLOW 0x010 | ||
2618 | #define XSCALE2_CCOUNT_INT_EN 0x001 | ||
2619 | #define XSCALE2_COUNT0_INT_EN 0x002 | ||
2620 | #define XSCALE2_COUNT1_INT_EN 0x004 | ||
2621 | #define XSCALE2_COUNT2_INT_EN 0x008 | ||
2622 | #define XSCALE2_COUNT3_INT_EN 0x010 | ||
2623 | #define XSCALE2_COUNT0_EVT_SHFT 0 | ||
2624 | #define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT) | ||
2625 | #define XSCALE2_COUNT1_EVT_SHFT 8 | ||
2626 | #define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT) | ||
2627 | #define XSCALE2_COUNT2_EVT_SHFT 16 | ||
2628 | #define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT) | ||
2629 | #define XSCALE2_COUNT3_EVT_SHFT 24 | ||
2630 | #define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT) | ||
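The EVT_SHFT/EVT_MASK definitions above describe the XScale 2 event select register as four independent 8-bit event fields, one per counter. A minimal sketch of programming one field, assuming counter 2 and using only the masks just defined; the helper name is illustrative, and the real driver performs this read-modify-write inside xscale2pmu_enable_event() below:

static inline u32 xscale2_evtsel_set_count2(u32 evtsel, u32 event)
{
	evtsel &= ~XSCALE2_COUNT2_EVT_MASK;		/* clear bits [23:16] */
	evtsel |= event << XSCALE2_COUNT2_EVT_SHFT;	/* new event in [23:16] */
	return evtsel;
}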
2631 | |||
2632 | static inline u32 | ||
2633 | xscale2pmu_read_pmnc(void) | ||
2634 | { | ||
2635 | u32 val; | ||
2636 | asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val)); | ||
2637 | /* bits 1-2 and 4-23 are read-unpredictable */ | ||
2638 | return val & 0xff000009; | ||
2639 | } | ||
2640 | |||
2641 | static inline void | ||
2642 | xscale2pmu_write_pmnc(u32 val) | ||
2643 | { | ||
2644 | /* bits 4-23 are write-as-0, 24-31 are write ignored */ | ||
2645 | val &= 0xf; | ||
2646 | asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val)); | ||
2647 | } | ||
2648 | |||
2649 | static inline u32 | ||
2650 | xscale2pmu_read_overflow_flags(void) | ||
2651 | { | ||
2652 | u32 val; | ||
2653 | asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val)); | ||
2654 | return val; | ||
2655 | } | ||
2656 | |||
2657 | static inline void | ||
2658 | xscale2pmu_write_overflow_flags(u32 val) | ||
2659 | { | ||
2660 | asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val)); | ||
2661 | } | ||
2662 | |||
2663 | static inline u32 | ||
2664 | xscale2pmu_read_event_select(void) | ||
2665 | { | ||
2666 | u32 val; | ||
2667 | asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val)); | ||
2668 | return val; | ||
2669 | } | ||
2670 | |||
2671 | static inline void | ||
2672 | xscale2pmu_write_event_select(u32 val) | ||
2673 | { | ||
2674 | asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val)); | ||
2675 | } | ||
2676 | |||
2677 | static inline u32 | ||
2678 | xscale2pmu_read_int_enable(void) | ||
2679 | { | ||
2680 | u32 val; | ||
2681 | asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val)); | ||
2682 | return val; | ||
2683 | } | ||
2684 | |||
2685 | static void | ||
2686 | xscale2pmu_write_int_enable(u32 val) | ||
2687 | { | ||
2688 | asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val)); | ||
2689 | } | ||
2690 | |||
2691 | static inline int | ||
2692 | xscale2_pmnc_counter_has_overflowed(unsigned long of_flags, | ||
2693 | enum xscale_counters counter) | ||
2694 | { | ||
2695 | int ret = 0; | ||
2696 | |||
2697 | switch (counter) { | ||
2698 | case XSCALE_CYCLE_COUNTER: | ||
2699 | ret = of_flags & XSCALE2_CCOUNT_OVERFLOW; | ||
2700 | break; | ||
2701 | case XSCALE_COUNTER0: | ||
2702 | ret = of_flags & XSCALE2_COUNT0_OVERFLOW; | ||
2703 | break; | ||
2704 | case XSCALE_COUNTER1: | ||
2705 | ret = of_flags & XSCALE2_COUNT1_OVERFLOW; | ||
2706 | break; | ||
2707 | case XSCALE_COUNTER2: | ||
2708 | ret = of_flags & XSCALE2_COUNT2_OVERFLOW; | ||
2709 | break; | ||
2710 | case XSCALE_COUNTER3: | ||
2711 | ret = of_flags & XSCALE2_COUNT3_OVERFLOW; | ||
2712 | break; | ||
2713 | default: | ||
2714 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
2715 | } | ||
2716 | |||
2717 | return ret; | ||
2718 | } | ||
2719 | |||
2720 | static irqreturn_t | ||
2721 | xscale2pmu_handle_irq(int irq_num, void *dev) | ||
2722 | { | ||
2723 | unsigned long pmnc, of_flags; | ||
2724 | struct perf_sample_data data; | ||
2725 | struct cpu_hw_events *cpuc; | ||
2726 | struct pt_regs *regs; | ||
2727 | int idx; | ||
2728 | |||
2729 | /* Disable the PMU. */ | ||
2730 | pmnc = xscale2pmu_read_pmnc(); | ||
2731 | xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE); | ||
2732 | |||
2733 | /* Check the overflow flag register. */ | ||
2734 | of_flags = xscale2pmu_read_overflow_flags(); | ||
2735 | if (!(of_flags & XSCALE2_OVERFLOWED_MASK)) | ||
2736 | return IRQ_NONE; | ||
2737 | |||
2738 | /* Clear the overflow bits. */ | ||
2739 | xscale2pmu_write_overflow_flags(of_flags); | ||
2740 | |||
2741 | regs = get_irq_regs(); | ||
2742 | |||
2743 | perf_sample_data_init(&data, 0); | ||
2744 | |||
2745 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
2746 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
2747 | struct perf_event *event = cpuc->events[idx]; | ||
2748 | struct hw_perf_event *hwc; | ||
2749 | |||
2750 | if (!test_bit(idx, cpuc->active_mask)) | ||
2751 | continue; | ||
2752 | |||
2753 | if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx)) | ||
2754 | continue; | ||
2755 | |||
2756 | hwc = &event->hw; | ||
2757 | armpmu_event_update(event, hwc, idx); | ||
2758 | data.period = event->hw.last_period; | ||
2759 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
2760 | continue; | ||
2761 | |||
2762 | if (perf_event_overflow(event, 0, &data, regs)) | ||
2763 | armpmu->disable(hwc, idx); | ||
2764 | } | ||
2765 | |||
2766 | irq_work_run(); | ||
2767 | |||
2768 | /* | ||
2769 | * Re-enable the PMU. | ||
2770 | */ | ||
2771 | pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE; | ||
2772 | xscale2pmu_write_pmnc(pmnc); | ||
2773 | |||
2774 | return IRQ_HANDLED; | ||
2775 | } | ||
2776 | |||
2777 | static void | ||
2778 | xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
2779 | { | ||
2780 | unsigned long flags, ien, evtsel; | ||
2781 | |||
2782 | ien = xscale2pmu_read_int_enable(); | ||
2783 | evtsel = xscale2pmu_read_event_select(); | ||
2784 | |||
2785 | switch (idx) { | ||
2786 | case XSCALE_CYCLE_COUNTER: | ||
2787 | ien |= XSCALE2_CCOUNT_INT_EN; | ||
2788 | break; | ||
2789 | case XSCALE_COUNTER0: | ||
2790 | ien |= XSCALE2_COUNT0_INT_EN; | ||
2791 | evtsel &= ~XSCALE2_COUNT0_EVT_MASK; | ||
2792 | evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT; | ||
2793 | break; | ||
2794 | case XSCALE_COUNTER1: | ||
2795 | ien |= XSCALE2_COUNT1_INT_EN; | ||
2796 | evtsel &= ~XSCALE2_COUNT1_EVT_MASK; | ||
2797 | evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT; | ||
2798 | break; | ||
2799 | case XSCALE_COUNTER2: | ||
2800 | ien |= XSCALE2_COUNT2_INT_EN; | ||
2801 | evtsel &= ~XSCALE2_COUNT2_EVT_MASK; | ||
2802 | evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT; | ||
2803 | break; | ||
2804 | case XSCALE_COUNTER3: | ||
2805 | ien |= XSCALE2_COUNT3_INT_EN; | ||
2806 | evtsel &= ~XSCALE2_COUNT3_EVT_MASK; | ||
2807 | evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT; | ||
2808 | break; | ||
2809 | default: | ||
2810 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
2811 | return; | ||
2812 | } | ||
2813 | |||
2814 | spin_lock_irqsave(&pmu_lock, flags); | ||
2815 | xscale2pmu_write_event_select(evtsel); | ||
2816 | xscale2pmu_write_int_enable(ien); | ||
2817 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2818 | } | ||
2819 | |||
2820 | static void | ||
2821 | xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
2822 | { | ||
2823 | unsigned long flags, ien, evtsel; | ||
2824 | |||
2825 | ien = xscale2pmu_read_int_enable(); | ||
2826 | evtsel = xscale2pmu_read_event_select(); | ||
2827 | |||
2828 | switch (idx) { | ||
2829 | case XSCALE_CYCLE_COUNTER: | ||
2830 | ien &= ~XSCALE2_CCOUNT_INT_EN; | ||
2831 | break; | ||
2832 | case XSCALE_COUNTER0: | ||
2833 | ien &= ~XSCALE2_COUNT0_INT_EN; | ||
2834 | evtsel &= ~XSCALE2_COUNT0_EVT_MASK; | ||
2835 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT; | ||
2836 | break; | ||
2837 | case XSCALE_COUNTER1: | ||
2838 | ien &= ~XSCALE2_COUNT1_INT_EN; | ||
2839 | evtsel &= ~XSCALE2_COUNT1_EVT_MASK; | ||
2840 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT; | ||
2841 | break; | ||
2842 | case XSCALE_COUNTER2: | ||
2843 | ien &= ~XSCALE2_COUNT2_INT_EN; | ||
2844 | evtsel &= ~XSCALE2_COUNT2_EVT_MASK; | ||
2845 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT; | ||
2846 | break; | ||
2847 | case XSCALE_COUNTER3: | ||
2848 | ien &= ~XSCALE2_COUNT3_INT_EN; | ||
2849 | evtsel &= ~XSCALE2_COUNT3_EVT_MASK; | ||
2850 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT; | ||
2851 | break; | ||
2852 | default: | ||
2853 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
2854 | return; | ||
2855 | } | ||
2856 | |||
2857 | spin_lock_irqsave(&pmu_lock, flags); | ||
2858 | xscale2pmu_write_event_select(evtsel); | ||
2859 | xscale2pmu_write_int_enable(ien); | ||
2860 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2861 | } | ||
2862 | |||
2863 | static int | ||
2864 | xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
2865 | struct hw_perf_event *event) | ||
2866 | { | ||
2867 | int idx = xscale1pmu_get_event_idx(cpuc, event); | ||
2868 | if (idx >= 0) | ||
2869 | goto out; | ||
2870 | |||
2871 | if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask)) | ||
2872 | idx = XSCALE_COUNTER3; | ||
2873 | else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask)) | ||
2874 | idx = XSCALE_COUNTER2; | ||
2875 | out: | ||
2876 | return idx; | ||
2877 | } | ||
2878 | |||
2879 | static void | ||
2880 | xscale2pmu_start(void) | ||
2881 | { | ||
2882 | unsigned long flags, val; | ||
2883 | |||
2884 | spin_lock_irqsave(&pmu_lock, flags); | ||
2885 | val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64; | ||
2886 | val |= XSCALE_PMU_ENABLE; | ||
2887 | xscale2pmu_write_pmnc(val); | ||
2888 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2889 | } | ||
2890 | |||
2891 | static void | ||
2892 | xscale2pmu_stop(void) | ||
2893 | { | ||
2894 | unsigned long flags, val; | ||
2895 | |||
2896 | spin_lock_irqsave(&pmu_lock, flags); | ||
2897 | val = xscale2pmu_read_pmnc(); | ||
2898 | val &= ~XSCALE_PMU_ENABLE; | ||
2899 | xscale2pmu_write_pmnc(val); | ||
2900 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
2901 | } | ||
2902 | |||
2903 | static inline u32 | ||
2904 | xscale2pmu_read_counter(int counter) | ||
2905 | { | ||
2906 | u32 val = 0; | ||
2907 | |||
2908 | switch (counter) { | ||
2909 | case XSCALE_CYCLE_COUNTER: | ||
2910 | asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val)); | ||
2911 | break; | ||
2912 | case XSCALE_COUNTER0: | ||
2913 | asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val)); | ||
2914 | break; | ||
2915 | case XSCALE_COUNTER1: | ||
2916 | asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val)); | ||
2917 | break; | ||
2918 | case XSCALE_COUNTER2: | ||
2919 | asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val)); | ||
2920 | break; | ||
2921 | case XSCALE_COUNTER3: | ||
2922 | asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val)); | ||
2923 | break; | ||
2924 | } | ||
2925 | |||
2926 | return val; | ||
2927 | } | ||
2928 | |||
2929 | static inline void | ||
2930 | xscale2pmu_write_counter(int counter, u32 val) | ||
2931 | { | ||
2932 | switch (counter) { | ||
2933 | case XSCALE_CYCLE_COUNTER: | ||
2934 | asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val)); | ||
2935 | break; | ||
2936 | case XSCALE_COUNTER0: | ||
2937 | asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val)); | ||
2938 | break; | ||
2939 | case XSCALE_COUNTER1: | ||
2940 | asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val)); | ||
2941 | break; | ||
2942 | case XSCALE_COUNTER2: | ||
2943 | asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val)); | ||
2944 | break; | ||
2945 | case XSCALE_COUNTER3: | ||
2946 | asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val)); | ||
2947 | break; | ||
2948 | } | ||
2949 | } | ||
2950 | |||
2951 | static const struct arm_pmu xscale2pmu = { | ||
2952 | .id = ARM_PERF_PMU_ID_XSCALE2, | ||
2953 | .handle_irq = xscale2pmu_handle_irq, | ||
2954 | .enable = xscale2pmu_enable_event, | ||
2955 | .disable = xscale2pmu_disable_event, | ||
2956 | .event_map = xscalepmu_event_map, | ||
2957 | .raw_event = xscalepmu_raw_event, | ||
2958 | .read_counter = xscale2pmu_read_counter, | ||
2959 | .write_counter = xscale2pmu_write_counter, | ||
2960 | .get_event_idx = xscale2pmu_get_event_idx, | ||
2961 | .start = xscale2pmu_start, | ||
2962 | .stop = xscale2pmu_stop, | ||
2963 | .num_events = 5, | ||
2964 | .max_period = (1LLU << 32) - 1, | ||
2965 | }; | ||
2966 | 611 | ||
2967 | static int __init | 612 | static int __init |
2968 | init_hw_perf_events(void) | 613 | init_hw_perf_events(void) |
@@ -2977,37 +622,16 @@ init_hw_perf_events(void) | |||
2977 | case 0xB360: /* ARM1136 */ | 622 | case 0xB360: /* ARM1136 */ |
2978 | case 0xB560: /* ARM1156 */ | 623 | case 0xB560: /* ARM1156 */ |
2979 | case 0xB760: /* ARM1176 */ | 624 | case 0xB760: /* ARM1176 */ |
2980 | armpmu = &armv6pmu; | 625 | armpmu = armv6pmu_init(); |
2981 | memcpy(armpmu_perf_cache_map, armv6_perf_cache_map, | ||
2982 | sizeof(armv6_perf_cache_map)); | ||
2983 | break; | 626 | break; |
2984 | case 0xB020: /* ARM11mpcore */ | 627 | case 0xB020: /* ARM11mpcore */ |
2985 | armpmu = &armv6mpcore_pmu; | 628 | armpmu = armv6mpcore_pmu_init(); |
2986 | memcpy(armpmu_perf_cache_map, | ||
2987 | armv6mpcore_perf_cache_map, | ||
2988 | sizeof(armv6mpcore_perf_cache_map)); | ||
2989 | break; | 629 | break; |
2990 | case 0xC080: /* Cortex-A8 */ | 630 | case 0xC080: /* Cortex-A8 */ |
2991 | armv7pmu.id = ARM_PERF_PMU_ID_CA8; | 631 | armpmu = armv7_a8_pmu_init(); |
2992 | memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map, | ||
2993 | sizeof(armv7_a8_perf_cache_map)); | ||
2994 | armv7pmu.event_map = armv7_a8_pmu_event_map; | ||
2995 | armpmu = &armv7pmu; | ||
2996 | |||
2997 | /* Reset PMNC and read the nb of CNTx counters | ||
2998 | supported */ | ||
2999 | armv7pmu.num_events = armv7_reset_read_pmnc(); | ||
3000 | break; | 632 | break; |
3001 | case 0xC090: /* Cortex-A9 */ | 633 | case 0xC090: /* Cortex-A9 */ |
3002 | armv7pmu.id = ARM_PERF_PMU_ID_CA9; | 634 | armpmu = armv7_a9_pmu_init(); |
3003 | memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map, | ||
3004 | sizeof(armv7_a9_perf_cache_map)); | ||
3005 | armv7pmu.event_map = armv7_a9_pmu_event_map; | ||
3006 | armpmu = &armv7pmu; | ||
3007 | |||
3008 | /* Reset PMNC and read the nb of CNTx counters | ||
3009 | supported */ | ||
3010 | armv7pmu.num_events = armv7_reset_read_pmnc(); | ||
3011 | break; | 635 | break; |
3012 | } | 636 | } |
3013 | /* Intel CPUs [xscale]. */ | 637 | /* Intel CPUs [xscale]. */ |
@@ -3015,21 +639,17 @@ init_hw_perf_events(void) | |||
3015 | part_number = (cpuid >> 13) & 0x7; | 639 | part_number = (cpuid >> 13) & 0x7; |
3016 | switch (part_number) { | 640 | switch (part_number) { |
3017 | case 1: | 641 | case 1: |
3018 | armpmu = &xscale1pmu; | 642 | armpmu = xscale1pmu_init(); |
3019 | memcpy(armpmu_perf_cache_map, xscale_perf_cache_map, | ||
3020 | sizeof(xscale_perf_cache_map)); | ||
3021 | break; | 643 | break; |
3022 | case 2: | 644 | case 2: |
3023 | armpmu = &xscale2pmu; | 645 | armpmu = xscale2pmu_init(); |
3024 | memcpy(armpmu_perf_cache_map, xscale_perf_cache_map, | ||
3025 | sizeof(xscale_perf_cache_map)); | ||
3026 | break; | 646 | break; |
3027 | } | 647 | } |
3028 | } | 648 | } |
3029 | 649 | ||
3030 | if (armpmu) { | 650 | if (armpmu) { |
3031 | pr_info("enabled with %s PMU driver, %d counters available\n", | 651 | pr_info("enabled with %s PMU driver, %d counters available\n", |
3032 | arm_pmu_names[armpmu->id], armpmu->num_events); | 652 | armpmu->name, armpmu->num_events); |
3033 | } else { | 653 | } else { |
3034 | pr_info("no hardware support available\n"); | 654 | pr_info("no hardware support available\n"); |
3035 | } | 655 | } |
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c new file mode 100644 index 000000000000..7aeb07da9076 --- /dev/null +++ b/arch/arm/kernel/perf_event_v6.c | |||
@@ -0,0 +1,672 @@ | |||
1 | /* | ||
2 | * ARMv6 Performance counter handling code. | ||
3 | * | ||
4 | * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles | ||
5 | * | ||
6 | * ARMv6 has 2 configurable performance counters and a single cycle counter. | ||
7 | * They all share a single reset bit but can be written to zero so we can use | ||
8 | * that for a reset. | ||
9 | * | ||
10 | * The counters can't be individually enabled or disabled so when we remove | ||
11 | * one event and replace it with another we could get spurious counts from the | ||
12 | * wrong event. However, we can take advantage of the fact that the | ||
13 | * performance counters can export events to the event bus, and the event bus | ||
14 | * itself can be monitored. This requires that we *don't* export the events to | ||
15 | * the event bus. The procedure for disabling a configurable counter is: | ||
16 | * - change the counter to count the ETMEXTOUT[0] signal (0x20). This | ||
17 | * effectively stops the counter from counting. | ||
18 | * - disable the counter's interrupt generation (each counter has its | ||
19 | * own interrupt enable bit). | ||
20 | * Once stopped, the counter value can be written as 0 to reset. | ||
21 | * | ||
22 | * To enable a counter: | ||
23 | * - enable the counter's interrupt generation. | ||
24 | * - set the new event type. | ||
25 | * | ||
26 | * Note: the dedicated cycle counter only counts cycles and can't be | ||
27 | * enabled/disabled independently of the others. When we want to disable the | ||
28 | * cycle counter, we have to just disable the interrupt reporting and start | ||
29 | * ignoring that counter. When re-enabling, we have to reset the value and | ||
30 | * enable the interrupt. | ||
31 | */ | ||
32 | |||
33 | #ifdef CONFIG_CPU_V6 | ||
34 | enum armv6_perf_types { | ||
35 | ARMV6_PERFCTR_ICACHE_MISS = 0x0, | ||
36 | ARMV6_PERFCTR_IBUF_STALL = 0x1, | ||
37 | ARMV6_PERFCTR_DDEP_STALL = 0x2, | ||
38 | ARMV6_PERFCTR_ITLB_MISS = 0x3, | ||
39 | ARMV6_PERFCTR_DTLB_MISS = 0x4, | ||
40 | ARMV6_PERFCTR_BR_EXEC = 0x5, | ||
41 | ARMV6_PERFCTR_BR_MISPREDICT = 0x6, | ||
42 | ARMV6_PERFCTR_INSTR_EXEC = 0x7, | ||
43 | ARMV6_PERFCTR_DCACHE_HIT = 0x9, | ||
44 | ARMV6_PERFCTR_DCACHE_ACCESS = 0xA, | ||
45 | ARMV6_PERFCTR_DCACHE_MISS = 0xB, | ||
46 | ARMV6_PERFCTR_DCACHE_WBACK = 0xC, | ||
47 | ARMV6_PERFCTR_SW_PC_CHANGE = 0xD, | ||
48 | ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF, | ||
49 | ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10, | ||
50 | ARMV6_PERFCTR_LSU_FULL_STALL = 0x11, | ||
51 | ARMV6_PERFCTR_WBUF_DRAINED = 0x12, | ||
52 | ARMV6_PERFCTR_CPU_CYCLES = 0xFF, | ||
53 | ARMV6_PERFCTR_NOP = 0x20, | ||
54 | }; | ||
55 | |||
56 | enum armv6_counters { | ||
57 | ARMV6_CYCLE_COUNTER = 1, | ||
58 | ARMV6_COUNTER0, | ||
59 | ARMV6_COUNTER1, | ||
60 | }; | ||
61 | |||
62 | /* | ||
63 | * The hardware events that we support. We do support cache operations but | ||
64 | * we have Harvard caches and no way to combine instruction and data | ||
65 | * accesses/misses in hardware. | ||
66 | */ | ||
67 | static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = { | ||
68 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, | ||
69 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, | ||
70 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
71 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
72 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, | ||
73 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, | ||
74 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | ||
75 | }; | ||
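The map above translates the generic perf event numbers into ARMv6 event encodings, with HW_OP_UNSUPPORTED marking generic events the hardware cannot count. A minimal sketch of how such a lookup is expected to behave; the helper and its return convention are hypothetical, the driver itself goes through the arm_pmu event_map hook:

static int armv6_map_hw_event_sketch(unsigned int config)
{
	unsigned int evt;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	evt = armv6_perf_map[config];	/* e.g. HW_BRANCH_INSTRUCTIONS -> 0x5 */
	if (evt == HW_OP_UNSUPPORTED)	/* cache refs/misses, bus cycles */
		return -EOPNOTSUPP;

	return evt;
}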
76 | |||
77 | static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
78 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
79 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
80 | [C(L1D)] = { | ||
81 | /* | ||
82 | * The performance counters don't differentiate between read | ||
83 | * and write accesses/misses so this isn't strictly correct, | ||
84 | * but it's the best we can do. Writes and reads get | ||
85 | * combined. | ||
86 | */ | ||
87 | [C(OP_READ)] = { | ||
88 | [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, | ||
89 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, | ||
90 | }, | ||
91 | [C(OP_WRITE)] = { | ||
92 | [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, | ||
93 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, | ||
94 | }, | ||
95 | [C(OP_PREFETCH)] = { | ||
96 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
97 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
98 | }, | ||
99 | }, | ||
100 | [C(L1I)] = { | ||
101 | [C(OP_READ)] = { | ||
102 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
103 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, | ||
104 | }, | ||
105 | [C(OP_WRITE)] = { | ||
106 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
107 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, | ||
108 | }, | ||
109 | [C(OP_PREFETCH)] = { | ||
110 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
111 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
112 | }, | ||
113 | }, | ||
114 | [C(LL)] = { | ||
115 | [C(OP_READ)] = { | ||
116 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
117 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
118 | }, | ||
119 | [C(OP_WRITE)] = { | ||
120 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
121 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
122 | }, | ||
123 | [C(OP_PREFETCH)] = { | ||
124 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
125 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
126 | }, | ||
127 | }, | ||
128 | [C(DTLB)] = { | ||
129 | /* | ||
130 | * The ARM performance counters can count micro DTLB misses, | ||
131 | * micro ITLB misses and main TLB misses. There isn't an event | ||
132 | * for TLB misses, so use the micro misses here and if users | ||
133 | * want the main TLB misses they can use a raw counter. | ||
134 | */ | ||
135 | [C(OP_READ)] = { | ||
136 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
137 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, | ||
138 | }, | ||
139 | [C(OP_WRITE)] = { | ||
140 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
141 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, | ||
142 | }, | ||
143 | [C(OP_PREFETCH)] = { | ||
144 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
145 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
146 | }, | ||
147 | }, | ||
148 | [C(ITLB)] = { | ||
149 | [C(OP_READ)] = { | ||
150 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
151 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, | ||
152 | }, | ||
153 | [C(OP_WRITE)] = { | ||
154 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
155 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, | ||
156 | }, | ||
157 | [C(OP_PREFETCH)] = { | ||
158 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
159 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
160 | }, | ||
161 | }, | ||
162 | [C(BPU)] = { | ||
163 | [C(OP_READ)] = { | ||
164 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
165 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
166 | }, | ||
167 | [C(OP_WRITE)] = { | ||
168 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
169 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
170 | }, | ||
171 | [C(OP_PREFETCH)] = { | ||
172 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
173 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
174 | }, | ||
175 | }, | ||
176 | }; | ||
177 | |||
178 | enum armv6mpcore_perf_types { | ||
179 | ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0, | ||
180 | ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1, | ||
181 | ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2, | ||
182 | ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3, | ||
183 | ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4, | ||
184 | ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5, | ||
185 | ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6, | ||
186 | ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7, | ||
187 | ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8, | ||
188 | ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA, | ||
189 | ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB, | ||
190 | ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC, | ||
191 | ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD, | ||
192 | ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE, | ||
193 | ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF, | ||
194 | ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10, | ||
195 | ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11, | ||
196 | ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12, | ||
197 | ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13, | ||
198 | ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF, | ||
199 | }; | ||
200 | |||
201 | /* | ||
202 | * The hardware events that we support. We do support cache operations but | ||
203 | * we have Harvard caches and no way to combine instruction and data | ||
204 | * accesses/misses in hardware. | ||
205 | */ | ||
206 | static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = { | ||
207 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, | ||
208 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, | ||
209 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
210 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
211 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, | ||
212 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, | ||
213 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | ||
214 | }; | ||
215 | |||
216 | static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
217 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
218 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
219 | [C(L1D)] = { | ||
220 | [C(OP_READ)] = { | ||
221 | [C(RESULT_ACCESS)] = | ||
222 | ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS, | ||
223 | [C(RESULT_MISS)] = | ||
224 | ARMV6MPCORE_PERFCTR_DCACHE_RDMISS, | ||
225 | }, | ||
226 | [C(OP_WRITE)] = { | ||
227 | [C(RESULT_ACCESS)] = | ||
228 | ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS, | ||
229 | [C(RESULT_MISS)] = | ||
230 | ARMV6MPCORE_PERFCTR_DCACHE_WRMISS, | ||
231 | }, | ||
232 | [C(OP_PREFETCH)] = { | ||
233 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
234 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
235 | }, | ||
236 | }, | ||
237 | [C(L1I)] = { | ||
238 | [C(OP_READ)] = { | ||
239 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
240 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, | ||
241 | }, | ||
242 | [C(OP_WRITE)] = { | ||
243 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
244 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, | ||
245 | }, | ||
246 | [C(OP_PREFETCH)] = { | ||
247 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
248 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
249 | }, | ||
250 | }, | ||
251 | [C(LL)] = { | ||
252 | [C(OP_READ)] = { | ||
253 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
254 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
255 | }, | ||
256 | [C(OP_WRITE)] = { | ||
257 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
258 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
259 | }, | ||
260 | [C(OP_PREFETCH)] = { | ||
261 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
262 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
263 | }, | ||
264 | }, | ||
265 | [C(DTLB)] = { | ||
266 | /* | ||
267 | * The ARM performance counters can count micro DTLB misses, | ||
268 | * micro ITLB misses and main TLB misses. There isn't an event | ||
269 | * for TLB misses, so use the micro misses here and if users | ||
270 | * want the main TLB misses they can use a raw counter. | ||
271 | */ | ||
272 | [C(OP_READ)] = { | ||
273 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
274 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, | ||
275 | }, | ||
276 | [C(OP_WRITE)] = { | ||
277 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
278 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, | ||
279 | }, | ||
280 | [C(OP_PREFETCH)] = { | ||
281 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
282 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
283 | }, | ||
284 | }, | ||
285 | [C(ITLB)] = { | ||
286 | [C(OP_READ)] = { | ||
287 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
288 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, | ||
289 | }, | ||
290 | [C(OP_WRITE)] = { | ||
291 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
292 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, | ||
293 | }, | ||
294 | [C(OP_PREFETCH)] = { | ||
295 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
296 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
297 | }, | ||
298 | }, | ||
299 | [C(BPU)] = { | ||
300 | [C(OP_READ)] = { | ||
301 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
302 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
303 | }, | ||
304 | [C(OP_WRITE)] = { | ||
305 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
306 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
307 | }, | ||
308 | [C(OP_PREFETCH)] = { | ||
309 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
310 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
311 | }, | ||
312 | }, | ||
313 | }; | ||
314 | |||
315 | static inline unsigned long | ||
316 | armv6_pmcr_read(void) | ||
317 | { | ||
318 | u32 val; | ||
319 | asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val)); | ||
320 | return val; | ||
321 | } | ||
322 | |||
323 | static inline void | ||
324 | armv6_pmcr_write(unsigned long val) | ||
325 | { | ||
326 | asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val)); | ||
327 | } | ||
328 | |||
329 | #define ARMV6_PMCR_ENABLE (1 << 0) | ||
330 | #define ARMV6_PMCR_CTR01_RESET (1 << 1) | ||
331 | #define ARMV6_PMCR_CCOUNT_RESET (1 << 2) | ||
332 | #define ARMV6_PMCR_CCOUNT_DIV (1 << 3) | ||
333 | #define ARMV6_PMCR_COUNT0_IEN (1 << 4) | ||
334 | #define ARMV6_PMCR_COUNT1_IEN (1 << 5) | ||
335 | #define ARMV6_PMCR_CCOUNT_IEN (1 << 6) | ||
336 | #define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8) | ||
337 | #define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9) | ||
338 | #define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10) | ||
339 | #define ARMV6_PMCR_EVT_COUNT0_SHIFT 20 | ||
340 | #define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT) | ||
341 | #define ARMV6_PMCR_EVT_COUNT1_SHIFT 12 | ||
342 | #define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT) | ||
343 | |||
344 | #define ARMV6_PMCR_OVERFLOWED_MASK \ | ||
345 | (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \ | ||
346 | ARMV6_PMCR_CCOUNT_OVERFLOW) | ||
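The PMCR bit definitions above pack both the control bits and the two 8-bit event fields into a single register. A minimal sketch of composing a value that counts a hypothetical event number 0x7 on counter 0 with its overflow interrupt enabled and the PMU running; the helper name is illustrative, and armv6pmu_enable_event()/armv6pmu_start() below do the equivalent read-modify-write under pmu_lock:

static inline unsigned long armv6_pmcr_compose_sketch(unsigned long pmcr)
{
	pmcr &= ~ARMV6_PMCR_EVT_COUNT0_MASK;		/* drop the old event */
	pmcr |= 0x7 << ARMV6_PMCR_EVT_COUNT0_SHIFT;	/* select the new event */
	pmcr |= ARMV6_PMCR_COUNT0_IEN;			/* interrupt on overflow */
	pmcr |= ARMV6_PMCR_ENABLE;			/* start counting */
	return pmcr;
}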
347 | |||
348 | static inline int | ||
349 | armv6_pmcr_has_overflowed(unsigned long pmcr) | ||
350 | { | ||
351 | return pmcr & ARMV6_PMCR_OVERFLOWED_MASK; | ||
352 | } | ||
353 | |||
354 | static inline int | ||
355 | armv6_pmcr_counter_has_overflowed(unsigned long pmcr, | ||
356 | enum armv6_counters counter) | ||
357 | { | ||
358 | int ret = 0; | ||
359 | |||
360 | if (ARMV6_CYCLE_COUNTER == counter) | ||
361 | ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW; | ||
362 | else if (ARMV6_COUNTER0 == counter) | ||
363 | ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW; | ||
364 | else if (ARMV6_COUNTER1 == counter) | ||
365 | ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW; | ||
366 | else | ||
367 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
368 | |||
369 | return ret; | ||
370 | } | ||
371 | |||
372 | static inline u32 | ||
373 | armv6pmu_read_counter(int counter) | ||
374 | { | ||
375 | unsigned long value = 0; | ||
376 | |||
377 | if (ARMV6_CYCLE_COUNTER == counter) | ||
378 | asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value)); | ||
379 | else if (ARMV6_COUNTER0 == counter) | ||
380 | asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value)); | ||
381 | else if (ARMV6_COUNTER1 == counter) | ||
382 | asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value)); | ||
383 | else | ||
384 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
385 | |||
386 | return value; | ||
387 | } | ||
388 | |||
389 | static inline void | ||
390 | armv6pmu_write_counter(int counter, | ||
391 | u32 value) | ||
392 | { | ||
393 | if (ARMV6_CYCLE_COUNTER == counter) | ||
394 | asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); | ||
395 | else if (ARMV6_COUNTER0 == counter) | ||
396 | asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value)); | ||
397 | else if (ARMV6_COUNTER1 == counter) | ||
398 | asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value)); | ||
399 | else | ||
400 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
401 | } | ||
402 | |||
403 | void | ||
404 | armv6pmu_enable_event(struct hw_perf_event *hwc, | ||
405 | int idx) | ||
406 | { | ||
407 | unsigned long val, mask, evt, flags; | ||
408 | |||
409 | if (ARMV6_CYCLE_COUNTER == idx) { | ||
410 | mask = 0; | ||
411 | evt = ARMV6_PMCR_CCOUNT_IEN; | ||
412 | } else if (ARMV6_COUNTER0 == idx) { | ||
413 | mask = ARMV6_PMCR_EVT_COUNT0_MASK; | ||
414 | evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) | | ||
415 | ARMV6_PMCR_COUNT0_IEN; | ||
416 | } else if (ARMV6_COUNTER1 == idx) { | ||
417 | mask = ARMV6_PMCR_EVT_COUNT1_MASK; | ||
418 | evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) | | ||
419 | ARMV6_PMCR_COUNT1_IEN; | ||
420 | } else { | ||
421 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
422 | return; | ||
423 | } | ||
424 | |||
425 | /* | ||
426 | * Mask out the current event and set the counter to count the event | ||
427 | * that we're interested in. | ||
428 | */ | ||
429 | spin_lock_irqsave(&pmu_lock, flags); | ||
430 | val = armv6_pmcr_read(); | ||
431 | val &= ~mask; | ||
432 | val |= evt; | ||
433 | armv6_pmcr_write(val); | ||
434 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
435 | } | ||
436 | |||
437 | static irqreturn_t | ||
438 | armv6pmu_handle_irq(int irq_num, | ||
439 | void *dev) | ||
440 | { | ||
441 | unsigned long pmcr = armv6_pmcr_read(); | ||
442 | struct perf_sample_data data; | ||
443 | struct cpu_hw_events *cpuc; | ||
444 | struct pt_regs *regs; | ||
445 | int idx; | ||
446 | |||
447 | if (!armv6_pmcr_has_overflowed(pmcr)) | ||
448 | return IRQ_NONE; | ||
449 | |||
450 | regs = get_irq_regs(); | ||
451 | |||
452 | /* | ||
453 | * The interrupts are cleared by writing the overflow flags back to | ||
454 | * the control register. All of the other bits don't have any effect | ||
455 | * if they are rewritten, so write the whole value back. | ||
456 | */ | ||
457 | armv6_pmcr_write(pmcr); | ||
458 | |||
459 | perf_sample_data_init(&data, 0); | ||
460 | |||
461 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
462 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
463 | struct perf_event *event = cpuc->events[idx]; | ||
464 | struct hw_perf_event *hwc; | ||
465 | |||
466 | if (!test_bit(idx, cpuc->active_mask)) | ||
467 | continue; | ||
468 | |||
469 | /* | ||
470 | * We have a single interrupt for all counters. Check that | ||
471 | * each counter has overflowed before we process it. | ||
472 | */ | ||
473 | if (!armv6_pmcr_counter_has_overflowed(pmcr, idx)) | ||
474 | continue; | ||
475 | |||
476 | hwc = &event->hw; | ||
477 | armpmu_event_update(event, hwc, idx); | ||
478 | data.period = event->hw.last_period; | ||
479 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
480 | continue; | ||
481 | |||
482 | if (perf_event_overflow(event, 0, &data, regs)) | ||
483 | armpmu->disable(hwc, idx); | ||
484 | } | ||
485 | |||
486 | /* | ||
487 | * Handle the pending perf events. | ||
488 | * | ||
489 | * Note: this call *must* be run with interrupts disabled. For | ||
490 | * platforms that can have the PMU interrupts raised as an NMI, this | ||
491 | * will not work. | ||
492 | */ | ||
493 | irq_work_run(); | ||
494 | |||
495 | return IRQ_HANDLED; | ||
496 | } | ||
497 | |||
498 | static void | ||
499 | armv6pmu_start(void) | ||
500 | { | ||
501 | unsigned long flags, val; | ||
502 | |||
503 | spin_lock_irqsave(&pmu_lock, flags); | ||
504 | val = armv6_pmcr_read(); | ||
505 | val |= ARMV6_PMCR_ENABLE; | ||
506 | armv6_pmcr_write(val); | ||
507 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
508 | } | ||
509 | |||
510 | static void | ||
511 | armv6pmu_stop(void) | ||
512 | { | ||
513 | unsigned long flags, val; | ||
514 | |||
515 | spin_lock_irqsave(&pmu_lock, flags); | ||
516 | val = armv6_pmcr_read(); | ||
517 | val &= ~ARMV6_PMCR_ENABLE; | ||
518 | armv6_pmcr_write(val); | ||
519 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
520 | } | ||
521 | |||
522 | static int | ||
523 | armv6pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
524 | struct hw_perf_event *event) | ||
525 | { | ||
526 | /* Always place a cycle counter event on the dedicated cycle counter. */ | ||
527 | if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) { | ||
528 | if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) | ||
529 | return -EAGAIN; | ||
530 | |||
531 | return ARMV6_CYCLE_COUNTER; | ||
532 | } else { | ||
533 | /* | ||
534 | * For anything other than a cycle counter, try and use | ||
535 | * counter0 and counter1. | ||
536 | */ | ||
537 | if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) | ||
538 | return ARMV6_COUNTER1; | ||
539 | |||
540 | if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) | ||
541 | return ARMV6_COUNTER0; | ||
542 | |||
543 | /* The counters are all in use. */ | ||
544 | return -EAGAIN; | ||
545 | } | ||
546 | } | ||
547 | |||
548 | static void | ||
549 | armv6pmu_disable_event(struct hw_perf_event *hwc, | ||
550 | int idx) | ||
551 | { | ||
552 | unsigned long val, mask, evt, flags; | ||
553 | |||
554 | if (ARMV6_CYCLE_COUNTER == idx) { | ||
555 | mask = ARMV6_PMCR_CCOUNT_IEN; | ||
556 | evt = 0; | ||
557 | } else if (ARMV6_COUNTER0 == idx) { | ||
558 | mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK; | ||
559 | evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT; | ||
560 | } else if (ARMV6_COUNTER1 == idx) { | ||
561 | mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK; | ||
562 | evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT; | ||
563 | } else { | ||
564 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
565 | return; | ||
566 | } | ||
567 | |||
568 | /* | ||
569 | * Mask out the current event and set the counter to count the number | ||
570 | * of ETM bus signal assertion cycles. The external reporting should | ||
571 | * be disabled and so this should never increment. | ||
572 | */ | ||
573 | spin_lock_irqsave(&pmu_lock, flags); | ||
574 | val = armv6_pmcr_read(); | ||
575 | val &= ~mask; | ||
576 | val |= evt; | ||
577 | armv6_pmcr_write(val); | ||
578 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
579 | } | ||
580 | |||
581 | static void | ||
582 | armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, | ||
583 | int idx) | ||
584 | { | ||
585 | unsigned long val, mask, flags, evt = 0; | ||
586 | |||
587 | if (ARMV6_CYCLE_COUNTER == idx) { | ||
588 | mask = ARMV6_PMCR_CCOUNT_IEN; | ||
589 | } else if (ARMV6_COUNTER0 == idx) { | ||
590 | mask = ARMV6_PMCR_COUNT0_IEN; | ||
591 | } else if (ARMV6_COUNTER1 == idx) { | ||
592 | mask = ARMV6_PMCR_COUNT1_IEN; | ||
593 | } else { | ||
594 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
595 | return; | ||
596 | } | ||
597 | |||
598 | /* | ||
599 | * Unlike UP ARMv6, we don't have a way of stopping the counters. We | ||
600 | * simply disable the interrupt reporting. | ||
601 | */ | ||
602 | spin_lock_irqsave(&pmu_lock, flags); | ||
603 | val = armv6_pmcr_read(); | ||
604 | val &= ~mask; | ||
605 | val |= evt; | ||
606 | armv6_pmcr_write(val); | ||
607 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
608 | } | ||
609 | |||
610 | static const struct arm_pmu armv6pmu = { | ||
611 | .id = ARM_PERF_PMU_ID_V6, | ||
612 | .name = "v6", | ||
613 | .handle_irq = armv6pmu_handle_irq, | ||
614 | .enable = armv6pmu_enable_event, | ||
615 | .disable = armv6pmu_disable_event, | ||
616 | .read_counter = armv6pmu_read_counter, | ||
617 | .write_counter = armv6pmu_write_counter, | ||
618 | .get_event_idx = armv6pmu_get_event_idx, | ||
619 | .start = armv6pmu_start, | ||
620 | .stop = armv6pmu_stop, | ||
621 | .cache_map = &armv6_perf_cache_map, | ||
622 | .event_map = &armv6_perf_map, | ||
623 | .raw_event_mask = 0xFF, | ||
624 | .num_events = 3, | ||
625 | .max_period = (1LLU << 32) - 1, | ||
626 | }; | ||
627 | |||
628 | const struct arm_pmu *__init armv6pmu_init(void) | ||
629 | { | ||
630 | return &armv6pmu; | ||
631 | } | ||
632 | |||
633 | /* | ||
634 | * ARMv6mpcore is almost identical to single core ARMv6 with the exception | ||
635 | * that some of the events have different enumerations and that there is no | ||
636 | * *hack* to stop the programmable counters. To stop the counters we simply | ||
637 | * disable the interrupt reporting and update the event. When unthrottling we | ||
638 | * reset the period and enable the interrupt reporting. | ||
639 | */ | ||
640 | static const struct arm_pmu armv6mpcore_pmu = { | ||
641 | .id = ARM_PERF_PMU_ID_V6MP, | ||
642 | .name = "v6mpcore", | ||
643 | .handle_irq = armv6pmu_handle_irq, | ||
644 | .enable = armv6pmu_enable_event, | ||
645 | .disable = armv6mpcore_pmu_disable_event, | ||
646 | .read_counter = armv6pmu_read_counter, | ||
647 | .write_counter = armv6pmu_write_counter, | ||
648 | .get_event_idx = armv6pmu_get_event_idx, | ||
649 | .start = armv6pmu_start, | ||
650 | .stop = armv6pmu_stop, | ||
651 | .cache_map = &armv6mpcore_perf_cache_map, | ||
652 | .event_map = &armv6mpcore_perf_map, | ||
653 | .raw_event_mask = 0xFF, | ||
654 | .num_events = 3, | ||
655 | .max_period = (1LLU << 32) - 1, | ||
656 | }; | ||
657 | |||
658 | const struct arm_pmu *__init armv6mpcore_pmu_init(void) | ||
659 | { | ||
660 | return &armv6mpcore_pmu; | ||
661 | } | ||
662 | #else | ||
663 | const struct arm_pmu *__init armv6pmu_init(void) | ||
664 | { | ||
665 | return NULL; | ||
666 | } | ||
667 | |||
668 | const struct arm_pmu *__init armv6mpcore_pmu_init(void) | ||
669 | { | ||
670 | return NULL; | ||
671 | } | ||
672 | #endif /* CONFIG_CPU_V6 */ | ||
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c new file mode 100644 index 000000000000..4d0423969df9 --- /dev/null +++ b/arch/arm/kernel/perf_event_v7.c | |||
@@ -0,0 +1,906 @@ | |||
1 | /* | ||
2 | * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code. | ||
3 | * | ||
4 | * ARMv7 support: Jean Pihet <jpihet@mvista.com> | ||
5 | * 2010 (c) MontaVista Software, LLC. | ||
6 | * | ||
7 | * Copied from ARMv6 code, with the low level code inspired | ||
8 | * by the ARMv7 Oprofile code. | ||
9 | * | ||
10 | * Cortex-A8 has up to 4 configurable performance counters and | ||
11 | * a single cycle counter. | ||
12 | * Cortex-A9 has up to 31 configurable performance counters and | ||
13 | * a single cycle counter. | ||
14 | * | ||
15 | * All counters can be enabled/disabled and IRQ masked separately. The cycle | ||
16 | * counter and all 4 performance counters together can be reset separately. | ||
17 | */ | ||
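The comment above notes that Cortex-A8 and Cortex-A9 implement different numbers of configurable counters, which is why the old init code (removed in the init_hw_perf_events hunk earlier in this patch) called armv7_reset_read_pmnc() to discover the count at run time. A minimal sketch of that discovery, assuming the architectural PMNC/PMCR encoding at p15, c9, c12, 0 with the counter count in bits [15:11]; the helper name is illustrative only:

static inline u32 armv7_read_num_counters_sketch(void)
{
	u32 pmnc;

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (pmnc));
	return (pmnc >> 11) & 0x1f;	/* PMNC.N: number of event counters */
}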
18 | |||
19 | #ifdef CONFIG_CPU_V7 | ||
20 | /* Common ARMv7 event types */ | ||
21 | enum armv7_perf_types { | ||
22 | ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, | ||
23 | ARMV7_PERFCTR_IFETCH_MISS = 0x01, | ||
24 | ARMV7_PERFCTR_ITLB_MISS = 0x02, | ||
25 | ARMV7_PERFCTR_DCACHE_REFILL = 0x03, | ||
26 | ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, | ||
27 | ARMV7_PERFCTR_DTLB_REFILL = 0x05, | ||
28 | ARMV7_PERFCTR_DREAD = 0x06, | ||
29 | ARMV7_PERFCTR_DWRITE = 0x07, | ||
30 | |||
31 | ARMV7_PERFCTR_EXC_TAKEN = 0x09, | ||
32 | ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, | ||
33 | ARMV7_PERFCTR_CID_WRITE = 0x0B, | ||
34 | /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. | ||
35 | * It counts: | ||
36 | * - all branch instructions, | ||
37 | * - instructions that explicitly write the PC, | ||
38 | * - exception generating instructions. | ||
39 | */ | ||
40 | ARMV7_PERFCTR_PC_WRITE = 0x0C, | ||
41 | ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, | ||
42 | ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, | ||
43 | ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, | ||
44 | ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, | ||
45 | |||
46 | ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12, | ||
47 | |||
48 | ARMV7_PERFCTR_CPU_CYCLES = 0xFF | ||
49 | }; | ||
50 | |||
51 | /* ARMv7 Cortex-A8 specific event types */ | ||
52 | enum armv7_a8_perf_types { | ||
53 | ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, | ||
54 | |||
55 | ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, | ||
56 | |||
57 | ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, | ||
58 | ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, | ||
59 | ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, | ||
60 | ARMV7_PERFCTR_L2_ACCESS = 0x43, | ||
61 | ARMV7_PERFCTR_L2_CACH_MISS = 0x44, | ||
62 | ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45, | ||
63 | ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46, | ||
64 | ARMV7_PERFCTR_MEMORY_REPLAY = 0x47, | ||
65 | ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48, | ||
66 | ARMV7_PERFCTR_L1_DATA_MISS = 0x49, | ||
67 | ARMV7_PERFCTR_L1_INST_MISS = 0x4A, | ||
68 | ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B, | ||
69 | ARMV7_PERFCTR_L1_NEON_DATA = 0x4C, | ||
70 | ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D, | ||
71 | ARMV7_PERFCTR_L2_NEON = 0x4E, | ||
72 | ARMV7_PERFCTR_L2_NEON_HIT = 0x4F, | ||
73 | ARMV7_PERFCTR_L1_INST = 0x50, | ||
74 | ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51, | ||
75 | ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52, | ||
76 | ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53, | ||
77 | ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54, | ||
78 | ARMV7_PERFCTR_OP_EXECUTED = 0x55, | ||
79 | ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56, | ||
80 | ARMV7_PERFCTR_CYCLES_INST = 0x57, | ||
81 | ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58, | ||
82 | ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59, | ||
83 | ARMV7_PERFCTR_NEON_CYCLES = 0x5A, | ||
84 | |||
85 | ARMV7_PERFCTR_PMU0_EVENTS = 0x70, | ||
86 | ARMV7_PERFCTR_PMU1_EVENTS = 0x71, | ||
87 | ARMV7_PERFCTR_PMU_EVENTS = 0x72, | ||
88 | }; | ||
89 | |||
90 | /* ARMv7 Cortex-A9 specific event types */ | ||
91 | enum armv7_a9_perf_types { | ||
92 | ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40, | ||
93 | ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41, | ||
94 | ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42, | ||
95 | |||
96 | ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50, | ||
97 | ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51, | ||
98 | |||
99 | ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60, | ||
100 | ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61, | ||
101 | ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62, | ||
102 | ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63, | ||
103 | ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64, | ||
104 | ARMV7_PERFCTR_DATA_EVICTION = 0x65, | ||
105 | ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66, | ||
106 | ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67, | ||
107 | ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68, | ||
108 | |||
109 | ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E, | ||
110 | |||
111 | ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70, | ||
112 | ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71, | ||
113 | ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72, | ||
114 | ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73, | ||
115 | ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74, | ||
116 | |||
117 | ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80, | ||
118 | ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81, | ||
119 | ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82, | ||
120 | ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83, | ||
121 | ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84, | ||
122 | ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85, | ||
123 | ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86, | ||
124 | |||
125 | ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A, | ||
126 | ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B, | ||
127 | |||
128 | ARMV7_PERFCTR_ISB_INST = 0x90, | ||
129 | ARMV7_PERFCTR_DSB_INST = 0x91, | ||
130 | ARMV7_PERFCTR_DMB_INST = 0x92, | ||
131 | ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93, | ||
132 | |||
133 | ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0, | ||
134 | ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1, | ||
135 | ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2, | ||
136 | ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3, | ||
137 | ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4, | ||
138 | ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 | ||
139 | }; | ||
140 | |||
141 | /* | ||
142 | * Cortex-A8 HW events mapping | ||
143 | * | ||
144 | * The hardware events that we support. We do support cache operations but | ||
145 | * we have Harvard caches and no way to combine instruction and data | ||
146 | * accesses/misses in hardware. | ||
147 | */ | ||
148 | static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { | ||
149 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | ||
150 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | ||
151 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
152 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
153 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | ||
154 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
155 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, | ||
156 | }; | ||
157 | |||
158 | static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
159 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
160 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
161 | [C(L1D)] = { | ||
162 | /* | ||
163 | * The performance counters don't differentiate between read | ||
164 | * and write accesses/misses so this isn't strictly correct, | ||
165 | * but it's the best we can do. Writes and reads get | ||
166 | * combined. | ||
167 | */ | ||
168 | [C(OP_READ)] = { | ||
169 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
170 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
171 | }, | ||
172 | [C(OP_WRITE)] = { | ||
173 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
174 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
175 | }, | ||
176 | [C(OP_PREFETCH)] = { | ||
177 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
178 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
179 | }, | ||
180 | }, | ||
181 | [C(L1I)] = { | ||
182 | [C(OP_READ)] = { | ||
183 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, | ||
184 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, | ||
185 | }, | ||
186 | [C(OP_WRITE)] = { | ||
187 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, | ||
188 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, | ||
189 | }, | ||
190 | [C(OP_PREFETCH)] = { | ||
191 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
192 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
193 | }, | ||
194 | }, | ||
195 | [C(LL)] = { | ||
196 | [C(OP_READ)] = { | ||
197 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, | ||
198 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, | ||
199 | }, | ||
200 | [C(OP_WRITE)] = { | ||
201 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, | ||
202 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, | ||
203 | }, | ||
204 | [C(OP_PREFETCH)] = { | ||
205 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
206 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
207 | }, | ||
208 | }, | ||
209 | [C(DTLB)] = { | ||
210 | /* | ||
211 | * Only ITLB misses and DTLB refills are supported. | ||
212 | * If users want the DTLB refill misses, a raw counter | ||
213 | * must be used. | ||
214 | */ | ||
215 | [C(OP_READ)] = { | ||
216 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
217 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
218 | }, | ||
219 | [C(OP_WRITE)] = { | ||
220 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
221 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
222 | }, | ||
223 | [C(OP_PREFETCH)] = { | ||
224 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
225 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
226 | }, | ||
227 | }, | ||
228 | [C(ITLB)] = { | ||
229 | [C(OP_READ)] = { | ||
230 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
231 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
232 | }, | ||
233 | [C(OP_WRITE)] = { | ||
234 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
235 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
236 | }, | ||
237 | [C(OP_PREFETCH)] = { | ||
238 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
239 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
240 | }, | ||
241 | }, | ||
242 | [C(BPU)] = { | ||
243 | [C(OP_READ)] = { | ||
244 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
245 | [C(RESULT_MISS)] | ||
246 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
247 | }, | ||
248 | [C(OP_WRITE)] = { | ||
249 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
250 | [C(RESULT_MISS)] | ||
251 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
252 | }, | ||
253 | [C(OP_PREFETCH)] = { | ||
254 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
255 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
256 | }, | ||
257 | }, | ||
258 | }; | ||
259 | |||
260 | /* | ||
261 | * Cortex-A9 HW events mapping | ||
262 | */ | ||
263 | static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { | ||
264 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | ||
265 | [PERF_COUNT_HW_INSTRUCTIONS] = | ||
266 | ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE, | ||
267 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT, | ||
268 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS, | ||
269 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | ||
270 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
271 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, | ||
272 | }; | ||
273 | |||
274 | static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
275 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
276 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
277 | [C(L1D)] = { | ||
278 | /* | ||
279 | * The performance counters don't differentiate between read | ||
280 | * and write accesses/misses so this isn't strictly correct, | ||
281 | * but it's the best we can do. Writes and reads get | ||
282 | * combined. | ||
283 | */ | ||
284 | [C(OP_READ)] = { | ||
285 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
286 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
287 | }, | ||
288 | [C(OP_WRITE)] = { | ||
289 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
290 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
291 | }, | ||
292 | [C(OP_PREFETCH)] = { | ||
293 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
294 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
295 | }, | ||
296 | }, | ||
297 | [C(L1I)] = { | ||
298 | [C(OP_READ)] = { | ||
299 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
300 | [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, | ||
301 | }, | ||
302 | [C(OP_WRITE)] = { | ||
303 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
304 | [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, | ||
305 | }, | ||
306 | [C(OP_PREFETCH)] = { | ||
307 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
308 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
309 | }, | ||
310 | }, | ||
311 | [C(LL)] = { | ||
312 | [C(OP_READ)] = { | ||
313 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
314 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
315 | }, | ||
316 | [C(OP_WRITE)] = { | ||
317 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
318 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
319 | }, | ||
320 | [C(OP_PREFETCH)] = { | ||
321 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
322 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
323 | }, | ||
324 | }, | ||
325 | [C(DTLB)] = { | ||
326 | /* | ||
327 | * Only ITLB misses and DTLB refills are supported. | ||
328 | * If users want the DTLB refill misses, a raw counter | ||
329 | * must be used. | ||
330 | */ | ||
331 | [C(OP_READ)] = { | ||
332 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
333 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
334 | }, | ||
335 | [C(OP_WRITE)] = { | ||
336 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
337 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
338 | }, | ||
339 | [C(OP_PREFETCH)] = { | ||
340 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
341 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
342 | }, | ||
343 | }, | ||
344 | [C(ITLB)] = { | ||
345 | [C(OP_READ)] = { | ||
346 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
347 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
348 | }, | ||
349 | [C(OP_WRITE)] = { | ||
350 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
351 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
352 | }, | ||
353 | [C(OP_PREFETCH)] = { | ||
354 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
355 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
356 | }, | ||
357 | }, | ||
358 | [C(BPU)] = { | ||
359 | [C(OP_READ)] = { | ||
360 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
361 | [C(RESULT_MISS)] | ||
362 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
363 | }, | ||
364 | [C(OP_WRITE)] = { | ||
365 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
366 | [C(RESULT_MISS)] | ||
367 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
368 | }, | ||
369 | [C(OP_PREFETCH)] = { | ||
370 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
371 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
372 | }, | ||
373 | }, | ||
374 | }; | ||
375 | |||
376 | /* | ||
377 | * Perf Events counters | ||
378 | */ | ||
379 | enum armv7_counters { | ||
380 | ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */ | ||
381 | ARMV7_COUNTER0 = 2, /* First event counter */ | ||
382 | }; | ||
383 | |||
384 | /* | ||
385 | * The cycle counter is ARMV7_CYCLE_COUNTER. | ||
386 | * The first event counter is ARMV7_COUNTER0. | ||
387 | * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1). | ||
388 | */ | ||
389 | #define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1) | ||
390 | |||
391 | /* | ||
392 | * ARMv7 low level PMNC access | ||
393 | */ | ||
394 | |||
395 | /* | ||
396 | * Per-CPU PMNC: config reg | ||
397 | */ | ||
398 | #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */ | ||
399 | #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */ | ||
400 | #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */ | ||
401 | #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */ | ||
402 | #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */ | ||
403 | #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */ | ||
404 | #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */ | ||
405 | #define ARMV7_PMNC_N_MASK 0x1f | ||
406 | #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ | ||
407 | |||
408 | /* | ||
409 | * Available counters | ||
410 | */ | ||
411 | #define ARMV7_CNT0 0 /* First event counter */ | ||
412 | #define ARMV7_CCNT 31 /* Cycle counter */ | ||
413 | |||
414 | /* Perf Event to low level counters mapping */ | ||
415 | #define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0) | ||
416 | |||
417 | /* | ||
418 | * CNTENS: counters enable reg | ||
419 | */ | ||
420 | #define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
421 | #define ARMV7_CNTENS_C (1 << ARMV7_CCNT) | ||
422 | |||
423 | /* | ||
424 | * CNTENC: counters disable reg | ||
425 | */ | ||
426 | #define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
427 | #define ARMV7_CNTENC_C (1 << ARMV7_CCNT) | ||
428 | |||
429 | /* | ||
430 | * INTENS: counters overflow interrupt enable reg | ||
431 | */ | ||
432 | #define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
433 | #define ARMV7_INTENS_C (1 << ARMV7_CCNT) | ||
434 | |||
435 | /* | ||
436 | * INTENC: counters overflow interrupt disable reg | ||
437 | */ | ||
438 | #define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
439 | #define ARMV7_INTENC_C (1 << ARMV7_CCNT) | ||
440 | |||
441 | /* | ||
442 | * EVTSEL: Event selection reg | ||
443 | */ | ||
444 | #define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */ | ||
445 | |||
446 | /* | ||
447 | * SELECT: Counter selection reg | ||
448 | */ | ||
449 | #define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */ | ||
450 | |||
451 | /* | ||
452 | * FLAG: counters overflow flag status reg | ||
453 | */ | ||
454 | #define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
455 | #define ARMV7_FLAG_C (1 << ARMV7_CCNT) | ||
456 | #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ | ||
457 | #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK | ||
458 | |||
459 | static inline unsigned long armv7_pmnc_read(void) | ||
460 | { | ||
461 | u32 val; | ||
462 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); | ||
463 | return val; | ||
464 | } | ||
465 | |||
466 | static inline void armv7_pmnc_write(unsigned long val) | ||
467 | { | ||
468 | val &= ARMV7_PMNC_MASK; | ||
469 | asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); | ||
470 | } | ||
471 | |||
472 | static inline int armv7_pmnc_has_overflowed(unsigned long pmnc) | ||
473 | { | ||
474 | return pmnc & ARMV7_OVERFLOWED_MASK; | ||
475 | } | ||
476 | |||
477 | static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, | ||
478 | enum armv7_counters counter) | ||
479 | { | ||
480 | int ret = 0; | ||
481 | |||
482 | if (counter == ARMV7_CYCLE_COUNTER) | ||
483 | ret = pmnc & ARMV7_FLAG_C; | ||
484 | else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST)) | ||
485 | ret = pmnc & ARMV7_FLAG_P(counter); | ||
486 | else | ||
487 | pr_err("CPU%u checking wrong counter %d overflow status\n", | ||
488 | smp_processor_id(), counter); | ||
489 | |||
490 | return ret; | ||
491 | } | ||
492 | |||
493 | static inline int armv7_pmnc_select_counter(unsigned int idx) | ||
494 | { | ||
495 | u32 val; | ||
496 | |||
497 | if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) { | ||
498 | pr_err("CPU%u selecting wrong PMNC counter" | ||
499 | " %d\n", smp_processor_id(), idx); | ||
500 | return -1; | ||
501 | } | ||
502 | |||
503 | val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK; | ||
504 | asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val)); | ||
505 | |||
506 | return idx; | ||
507 | } | ||
508 | |||
509 | static inline u32 armv7pmu_read_counter(int idx) | ||
510 | { | ||
511 | unsigned long value = 0; | ||
512 | |||
513 | if (idx == ARMV7_CYCLE_COUNTER) | ||
514 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); | ||
515 | else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { | ||
516 | if (armv7_pmnc_select_counter(idx) == idx) | ||
517 | asm volatile("mrc p15, 0, %0, c9, c13, 2" | ||
518 | : "=r" (value)); | ||
519 | } else | ||
520 | pr_err("CPU%u reading wrong counter %d\n", | ||
521 | smp_processor_id(), idx); | ||
522 | |||
523 | return value; | ||
524 | } | ||
525 | |||
526 | static inline void armv7pmu_write_counter(int idx, u32 value) | ||
527 | { | ||
528 | if (idx == ARMV7_CYCLE_COUNTER) | ||
529 | asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); | ||
530 | else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { | ||
531 | if (armv7_pmnc_select_counter(idx) == idx) | ||
532 | asm volatile("mcr p15, 0, %0, c9, c13, 2" | ||
533 | : : "r" (value)); | ||
534 | } else | ||
535 | pr_err("CPU%u writing wrong counter %d\n", | ||
536 | smp_processor_id(), idx); | ||
537 | } | ||
538 | |||
539 | static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val) | ||
540 | { | ||
541 | if (armv7_pmnc_select_counter(idx) == idx) { | ||
542 | val &= ARMV7_EVTSEL_MASK; | ||
543 | asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); | ||
544 | } | ||
545 | } | ||
546 | |||
547 | static inline u32 armv7_pmnc_enable_counter(unsigned int idx) | ||
548 | { | ||
549 | u32 val; | ||
550 | |||
551 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
552 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
553 | pr_err("CPU%u enabling wrong PMNC counter" | ||
554 | " %d\n", smp_processor_id(), idx); | ||
555 | return -1; | ||
556 | } | ||
557 | |||
558 | if (idx == ARMV7_CYCLE_COUNTER) | ||
559 | val = ARMV7_CNTENS_C; | ||
560 | else | ||
561 | val = ARMV7_CNTENS_P(idx); | ||
562 | |||
563 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val)); | ||
564 | |||
565 | return idx; | ||
566 | } | ||
567 | |||
568 | static inline u32 armv7_pmnc_disable_counter(unsigned int idx) | ||
569 | { | ||
570 | u32 val; | ||
571 | |||
572 | |||
573 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
574 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
575 | pr_err("CPU%u disabling wrong PMNC counter" | ||
576 | " %d\n", smp_processor_id(), idx); | ||
577 | return -1; | ||
578 | } | ||
579 | |||
580 | if (idx == ARMV7_CYCLE_COUNTER) | ||
581 | val = ARMV7_CNTENC_C; | ||
582 | else | ||
583 | val = ARMV7_CNTENC_P(idx); | ||
584 | |||
585 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val)); | ||
586 | |||
587 | return idx; | ||
588 | } | ||
589 | |||
590 | static inline u32 armv7_pmnc_enable_intens(unsigned int idx) | ||
591 | { | ||
592 | u32 val; | ||
593 | |||
594 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
595 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
596 | pr_err("CPU%u enabling wrong PMNC counter" | ||
597 | " interrupt enable %d\n", smp_processor_id(), idx); | ||
598 | return -1; | ||
599 | } | ||
600 | |||
601 | if (idx == ARMV7_CYCLE_COUNTER) | ||
602 | val = ARMV7_INTENS_C; | ||
603 | else | ||
604 | val = ARMV7_INTENS_P(idx); | ||
605 | |||
606 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val)); | ||
607 | |||
608 | return idx; | ||
609 | } | ||
610 | |||
611 | static inline u32 armv7_pmnc_disable_intens(unsigned int idx) | ||
612 | { | ||
613 | u32 val; | ||
614 | |||
615 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
616 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
617 | pr_err("CPU%u disabling wrong PMNC counter" | ||
618 | " interrupt enable %d\n", smp_processor_id(), idx); | ||
619 | return -1; | ||
620 | } | ||
621 | |||
622 | if (idx == ARMV7_CYCLE_COUNTER) | ||
623 | val = ARMV7_INTENC_C; | ||
624 | else | ||
625 | val = ARMV7_INTENC_P(idx); | ||
626 | |||
627 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val)); | ||
628 | |||
629 | return idx; | ||
630 | } | ||
631 | |||
632 | static inline u32 armv7_pmnc_getreset_flags(void) | ||
633 | { | ||
634 | u32 val; | ||
635 | |||
636 | /* Read */ | ||
637 | asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); | ||
638 | |||
639 | /* Write to clear flags */ | ||
640 | val &= ARMV7_FLAG_MASK; | ||
641 | asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val)); | ||
642 | |||
643 | return val; | ||
644 | } | ||
645 | |||
646 | #ifdef DEBUG | ||
647 | static void armv7_pmnc_dump_regs(void) | ||
648 | { | ||
649 | u32 val; | ||
650 | unsigned int cnt; | ||
651 | |||
652 | printk(KERN_INFO "PMNC registers dump:\n"); | ||
653 | |||
654 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); | ||
655 | printk(KERN_INFO "PMNC =0x%08x\n", val); | ||
656 | |||
657 | asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val)); | ||
658 | printk(KERN_INFO "CNTENS=0x%08x\n", val); | ||
659 | |||
660 | asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val)); | ||
661 | printk(KERN_INFO "INTENS=0x%08x\n", val); | ||
662 | |||
663 | asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); | ||
664 | printk(KERN_INFO "FLAGS =0x%08x\n", val); | ||
665 | |||
666 | asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val)); | ||
667 | printk(KERN_INFO "SELECT=0x%08x\n", val); | ||
668 | |||
669 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); | ||
670 | printk(KERN_INFO "CCNT =0x%08x\n", val); | ||
671 | |||
672 | for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) { | ||
673 | armv7_pmnc_select_counter(cnt); | ||
674 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); | ||
675 | printk(KERN_INFO "CNT[%d] count =0x%08x\n", | ||
676 | cnt-ARMV7_EVENT_CNT_TO_CNTx, val); | ||
677 | asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); | ||
678 | printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", | ||
679 | cnt-ARMV7_EVENT_CNT_TO_CNTx, val); | ||
680 | } | ||
681 | } | ||
682 | #endif | ||
683 | |||
684 | void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
685 | { | ||
686 | unsigned long flags; | ||
687 | |||
688 | /* | ||
689 | * Enable counter and interrupt, and set the counter to count | ||
690 | * the event that we're interested in. | ||
691 | */ | ||
692 | spin_lock_irqsave(&pmu_lock, flags); | ||
693 | |||
694 | /* | ||
695 | * Disable counter | ||
696 | */ | ||
697 | armv7_pmnc_disable_counter(idx); | ||
698 | |||
699 | /* | ||
700 | * Set event (if destined for PMNx counters) | ||
701 | * We don't need to set the event if it's a cycle count | ||
702 | */ | ||
703 | if (idx != ARMV7_CYCLE_COUNTER) | ||
704 | armv7_pmnc_write_evtsel(idx, hwc->config_base); | ||
705 | |||
706 | /* | ||
707 | * Enable interrupt for this counter | ||
708 | */ | ||
709 | armv7_pmnc_enable_intens(idx); | ||
710 | |||
711 | /* | ||
712 | * Enable counter | ||
713 | */ | ||
714 | armv7_pmnc_enable_counter(idx); | ||
715 | |||
716 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
717 | } | ||
718 | |||
719 | static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
720 | { | ||
721 | unsigned long flags; | ||
722 | |||
723 | /* | ||
724 | * Disable counter and interrupt | ||
725 | */ | ||
726 | spin_lock_irqsave(&pmu_lock, flags); | ||
727 | |||
728 | /* | ||
729 | * Disable counter | ||
730 | */ | ||
731 | armv7_pmnc_disable_counter(idx); | ||
732 | |||
733 | /* | ||
734 | * Disable interrupt for this counter | ||
735 | */ | ||
736 | armv7_pmnc_disable_intens(idx); | ||
737 | |||
738 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
739 | } | ||
740 | |||
741 | static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | ||
742 | { | ||
743 | unsigned long pmnc; | ||
744 | struct perf_sample_data data; | ||
745 | struct cpu_hw_events *cpuc; | ||
746 | struct pt_regs *regs; | ||
747 | int idx; | ||
748 | |||
749 | /* | ||
750 | * Get and reset the IRQ flags | ||
751 | */ | ||
752 | pmnc = armv7_pmnc_getreset_flags(); | ||
753 | |||
754 | /* | ||
755 | * Did an overflow occur? | ||
756 | */ | ||
757 | if (!armv7_pmnc_has_overflowed(pmnc)) | ||
758 | return IRQ_NONE; | ||
759 | |||
760 | /* | ||
761 | * Handle the counter(s) overflow(s) | ||
762 | */ | ||
763 | regs = get_irq_regs(); | ||
764 | |||
765 | perf_sample_data_init(&data, 0); | ||
766 | |||
767 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
768 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
769 | struct perf_event *event = cpuc->events[idx]; | ||
770 | struct hw_perf_event *hwc; | ||
771 | |||
772 | if (!test_bit(idx, cpuc->active_mask)) | ||
773 | continue; | ||
774 | |||
775 | /* | ||
776 | * We have a single interrupt for all counters. Check that | ||
777 | * each counter has overflowed before we process it. | ||
778 | */ | ||
779 | if (!armv7_pmnc_counter_has_overflowed(pmnc, idx)) | ||
780 | continue; | ||
781 | |||
782 | hwc = &event->hw; | ||
783 | armpmu_event_update(event, hwc, idx); | ||
784 | data.period = event->hw.last_period; | ||
785 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
786 | continue; | ||
787 | |||
788 | if (perf_event_overflow(event, 0, &data, regs)) | ||
789 | armpmu->disable(hwc, idx); | ||
790 | } | ||
791 | |||
792 | /* | ||
793 | * Handle the pending perf events. | ||
794 | * | ||
795 | * Note: this call *must* be run with interrupts disabled. For | ||
796 | * platforms that can have the PMU interrupts raised as an NMI, this | ||
797 | * will not work. | ||
798 | */ | ||
799 | irq_work_run(); | ||
800 | |||
801 | return IRQ_HANDLED; | ||
802 | } | ||
803 | |||
804 | static void armv7pmu_start(void) | ||
805 | { | ||
806 | unsigned long flags; | ||
807 | |||
808 | spin_lock_irqsave(&pmu_lock, flags); | ||
809 | /* Enable all counters */ | ||
810 | armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); | ||
811 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
812 | } | ||
813 | |||
814 | static void armv7pmu_stop(void) | ||
815 | { | ||
816 | unsigned long flags; | ||
817 | |||
818 | spin_lock_irqsave(&pmu_lock, flags); | ||
819 | /* Disable all counters */ | ||
820 | armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); | ||
821 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
822 | } | ||
823 | |||
824 | static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
825 | struct hw_perf_event *event) | ||
826 | { | ||
827 | int idx; | ||
828 | |||
829 | /* Always place a cycle-count event into the cycle counter. */ | ||
830 | if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) { | ||
831 | if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask)) | ||
832 | return -EAGAIN; | ||
833 | |||
834 | return ARMV7_CYCLE_COUNTER; | ||
835 | } else { | ||
836 | /* | ||
837 | * For anything other than a cycle counter, try to use | ||
838 | * the event counters | ||
839 | */ | ||
840 | for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) { | ||
841 | if (!test_and_set_bit(idx, cpuc->used_mask)) | ||
842 | return idx; | ||
843 | } | ||
844 | |||
845 | /* The counters are all in use. */ | ||
846 | return -EAGAIN; | ||
847 | } | ||
848 | } | ||
849 | |||
850 | static struct arm_pmu armv7pmu = { | ||
851 | .handle_irq = armv7pmu_handle_irq, | ||
852 | .enable = armv7pmu_enable_event, | ||
853 | .disable = armv7pmu_disable_event, | ||
854 | .read_counter = armv7pmu_read_counter, | ||
855 | .write_counter = armv7pmu_write_counter, | ||
856 | .get_event_idx = armv7pmu_get_event_idx, | ||
857 | .start = armv7pmu_start, | ||
858 | .stop = armv7pmu_stop, | ||
859 | .raw_event_mask = 0xFF, | ||
860 | .max_period = (1LLU << 32) - 1, | ||
861 | }; | ||
862 | |||
863 | static u32 __init armv7_reset_read_pmnc(void) | ||
864 | { | ||
865 | u32 nb_cnt; | ||
866 | |||
867 | /* Initialize & Reset PMNC: C and P bits */ | ||
868 | armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); | ||
869 | |||
870 | /* Read the nb of CNTx counters supported from PMNC */ | ||
871 | nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; | ||
872 | |||
873 | /* Add the CPU cycles counter and return */ | ||
874 | return nb_cnt + 1; | ||
875 | } | ||
876 | |||
877 | const struct arm_pmu *__init armv7_a8_pmu_init(void) | ||
878 | { | ||
879 | armv7pmu.id = ARM_PERF_PMU_ID_CA8; | ||
880 | armv7pmu.name = "ARMv7 Cortex-A8"; | ||
881 | armv7pmu.cache_map = &armv7_a8_perf_cache_map; | ||
882 | armv7pmu.event_map = &armv7_a8_perf_map; | ||
883 | armv7pmu.num_events = armv7_reset_read_pmnc(); | ||
884 | return &armv7pmu; | ||
885 | } | ||
886 | |||
887 | const struct arm_pmu *__init armv7_a9_pmu_init(void) | ||
888 | { | ||
889 | armv7pmu.id = ARM_PERF_PMU_ID_CA9; | ||
890 | armv7pmu.name = "ARMv7 Cortex-A9"; | ||
891 | armv7pmu.cache_map = &armv7_a9_perf_cache_map; | ||
892 | armv7pmu.event_map = &armv7_a9_perf_map; | ||
893 | armv7pmu.num_events = armv7_reset_read_pmnc(); | ||
894 | return &armv7pmu; | ||
895 | } | ||
896 | #else | ||
897 | const struct arm_pmu *__init armv7_a8_pmu_init(void) | ||
898 | { | ||
899 | return NULL; | ||
900 | } | ||
901 | |||
902 | const struct arm_pmu *__init armv7_a9_pmu_init(void) | ||
903 | { | ||
904 | return NULL; | ||
905 | } | ||
906 | #endif /* CONFIG_CPU_V7 */ | ||
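For context, the event and cache maps added above are what the core perf layer consults when a user opens a generic hardware event; the counter indices and PMNC accessors then drive the physical counters. The userspace sketch below is not part of the patch and assumes only the standard perf_event_open(2) interface (the wrapper name and dummy workload are made up); it shows how an event routed through armv7_a9_perf_map, such as PERF_COUNT_HW_CPU_CYCLES mapped to ARMV7_PERFCTR_CPU_CYCLES, would be counted. The same program exercises the xscale backend added in the next file unchanged; only the per-CPU implementation differs.

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>
    #include <stdint.h>

    /* Thin wrapper; glibc does not export perf_event_open() directly. */
    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t cycles;
            volatile unsigned int i;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            /* Mapped by the tables above to ARMV7_PERFCTR_CPU_CYCLES. */
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.disabled = 1;

            fd = perf_event_open(&attr, 0, -1, -1, 0); /* this task, any CPU */
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            ioctl(fd, PERF_EVENT_IOC_RESET, 0);
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            for (i = 0; i < 1000000; i++)              /* dummy workload */
                    ;
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            if (read(fd, &cycles, sizeof(cycles)) == sizeof(cycles))
                    printf("cycles: %llu\n", (unsigned long long)cycles);

            close(fd);
            return 0;
    }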
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c new file mode 100644 index 000000000000..4e9592789d40 --- /dev/null +++ b/arch/arm/kernel/perf_event_xscale.c | |||
@@ -0,0 +1,807 @@ | |||
1 | /* | ||
2 | * ARMv5 [xscale] Performance counter handling code. | ||
3 | * | ||
4 | * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com> | ||
5 | * | ||
6 | * Based on the previous xscale OProfile code. | ||
7 | * | ||
8 | * There are two variants of the xscale PMU that we support: | ||
9 | * - xscale1pmu: 2 event counters and a cycle counter | ||
10 | * - xscale2pmu: 4 event counters and a cycle counter | ||
11 | * The two variants share event definitions, but have different | ||
12 | * PMU structures. | ||
13 | */ | ||
14 | |||
15 | #ifdef CONFIG_CPU_XSCALE | ||
16 | enum xscale_perf_types { | ||
17 | XSCALE_PERFCTR_ICACHE_MISS = 0x00, | ||
18 | XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01, | ||
19 | XSCALE_PERFCTR_DATA_STALL = 0x02, | ||
20 | XSCALE_PERFCTR_ITLB_MISS = 0x03, | ||
21 | XSCALE_PERFCTR_DTLB_MISS = 0x04, | ||
22 | XSCALE_PERFCTR_BRANCH = 0x05, | ||
23 | XSCALE_PERFCTR_BRANCH_MISS = 0x06, | ||
24 | XSCALE_PERFCTR_INSTRUCTION = 0x07, | ||
25 | XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08, | ||
26 | XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09, | ||
27 | XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A, | ||
28 | XSCALE_PERFCTR_DCACHE_MISS = 0x0B, | ||
29 | XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C, | ||
30 | XSCALE_PERFCTR_PC_CHANGED = 0x0D, | ||
31 | XSCALE_PERFCTR_BCU_REQUEST = 0x10, | ||
32 | XSCALE_PERFCTR_BCU_FULL = 0x11, | ||
33 | XSCALE_PERFCTR_BCU_DRAIN = 0x12, | ||
34 | XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14, | ||
35 | XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15, | ||
36 | XSCALE_PERFCTR_RMW = 0x16, | ||
37 | /* XSCALE_PERFCTR_CCNT is not hardware defined */ | ||
38 | XSCALE_PERFCTR_CCNT = 0xFE, | ||
39 | XSCALE_PERFCTR_UNUSED = 0xFF, | ||
40 | }; | ||
41 | |||
42 | enum xscale_counters { | ||
43 | XSCALE_CYCLE_COUNTER = 1, | ||
44 | XSCALE_COUNTER0, | ||
45 | XSCALE_COUNTER1, | ||
46 | XSCALE_COUNTER2, | ||
47 | XSCALE_COUNTER3, | ||
48 | }; | ||
49 | |||
50 | static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = { | ||
51 | [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT, | ||
52 | [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION, | ||
53 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
54 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
55 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH, | ||
56 | [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS, | ||
57 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | ||
58 | }; | ||
59 | |||
60 | static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
61 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
62 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
63 | [C(L1D)] = { | ||
64 | [C(OP_READ)] = { | ||
65 | [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS, | ||
66 | [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS, | ||
67 | }, | ||
68 | [C(OP_WRITE)] = { | ||
69 | [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS, | ||
70 | [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS, | ||
71 | }, | ||
72 | [C(OP_PREFETCH)] = { | ||
73 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
74 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
75 | }, | ||
76 | }, | ||
77 | [C(L1I)] = { | ||
78 | [C(OP_READ)] = { | ||
79 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
80 | [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, | ||
81 | }, | ||
82 | [C(OP_WRITE)] = { | ||
83 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
84 | [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, | ||
85 | }, | ||
86 | [C(OP_PREFETCH)] = { | ||
87 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
88 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
89 | }, | ||
90 | }, | ||
91 | [C(LL)] = { | ||
92 | [C(OP_READ)] = { | ||
93 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
94 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
95 | }, | ||
96 | [C(OP_WRITE)] = { | ||
97 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
98 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
99 | }, | ||
100 | [C(OP_PREFETCH)] = { | ||
101 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
102 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
103 | }, | ||
104 | }, | ||
105 | [C(DTLB)] = { | ||
106 | [C(OP_READ)] = { | ||
107 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
108 | [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS, | ||
109 | }, | ||
110 | [C(OP_WRITE)] = { | ||
111 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
112 | [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS, | ||
113 | }, | ||
114 | [C(OP_PREFETCH)] = { | ||
115 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
116 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
117 | }, | ||
118 | }, | ||
119 | [C(ITLB)] = { | ||
120 | [C(OP_READ)] = { | ||
121 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
122 | [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS, | ||
123 | }, | ||
124 | [C(OP_WRITE)] = { | ||
125 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
126 | [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS, | ||
127 | }, | ||
128 | [C(OP_PREFETCH)] = { | ||
129 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
130 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
131 | }, | ||
132 | }, | ||
133 | [C(BPU)] = { | ||
134 | [C(OP_READ)] = { | ||
135 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
136 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
137 | }, | ||
138 | [C(OP_WRITE)] = { | ||
139 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
140 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
141 | }, | ||
142 | [C(OP_PREFETCH)] = { | ||
143 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
144 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
145 | }, | ||
146 | }, | ||
147 | }; | ||
148 | |||
149 | #define XSCALE_PMU_ENABLE 0x001 | ||
150 | #define XSCALE_PMN_RESET 0x002 | ||
151 | #define XSCALE_CCNT_RESET 0x004 | ||
152 | #define XSCALE_PMU_RESET (XSCALE_CCNT_RESET | XSCALE_PMN_RESET) | ||
153 | #define XSCALE_PMU_CNT64 0x008 | ||
154 | |||
155 | #define XSCALE1_OVERFLOWED_MASK 0x700 | ||
156 | #define XSCALE1_CCOUNT_OVERFLOW 0x400 | ||
157 | #define XSCALE1_COUNT0_OVERFLOW 0x100 | ||
158 | #define XSCALE1_COUNT1_OVERFLOW 0x200 | ||
159 | #define XSCALE1_CCOUNT_INT_EN 0x040 | ||
160 | #define XSCALE1_COUNT0_INT_EN 0x010 | ||
161 | #define XSCALE1_COUNT1_INT_EN 0x020 | ||
162 | #define XSCALE1_COUNT0_EVT_SHFT 12 | ||
163 | #define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT) | ||
164 | #define XSCALE1_COUNT1_EVT_SHFT 20 | ||
165 | #define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT) | ||
166 | |||
167 | static inline u32 | ||
168 | xscale1pmu_read_pmnc(void) | ||
169 | { | ||
170 | u32 val; | ||
171 | asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val)); | ||
172 | return val; | ||
173 | } | ||
174 | |||
175 | static inline void | ||
176 | xscale1pmu_write_pmnc(u32 val) | ||
177 | { | ||
178 | /* upper 4 bits and bits 7, 11 are write-as-0 */ | ||
179 | val &= 0xffff77f; | ||
180 | asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val)); | ||
181 | } | ||
182 | |||
183 | static inline int | ||
184 | xscale1_pmnc_counter_has_overflowed(unsigned long pmnc, | ||
185 | enum xscale_counters counter) | ||
186 | { | ||
187 | int ret = 0; | ||
188 | |||
189 | switch (counter) { | ||
190 | case XSCALE_CYCLE_COUNTER: | ||
191 | ret = pmnc & XSCALE1_CCOUNT_OVERFLOW; | ||
192 | break; | ||
193 | case XSCALE_COUNTER0: | ||
194 | ret = pmnc & XSCALE1_COUNT0_OVERFLOW; | ||
195 | break; | ||
196 | case XSCALE_COUNTER1: | ||
197 | ret = pmnc & XSCALE1_COUNT1_OVERFLOW; | ||
198 | break; | ||
199 | default: | ||
200 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
201 | } | ||
202 | |||
203 | return ret; | ||
204 | } | ||
205 | |||
206 | static irqreturn_t | ||
207 | xscale1pmu_handle_irq(int irq_num, void *dev) | ||
208 | { | ||
209 | unsigned long pmnc; | ||
210 | struct perf_sample_data data; | ||
211 | struct cpu_hw_events *cpuc; | ||
212 | struct pt_regs *regs; | ||
213 | int idx; | ||
214 | |||
215 | /* | ||
216 | * NOTE: there's an A stepping erratum that states if an overflow | ||
217 | * bit already exists and another occurs, the previous | ||
218 | * Overflow bit gets cleared. There's no workaround. | ||
219 | * Fixed in B stepping or later. | ||
220 | */ | ||
221 | pmnc = xscale1pmu_read_pmnc(); | ||
222 | |||
223 | /* | ||
224 | * Write the value back to clear the overflow flags. Overflow | ||
225 | * flags remain in pmnc for use below. We also disable the PMU | ||
226 | * while we process the interrupt. | ||
227 | */ | ||
228 | xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE); | ||
229 | |||
230 | if (!(pmnc & XSCALE1_OVERFLOWED_MASK)) | ||
231 | return IRQ_NONE; | ||
232 | |||
233 | regs = get_irq_regs(); | ||
234 | |||
235 | perf_sample_data_init(&data, 0); | ||
236 | |||
237 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
238 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
239 | struct perf_event *event = cpuc->events[idx]; | ||
240 | struct hw_perf_event *hwc; | ||
241 | |||
242 | if (!test_bit(idx, cpuc->active_mask)) | ||
243 | continue; | ||
244 | |||
245 | if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) | ||
246 | continue; | ||
247 | |||
248 | hwc = &event->hw; | ||
249 | armpmu_event_update(event, hwc, idx); | ||
250 | data.period = event->hw.last_period; | ||
251 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
252 | continue; | ||
253 | |||
254 | if (perf_event_overflow(event, 0, &data, regs)) | ||
255 | armpmu->disable(hwc, idx); | ||
256 | } | ||
257 | |||
258 | irq_work_run(); | ||
259 | |||
260 | /* | ||
261 | * Re-enable the PMU. | ||
262 | */ | ||
263 | pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE; | ||
264 | xscale1pmu_write_pmnc(pmnc); | ||
265 | |||
266 | return IRQ_HANDLED; | ||
267 | } | ||
268 | |||
269 | static void | ||
270 | xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
271 | { | ||
272 | unsigned long val, mask, evt, flags; | ||
273 | |||
274 | switch (idx) { | ||
275 | case XSCALE_CYCLE_COUNTER: | ||
276 | mask = 0; | ||
277 | evt = XSCALE1_CCOUNT_INT_EN; | ||
278 | break; | ||
279 | case XSCALE_COUNTER0: | ||
280 | mask = XSCALE1_COUNT0_EVT_MASK; | ||
281 | evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) | | ||
282 | XSCALE1_COUNT0_INT_EN; | ||
283 | break; | ||
284 | case XSCALE_COUNTER1: | ||
285 | mask = XSCALE1_COUNT1_EVT_MASK; | ||
286 | evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) | | ||
287 | XSCALE1_COUNT1_INT_EN; | ||
288 | break; | ||
289 | default: | ||
290 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
291 | return; | ||
292 | } | ||
293 | |||
294 | spin_lock_irqsave(&pmu_lock, flags); | ||
295 | val = xscale1pmu_read_pmnc(); | ||
296 | val &= ~mask; | ||
297 | val |= evt; | ||
298 | xscale1pmu_write_pmnc(val); | ||
299 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
300 | } | ||
301 | |||
302 | static void | ||
303 | xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
304 | { | ||
305 | unsigned long val, mask, evt, flags; | ||
306 | |||
307 | switch (idx) { | ||
308 | case XSCALE_CYCLE_COUNTER: | ||
309 | mask = XSCALE1_CCOUNT_INT_EN; | ||
310 | evt = 0; | ||
311 | break; | ||
312 | case XSCALE_COUNTER0: | ||
313 | mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK; | ||
314 | evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT; | ||
315 | break; | ||
316 | case XSCALE_COUNTER1: | ||
317 | mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK; | ||
318 | evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT; | ||
319 | break; | ||
320 | default: | ||
321 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
322 | return; | ||
323 | } | ||
324 | |||
325 | spin_lock_irqsave(&pmu_lock, flags); | ||
326 | val = xscale1pmu_read_pmnc(); | ||
327 | val &= ~mask; | ||
328 | val |= evt; | ||
329 | xscale1pmu_write_pmnc(val); | ||
330 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
331 | } | ||
332 | |||
333 | static int | ||
334 | xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
335 | struct hw_perf_event *event) | ||
336 | { | ||
337 | if (XSCALE_PERFCTR_CCNT == event->config_base) { | ||
338 | if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask)) | ||
339 | return -EAGAIN; | ||
340 | |||
341 | return XSCALE_CYCLE_COUNTER; | ||
342 | } else { | ||
343 | if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) | ||
344 | return XSCALE_COUNTER1; | ||
345 | |||
346 | if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) | ||
347 | return XSCALE_COUNTER0; | ||
348 | |||
349 | return -EAGAIN; | ||
350 | } | ||
351 | } | ||
352 | |||
353 | static void | ||
354 | xscale1pmu_start(void) | ||
355 | { | ||
356 | unsigned long flags, val; | ||
357 | |||
358 | spin_lock_irqsave(&pmu_lock, flags); | ||
359 | val = xscale1pmu_read_pmnc(); | ||
360 | val |= XSCALE_PMU_ENABLE; | ||
361 | xscale1pmu_write_pmnc(val); | ||
362 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
363 | } | ||
364 | |||
365 | static void | ||
366 | xscale1pmu_stop(void) | ||
367 | { | ||
368 | unsigned long flags, val; | ||
369 | |||
370 | spin_lock_irqsave(&pmu_lock, flags); | ||
371 | val = xscale1pmu_read_pmnc(); | ||
372 | val &= ~XSCALE_PMU_ENABLE; | ||
373 | xscale1pmu_write_pmnc(val); | ||
374 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
375 | } | ||
376 | |||
377 | static inline u32 | ||
378 | xscale1pmu_read_counter(int counter) | ||
379 | { | ||
380 | u32 val = 0; | ||
381 | |||
382 | switch (counter) { | ||
383 | case XSCALE_CYCLE_COUNTER: | ||
384 | asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val)); | ||
385 | break; | ||
386 | case XSCALE_COUNTER0: | ||
387 | asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val)); | ||
388 | break; | ||
389 | case XSCALE_COUNTER1: | ||
390 | asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val)); | ||
391 | break; | ||
392 | } | ||
393 | |||
394 | return val; | ||
395 | } | ||
396 | |||
397 | static inline void | ||
398 | xscale1pmu_write_counter(int counter, u32 val) | ||
399 | { | ||
400 | switch (counter) { | ||
401 | case XSCALE_CYCLE_COUNTER: | ||
402 | asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val)); | ||
403 | break; | ||
404 | case XSCALE_COUNTER0: | ||
405 | asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val)); | ||
406 | break; | ||
407 | case XSCALE_COUNTER1: | ||
408 | asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val)); | ||
409 | break; | ||
410 | } | ||
411 | } | ||
412 | |||
413 | static const struct arm_pmu xscale1pmu = { | ||
414 | .id = ARM_PERF_PMU_ID_XSCALE1, | ||
415 | .name = "xscale1", | ||
416 | .handle_irq = xscale1pmu_handle_irq, | ||
417 | .enable = xscale1pmu_enable_event, | ||
418 | .disable = xscale1pmu_disable_event, | ||
419 | .read_counter = xscale1pmu_read_counter, | ||
420 | .write_counter = xscale1pmu_write_counter, | ||
421 | .get_event_idx = xscale1pmu_get_event_idx, | ||
422 | .start = xscale1pmu_start, | ||
423 | .stop = xscale1pmu_stop, | ||
424 | .cache_map = &xscale_perf_cache_map, | ||
425 | .event_map = &xscale_perf_map, | ||
426 | .raw_event_mask = 0xFF, | ||
427 | .num_events = 3, | ||
428 | .max_period = (1LLU << 32) - 1, | ||
429 | }; | ||
430 | |||
431 | const struct arm_pmu *__init xscale1pmu_init(void) | ||
432 | { | ||
433 | return &xscale1pmu; | ||
434 | } | ||
435 | |||
436 | #define XSCALE2_OVERFLOWED_MASK 0x01f | ||
437 | #define XSCALE2_CCOUNT_OVERFLOW 0x001 | ||
438 | #define XSCALE2_COUNT0_OVERFLOW 0x002 | ||
439 | #define XSCALE2_COUNT1_OVERFLOW 0x004 | ||
440 | #define XSCALE2_COUNT2_OVERFLOW 0x008 | ||
441 | #define XSCALE2_COUNT3_OVERFLOW 0x010 | ||
442 | #define XSCALE2_CCOUNT_INT_EN 0x001 | ||
443 | #define XSCALE2_COUNT0_INT_EN 0x002 | ||
444 | #define XSCALE2_COUNT1_INT_EN 0x004 | ||
445 | #define XSCALE2_COUNT2_INT_EN 0x008 | ||
446 | #define XSCALE2_COUNT3_INT_EN 0x010 | ||
447 | #define XSCALE2_COUNT0_EVT_SHFT 0 | ||
448 | #define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT) | ||
449 | #define XSCALE2_COUNT1_EVT_SHFT 8 | ||
450 | #define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT) | ||
451 | #define XSCALE2_COUNT2_EVT_SHFT 16 | ||
452 | #define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT) | ||
453 | #define XSCALE2_COUNT3_EVT_SHFT 24 | ||
454 | #define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT) | ||
455 | |||
456 | static inline u32 | ||
457 | xscale2pmu_read_pmnc(void) | ||
458 | { | ||
459 | u32 val; | ||
460 | asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val)); | ||
461 | /* bits 1-2 and 4-23 are read-unpredictable */ | ||
462 | return val & 0xff000009; | ||
463 | } | ||
464 | |||
465 | static inline void | ||
466 | xscale2pmu_write_pmnc(u32 val) | ||
467 | { | ||
468 | /* bits 4-23 are write-as-0, 24-31 are write ignored */ | ||
469 | val &= 0xf; | ||
470 | asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val)); | ||
471 | } | ||
472 | |||
473 | static inline u32 | ||
474 | xscale2pmu_read_overflow_flags(void) | ||
475 | { | ||
476 | u32 val; | ||
477 | asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val)); | ||
478 | return val; | ||
479 | } | ||
480 | |||
481 | static inline void | ||
482 | xscale2pmu_write_overflow_flags(u32 val) | ||
483 | { | ||
484 | asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val)); | ||
485 | } | ||
486 | |||
487 | static inline u32 | ||
488 | xscale2pmu_read_event_select(void) | ||
489 | { | ||
490 | u32 val; | ||
491 | asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val)); | ||
492 | return val; | ||
493 | } | ||
494 | |||
495 | static inline void | ||
496 | xscale2pmu_write_event_select(u32 val) | ||
497 | { | ||
498 | asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val)); | ||
499 | } | ||
500 | |||
501 | static inline u32 | ||
502 | xscale2pmu_read_int_enable(void) | ||
503 | { | ||
504 | u32 val; | ||
505 | asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val)); | ||
506 | return val; | ||
507 | } | ||
508 | |||
509 | static void | ||
510 | xscale2pmu_write_int_enable(u32 val) | ||
511 | { | ||
512 | asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val)); | ||
513 | } | ||
514 | |||
515 | static inline int | ||
516 | xscale2_pmnc_counter_has_overflowed(unsigned long of_flags, | ||
517 | enum xscale_counters counter) | ||
518 | { | ||
519 | int ret = 0; | ||
520 | |||
521 | switch (counter) { | ||
522 | case XSCALE_CYCLE_COUNTER: | ||
523 | ret = of_flags & XSCALE2_CCOUNT_OVERFLOW; | ||
524 | break; | ||
525 | case XSCALE_COUNTER0: | ||
526 | ret = of_flags & XSCALE2_COUNT0_OVERFLOW; | ||
527 | break; | ||
528 | case XSCALE_COUNTER1: | ||
529 | ret = of_flags & XSCALE2_COUNT1_OVERFLOW; | ||
530 | break; | ||
531 | case XSCALE_COUNTER2: | ||
532 | ret = of_flags & XSCALE2_COUNT2_OVERFLOW; | ||
533 | break; | ||
534 | case XSCALE_COUNTER3: | ||
535 | ret = of_flags & XSCALE2_COUNT3_OVERFLOW; | ||
536 | break; | ||
537 | default: | ||
538 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
539 | } | ||
540 | |||
541 | return ret; | ||
542 | } | ||
543 | |||
544 | static irqreturn_t | ||
545 | xscale2pmu_handle_irq(int irq_num, void *dev) | ||
546 | { | ||
547 | unsigned long pmnc, of_flags; | ||
548 | struct perf_sample_data data; | ||
549 | struct cpu_hw_events *cpuc; | ||
550 | struct pt_regs *regs; | ||
551 | int idx; | ||
552 | |||
553 | /* Disable the PMU. */ | ||
554 | pmnc = xscale2pmu_read_pmnc(); | ||
555 | xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE); | ||
556 | |||
557 | /* Check the overflow flag register. */ | ||
558 | of_flags = xscale2pmu_read_overflow_flags(); | ||
559 | if (!(of_flags & XSCALE2_OVERFLOWED_MASK)) | ||
560 | return IRQ_NONE; | ||
561 | |||
562 | /* Clear the overflow bits. */ | ||
563 | xscale2pmu_write_overflow_flags(of_flags); | ||
564 | |||
565 | regs = get_irq_regs(); | ||
566 | |||
567 | perf_sample_data_init(&data, 0); | ||
568 | |||
569 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
570 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
571 | struct perf_event *event = cpuc->events[idx]; | ||
572 | struct hw_perf_event *hwc; | ||
573 | |||
574 | if (!test_bit(idx, cpuc->active_mask)) | ||
575 | continue; | ||
576 | |||
577 | if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx)) | ||
578 | continue; | ||
579 | |||
580 | hwc = &event->hw; | ||
581 | armpmu_event_update(event, hwc, idx); | ||
582 | data.period = event->hw.last_period; | ||
583 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
584 | continue; | ||
585 | |||
586 | if (perf_event_overflow(event, 0, &data, regs)) | ||
587 | armpmu->disable(hwc, idx); | ||
588 | } | ||
589 | |||
590 | irq_work_run(); | ||
591 | |||
592 | /* | ||
593 | * Re-enable the PMU. | ||
594 | */ | ||
595 | pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE; | ||
596 | xscale2pmu_write_pmnc(pmnc); | ||
597 | |||
598 | return IRQ_HANDLED; | ||
599 | } | ||
600 | |||
601 | static void | ||
602 | xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
603 | { | ||
604 | unsigned long flags, ien, evtsel; | ||
605 | |||
606 | ien = xscale2pmu_read_int_enable(); | ||
607 | evtsel = xscale2pmu_read_event_select(); | ||
608 | |||
609 | switch (idx) { | ||
610 | case XSCALE_CYCLE_COUNTER: | ||
611 | ien |= XSCALE2_CCOUNT_INT_EN; | ||
612 | break; | ||
613 | case XSCALE_COUNTER0: | ||
614 | ien |= XSCALE2_COUNT0_INT_EN; | ||
615 | evtsel &= ~XSCALE2_COUNT0_EVT_MASK; | ||
616 | evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT; | ||
617 | break; | ||
618 | case XSCALE_COUNTER1: | ||
619 | ien |= XSCALE2_COUNT1_INT_EN; | ||
620 | evtsel &= ~XSCALE2_COUNT1_EVT_MASK; | ||
621 | evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT; | ||
622 | break; | ||
623 | case XSCALE_COUNTER2: | ||
624 | ien |= XSCALE2_COUNT2_INT_EN; | ||
625 | evtsel &= ~XSCALE2_COUNT2_EVT_MASK; | ||
626 | evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT; | ||
627 | break; | ||
628 | case XSCALE_COUNTER3: | ||
629 | ien |= XSCALE2_COUNT3_INT_EN; | ||
630 | evtsel &= ~XSCALE2_COUNT3_EVT_MASK; | ||
631 | evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT; | ||
632 | break; | ||
633 | default: | ||
634 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
635 | return; | ||
636 | } | ||
637 | |||
638 | spin_lock_irqsave(&pmu_lock, flags); | ||
639 | xscale2pmu_write_event_select(evtsel); | ||
640 | xscale2pmu_write_int_enable(ien); | ||
641 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
642 | } | ||
643 | |||
644 | static void | ||
645 | xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
646 | { | ||
647 | unsigned long flags, ien, evtsel; | ||
648 | |||
649 | ien = xscale2pmu_read_int_enable(); | ||
650 | evtsel = xscale2pmu_read_event_select(); | ||
651 | |||
652 | switch (idx) { | ||
653 | case XSCALE_CYCLE_COUNTER: | ||
654 | ien &= ~XSCALE2_CCOUNT_INT_EN; | ||
655 | break; | ||
656 | case XSCALE_COUNTER0: | ||
657 | ien &= ~XSCALE2_COUNT0_INT_EN; | ||
658 | evtsel &= ~XSCALE2_COUNT0_EVT_MASK; | ||
659 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT; | ||
660 | break; | ||
661 | case XSCALE_COUNTER1: | ||
662 | ien &= ~XSCALE2_COUNT1_INT_EN; | ||
663 | evtsel &= ~XSCALE2_COUNT1_EVT_MASK; | ||
664 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT; | ||
665 | break; | ||
666 | case XSCALE_COUNTER2: | ||
667 | ien &= ~XSCALE2_COUNT2_INT_EN; | ||
668 | evtsel &= ~XSCALE2_COUNT2_EVT_MASK; | ||
669 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT; | ||
670 | break; | ||
671 | case XSCALE_COUNTER3: | ||
672 | ien &= ~XSCALE2_COUNT3_INT_EN; | ||
673 | evtsel &= ~XSCALE2_COUNT3_EVT_MASK; | ||
674 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT; | ||
675 | break; | ||
676 | default: | ||
677 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
678 | return; | ||
679 | } | ||
680 | |||
681 | spin_lock_irqsave(&pmu_lock, flags); | ||
682 | xscale2pmu_write_event_select(evtsel); | ||
683 | xscale2pmu_write_int_enable(ien); | ||
684 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
685 | } | ||
686 | |||
687 | static int | ||
688 | xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
689 | struct hw_perf_event *event) | ||
690 | { | ||
691 | int idx = xscale1pmu_get_event_idx(cpuc, event); | ||
692 | if (idx >= 0) | ||
693 | goto out; | ||
694 | |||
695 | if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask)) | ||
696 | idx = XSCALE_COUNTER3; | ||
697 | else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask)) | ||
698 | idx = XSCALE_COUNTER2; | ||
699 | out: | ||
700 | return idx; | ||
701 | } | ||
702 | |||
703 | static void | ||
704 | xscale2pmu_start(void) | ||
705 | { | ||
706 | unsigned long flags, val; | ||
707 | |||
708 | spin_lock_irqsave(&pmu_lock, flags); | ||
709 | val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64; | ||
710 | val |= XSCALE_PMU_ENABLE; | ||
711 | xscale2pmu_write_pmnc(val); | ||
712 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
713 | } | ||
714 | |||
715 | static void | ||
716 | xscale2pmu_stop(void) | ||
717 | { | ||
718 | unsigned long flags, val; | ||
719 | |||
720 | spin_lock_irqsave(&pmu_lock, flags); | ||
721 | val = xscale2pmu_read_pmnc(); | ||
722 | val &= ~XSCALE_PMU_ENABLE; | ||
723 | xscale2pmu_write_pmnc(val); | ||
724 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
725 | } | ||
726 | |||
727 | static inline u32 | ||
728 | xscale2pmu_read_counter(int counter) | ||
729 | { | ||
730 | u32 val = 0; | ||
731 | |||
732 | switch (counter) { | ||
733 | case XSCALE_CYCLE_COUNTER: | ||
734 | asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val)); | ||
735 | break; | ||
736 | case XSCALE_COUNTER0: | ||
737 | asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val)); | ||
738 | break; | ||
739 | case XSCALE_COUNTER1: | ||
740 | asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val)); | ||
741 | break; | ||
742 | case XSCALE_COUNTER2: | ||
743 | asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val)); | ||
744 | break; | ||
745 | case XSCALE_COUNTER3: | ||
746 | asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val)); | ||
747 | break; | ||
748 | } | ||
749 | |||
750 | return val; | ||
751 | } | ||
752 | |||
753 | static inline void | ||
754 | xscale2pmu_write_counter(int counter, u32 val) | ||
755 | { | ||
756 | switch (counter) { | ||
757 | case XSCALE_CYCLE_COUNTER: | ||
758 | asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val)); | ||
759 | break; | ||
760 | case XSCALE_COUNTER0: | ||
761 | asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val)); | ||
762 | break; | ||
763 | case XSCALE_COUNTER1: | ||
764 | asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val)); | ||
765 | break; | ||
766 | case XSCALE_COUNTER2: | ||
767 | asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val)); | ||
768 | break; | ||
769 | case XSCALE_COUNTER3: | ||
770 | asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val)); | ||
771 | break; | ||
772 | } | ||
773 | } | ||
774 | |||
775 | static const struct arm_pmu xscale2pmu = { | ||
776 | .id = ARM_PERF_PMU_ID_XSCALE2, | ||
777 | .name = "xscale2", | ||
778 | .handle_irq = xscale2pmu_handle_irq, | ||
779 | .enable = xscale2pmu_enable_event, | ||
780 | .disable = xscale2pmu_disable_event, | ||
781 | .read_counter = xscale2pmu_read_counter, | ||
782 | .write_counter = xscale2pmu_write_counter, | ||
783 | .get_event_idx = xscale2pmu_get_event_idx, | ||
784 | .start = xscale2pmu_start, | ||
785 | .stop = xscale2pmu_stop, | ||
786 | .cache_map = &xscale_perf_cache_map, | ||
787 | .event_map = &xscale_perf_map, | ||
788 | .raw_event_mask = 0xFF, | ||
789 | .num_events = 5, | ||
790 | .max_period = (1LLU << 32) - 1, | ||
791 | }; | ||
792 | |||
793 | const struct arm_pmu *__init xscale2pmu_init(void) | ||
794 | { | ||
795 | return &xscale2pmu; | ||
796 | } | ||
797 | #else | ||
798 | const struct arm_pmu *__init xscale1pmu_init(void) | ||
799 | { | ||
800 | return NULL; | ||
801 | } | ||
802 | |||
803 | const struct arm_pmu *__init xscale2pmu_init(void) | ||
804 | { | ||
805 | return NULL; | ||
806 | } | ||
807 | #endif /* CONFIG_CPU_XSCALE */ | ||
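To make the register layout used by xscale1pmu_enable_event() easier to follow, here is a small host-side model of how an event number and its overflow-interrupt enable are packed into the PMNC word. It is an illustrative sketch only, not kernel code: the local macro and function names mirror the XSCALE1_* definitions above but are invented for the example, and the event number 0x0B is XSCALE_PERFCTR_DCACHE_MISS from the event list in this file. The counter 0 event number lives in bits [19:12], its interrupt enable in bit 4, and the global enable in bit 0.

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the XSCALE1_* layout from the patch (names invented here). */
    #define PMU_ENABLE      0x001
    #define COUNT0_INT_EN   0x010
    #define COUNT0_EVT_SHFT 12
    #define COUNT0_EVT_MASK (0xffu << COUNT0_EVT_SHFT)

    static uint32_t program_counter0(uint32_t pmnc, uint8_t event)
    {
            pmnc &= ~COUNT0_EVT_MASK;                   /* clear the old event */
            pmnc |= (uint32_t)event << COUNT0_EVT_SHFT; /* select the new one  */
            pmnc |= COUNT0_INT_EN;                      /* overflow IRQ enable */
            return pmnc;
    }

    int main(void)
    {
            /* 0x0B is XSCALE_PERFCTR_DCACHE_MISS in the event list above;
             * the global enable bit is what xscale1pmu_start() sets last. */
            uint32_t pmnc = program_counter0(0, 0x0B) | PMU_ENABLE;

            printf("PMNC = 0x%08x\n", pmnc);   /* prints 0x0000b011 */
            return 0;
    }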
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S index 1e4cbd4e7be9..64f6bc1a9132 100644 --- a/arch/arm/lib/findbit.S +++ b/arch/arm/lib/findbit.S | |||
@@ -174,8 +174,8 @@ ENDPROC(_find_next_bit_be) | |||
174 | */ | 174 | */ |
175 | .L_found: | 175 | .L_found: |
176 | #if __LINUX_ARM_ARCH__ >= 5 | 176 | #if __LINUX_ARM_ARCH__ >= 5 |
177 | rsb r1, r3, #0 | 177 | rsb r0, r3, #0 |
178 | and r3, r3, r1 | 178 | and r3, r3, r0 |
179 | clz r3, r3 | 179 | clz r3, r3 |
180 | rsb r3, r3, #31 | 180 | rsb r3, r3, #31 |
181 | add r0, r2, r3 | 181 | add r0, r2, r3 |
@@ -190,5 +190,7 @@ ENDPROC(_find_next_bit_be) | |||
190 | addeq r2, r2, #1 | 190 | addeq r2, r2, #1 |
191 | mov r0, r2 | 191 | mov r0, r2 |
192 | #endif | 192 | #endif |
193 | cmp r1, r0 @ Clamp to maxbit | ||
194 | movlo r0, r1 | ||
193 | mov pc, lr | 195 | mov pc, lr |
194 | 196 | ||
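The two instructions added at the end of the findbit return path ("cmp r1, r0; movlo r0, r1") clamp the computed bit position in r0 to the maxbit argument held in r1, i.e. they return min(found, maxbit); the earlier hunk frees r1 for this by using r0 as the scratch register instead. A C model of that exit path, purely illustrative and using made-up values, shows why the clamp matters: callers treat any result at or beyond maxbit as "not found".

    #include <stdio.h>

    /* C model of the added clamp: min(found, maxbit). */
    static unsigned int clamp_to_maxbit(unsigned int found, unsigned int maxbit)
    {
            return found < maxbit ? found : maxbit;
    }

    int main(void)
    {
            /* e.g. a 30-bit bitmap stored in one 32-bit word whose bit 31
             * happens to be set: the raw scan yields 31, the clamped result
             * is 30, which callers interpret as "no bit found". */
            printf("%u\n", clamp_to_maxbit(31, 30));
            return 0;
    }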
diff --git a/arch/arm/mach-aaec2000/include/mach/vmalloc.h b/arch/arm/mach-aaec2000/include/mach/vmalloc.h index cff4e0a996ce..a6299e8321bd 100644 --- a/arch/arm/mach-aaec2000/include/mach/vmalloc.h +++ b/arch/arm/mach-aaec2000/include/mach/vmalloc.h | |||
@@ -11,6 +11,6 @@ | |||
11 | #ifndef __ASM_ARCH_VMALLOC_H | 11 | #ifndef __ASM_ARCH_VMALLOC_H |
12 | #define __ASM_ARCH_VMALLOC_H | 12 | #define __ASM_ARCH_VMALLOC_H |
13 | 13 | ||
14 | #define VMALLOC_END 0xd0000000 | 14 | #define VMALLOC_END 0xd0000000UL |
15 | 15 | ||
16 | #endif /* __ASM_ARCH_VMALLOC_H */ | 16 | #endif /* __ASM_ARCH_VMALLOC_H */ |
diff --git a/arch/arm/mach-bcmring/include/mach/vmalloc.h b/arch/arm/mach-bcmring/include/mach/vmalloc.h index 3db3a09fd398..7397bd7817d9 100644 --- a/arch/arm/mach-bcmring/include/mach/vmalloc.h +++ b/arch/arm/mach-bcmring/include/mach/vmalloc.h | |||
@@ -22,4 +22,4 @@ | |||
22 | * 0xe0000000 to 0xefffffff. This gives us 256 MB of vm space and handles | 22 | * 0xe0000000 to 0xefffffff. This gives us 256 MB of vm space and handles |
23 | * larger physical memory designs better. | 23 | * larger physical memory designs better. |
24 | */ | 24 | */ |
25 | #define VMALLOC_END 0xf0000000 | 25 | #define VMALLOC_END 0xf0000000UL |
diff --git a/arch/arm/mach-clps711x/include/mach/vmalloc.h b/arch/arm/mach-clps711x/include/mach/vmalloc.h index 30b3a287ed88..467b96137e47 100644 --- a/arch/arm/mach-clps711x/include/mach/vmalloc.h +++ b/arch/arm/mach-clps711x/include/mach/vmalloc.h | |||
@@ -17,4 +17,4 @@ | |||
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | */ | 19 | */ |
20 | #define VMALLOC_END 0xd0000000 | 20 | #define VMALLOC_END 0xd0000000UL |
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index 9be261beae7d..2652af124acd 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c | |||
@@ -359,8 +359,8 @@ static struct clk_lookup dm355_clks[] = { | |||
359 | CLK(NULL, "uart1", &uart1_clk), | 359 | CLK(NULL, "uart1", &uart1_clk), |
360 | CLK(NULL, "uart2", &uart2_clk), | 360 | CLK(NULL, "uart2", &uart2_clk), |
361 | CLK("i2c_davinci.1", NULL, &i2c_clk), | 361 | CLK("i2c_davinci.1", NULL, &i2c_clk), |
362 | CLK("davinci-asp.0", NULL, &asp0_clk), | 362 | CLK("davinci-mcbsp.0", NULL, &asp0_clk), |
363 | CLK("davinci-asp.1", NULL, &asp1_clk), | 363 | CLK("davinci-mcbsp.1", NULL, &asp1_clk), |
364 | CLK("davinci_mmc.0", NULL, &mmcsd0_clk), | 364 | CLK("davinci_mmc.0", NULL, &mmcsd0_clk), |
365 | CLK("davinci_mmc.1", NULL, &mmcsd1_clk), | 365 | CLK("davinci_mmc.1", NULL, &mmcsd1_clk), |
366 | CLK("spi_davinci.0", NULL, &spi0_clk), | 366 | CLK("spi_davinci.0", NULL, &spi0_clk), |
@@ -664,7 +664,7 @@ static struct resource dm355_asp1_resources[] = { | |||
664 | }; | 664 | }; |
665 | 665 | ||
666 | static struct platform_device dm355_asp1_device = { | 666 | static struct platform_device dm355_asp1_device = { |
667 | .name = "davinci-asp", | 667 | .name = "davinci-mcbsp", |
668 | .id = 1, | 668 | .id = 1, |
669 | .num_resources = ARRAY_SIZE(dm355_asp1_resources), | 669 | .num_resources = ARRAY_SIZE(dm355_asp1_resources), |
670 | .resource = dm355_asp1_resources, | 670 | .resource = dm355_asp1_resources, |
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index a12065e87266..c466d710d3c1 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c | |||
@@ -459,7 +459,7 @@ static struct clk_lookup dm365_clks[] = { | |||
459 | CLK(NULL, "usb", &usb_clk), | 459 | CLK(NULL, "usb", &usb_clk), |
460 | CLK("davinci_emac.1", NULL, &emac_clk), | 460 | CLK("davinci_emac.1", NULL, &emac_clk), |
461 | CLK("davinci_voicecodec", NULL, &voicecodec_clk), | 461 | CLK("davinci_voicecodec", NULL, &voicecodec_clk), |
462 | CLK("davinci-asp.0", NULL, &asp0_clk), | 462 | CLK("davinci-mcbsp", NULL, &asp0_clk), |
463 | CLK(NULL, "rto", &rto_clk), | 463 | CLK(NULL, "rto", &rto_clk), |
464 | CLK(NULL, "mjcp", &mjcp_clk), | 464 | CLK(NULL, "mjcp", &mjcp_clk), |
465 | CLK(NULL, NULL, NULL), | 465 | CLK(NULL, NULL, NULL), |
@@ -922,8 +922,8 @@ static struct resource dm365_asp_resources[] = { | |||
922 | }; | 922 | }; |
923 | 923 | ||
924 | static struct platform_device dm365_asp_device = { | 924 | static struct platform_device dm365_asp_device = { |
925 | .name = "davinci-asp", | 925 | .name = "davinci-mcbsp", |
926 | .id = 0, | 926 | .id = -1, |
927 | .num_resources = ARRAY_SIZE(dm365_asp_resources), | 927 | .num_resources = ARRAY_SIZE(dm365_asp_resources), |
928 | .resource = dm365_asp_resources, | 928 | .resource = dm365_asp_resources, |
929 | }; | 929 | }; |
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index 0608dd776a16..9a2376b3137c 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c | |||
@@ -302,7 +302,7 @@ static struct clk_lookup dm644x_clks[] = { | |||
302 | CLK("davinci_emac.1", NULL, &emac_clk), | 302 | CLK("davinci_emac.1", NULL, &emac_clk), |
303 | CLK("i2c_davinci.1", NULL, &i2c_clk), | 303 | CLK("i2c_davinci.1", NULL, &i2c_clk), |
304 | CLK("palm_bk3710", NULL, &ide_clk), | 304 | CLK("palm_bk3710", NULL, &ide_clk), |
305 | CLK("davinci-asp", NULL, &asp_clk), | 305 | CLK("davinci-mcbsp", NULL, &asp_clk), |
306 | CLK("davinci_mmc.0", NULL, &mmcsd_clk), | 306 | CLK("davinci_mmc.0", NULL, &mmcsd_clk), |
307 | CLK(NULL, "spi", &spi_clk), | 307 | CLK(NULL, "spi", &spi_clk), |
308 | CLK(NULL, "gpio", &gpio_clk), | 308 | CLK(NULL, "gpio", &gpio_clk), |
@@ -580,7 +580,7 @@ static struct resource dm644x_asp_resources[] = { | |||
580 | }; | 580 | }; |
581 | 581 | ||
582 | static struct platform_device dm644x_asp_device = { | 582 | static struct platform_device dm644x_asp_device = { |
583 | .name = "davinci-asp", | 583 | .name = "davinci-mcbsp", |
584 | .id = -1, | 584 | .id = -1, |
585 | .num_resources = ARRAY_SIZE(dm644x_asp_resources), | 585 | .num_resources = ARRAY_SIZE(dm644x_asp_resources), |
586 | .resource = dm644x_asp_resources, | 586 | .resource = dm644x_asp_resources, |
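
The davinci-asp to davinci-mcbsp rename above only works because clkdev matches clocks to consumers by string: clk_get() compares each clk_lookup entry's dev_id against dev_name() of the requesting device, and a platform device named "davinci-mcbsp" with .id = 1 is known as "davinci-mcbsp.1", while .id = -1 drops the ".<id>" suffix (which is why the dm365 entry loses both the suffix and the id). A minimal sketch of the consumer side, with an illustrative probe function that is not taken from the tree:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Illustrative only: clk_get(&pdev->dev, NULL) succeeds when some
 * clk_lookup entry's dev_id equals dev_name(&pdev->dev), e.g.
 * "davinci-mcbsp.1" for { .name = "davinci-mcbsp", .id = 1 }. */
static int example_mcbsp_probe(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* no matching dev_id entry */
	clk_put(clk);
	return 0;
}
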
diff --git a/arch/arm/mach-ebsa110/include/mach/vmalloc.h b/arch/arm/mach-ebsa110/include/mach/vmalloc.h index 60bde56fba4c..ea141b7a3e03 100644 --- a/arch/arm/mach-ebsa110/include/mach/vmalloc.h +++ b/arch/arm/mach-ebsa110/include/mach/vmalloc.h | |||
@@ -7,4 +7,4 @@ | |||
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #define VMALLOC_END 0xdf000000 | 10 | #define VMALLOC_END 0xdf000000UL |
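
The UL suffixes added to these VMALLOC_END definitions change only the constant's type: without the suffix a hex constant of this magnitude has type unsigned int, so using the macro where an unsigned long address is expected (for example printing it with %lx, or assigning it in generic vmalloc bookkeeping) can trigger compiler and sparse type or format warnings. A hedged illustration with a made-up helper:

#include <linux/kernel.h>

/* Hypothetical consumer, not from the tree: with a plain 0xdf000000
 * the %lx below would warn, since the constant would be unsigned int
 * rather than unsigned long. */
#define EXAMPLE_VMALLOC_END	0xdf000000UL

static void example_show_vmalloc(unsigned long start)
{
	pr_info("vmalloc: 0x%08lx-0x%08lx\n", start, EXAMPLE_VMALLOC_END);
}
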
diff --git a/arch/arm/mach-footbridge/include/mach/vmalloc.h b/arch/arm/mach-footbridge/include/mach/vmalloc.h index 0ffbb7c85e59..40ba78e5782b 100644 --- a/arch/arm/mach-footbridge/include/mach/vmalloc.h +++ b/arch/arm/mach-footbridge/include/mach/vmalloc.h | |||
@@ -7,4 +7,4 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | 9 | ||
10 | #define VMALLOC_END 0xf0000000 | 10 | #define VMALLOC_END 0xf0000000UL |
diff --git a/arch/arm/mach-h720x/include/mach/vmalloc.h b/arch/arm/mach-h720x/include/mach/vmalloc.h index a45915b88756..8520b4a4d4e6 100644 --- a/arch/arm/mach-h720x/include/mach/vmalloc.h +++ b/arch/arm/mach-h720x/include/mach/vmalloc.h | |||
@@ -5,6 +5,6 @@ | |||
5 | #ifndef __ARCH_ARM_VMALLOC_H | 5 | #ifndef __ARCH_ARM_VMALLOC_H |
6 | #define __ARCH_ARM_VMALLOC_H | 6 | #define __ARCH_ARM_VMALLOC_H |
7 | 7 | ||
8 | #define VMALLOC_END 0xd0000000 | 8 | #define VMALLOC_END 0xd0000000UL |
9 | 9 | ||
10 | #endif | 10 | #endif |
diff --git a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c index 026263c665ca..7e1e9dc2c8fc 100644 --- a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c +++ b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c | |||
@@ -250,9 +250,6 @@ static const struct imxuart_platform_data uart_pdata __initconst = { | |||
250 | .flags = IMXUART_HAVE_RTSCTS, | 250 | .flags = IMXUART_HAVE_RTSCTS, |
251 | }; | 251 | }; |
252 | 252 | ||
253 | #if defined(CONFIG_TOUCHSCREEN_ADS7846) \ | ||
254 | || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) | ||
255 | |||
256 | #define ADS7846_PENDOWN (GPIO_PORTD | 25) | 253 | #define ADS7846_PENDOWN (GPIO_PORTD | 25) |
257 | 254 | ||
258 | static void ads7846_dev_init(void) | 255 | static void ads7846_dev_init(void) |
@@ -273,9 +270,7 @@ static struct ads7846_platform_data ads7846_config __initdata = { | |||
273 | .get_pendown_state = ads7846_get_pendown_state, | 270 | .get_pendown_state = ads7846_get_pendown_state, |
274 | .keep_vref_on = 1, | 271 | .keep_vref_on = 1, |
275 | }; | 272 | }; |
276 | #endif | ||
277 | 273 | ||
278 | #if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE) | ||
279 | static struct spi_board_info eukrea_mbimx27_spi_board_info[] __initdata = { | 274 | static struct spi_board_info eukrea_mbimx27_spi_board_info[] __initdata = { |
280 | [0] = { | 275 | [0] = { |
281 | .modalias = "ads7846", | 276 | .modalias = "ads7846", |
@@ -294,7 +289,6 @@ static const struct spi_imx_master eukrea_mbimx27_spi0_data __initconst = { | |||
294 | .chipselect = eukrea_mbimx27_spi_cs, | 289 | .chipselect = eukrea_mbimx27_spi_cs, |
295 | .num_chipselect = ARRAY_SIZE(eukrea_mbimx27_spi_cs), | 290 | .num_chipselect = ARRAY_SIZE(eukrea_mbimx27_spi_cs), |
296 | }; | 291 | }; |
297 | #endif | ||
298 | 292 | ||
299 | static struct i2c_board_info eukrea_mbimx27_i2c_devices[] = { | 293 | static struct i2c_board_info eukrea_mbimx27_i2c_devices[] = { |
300 | { | 294 | { |
diff --git a/arch/arm/mach-integrator/include/mach/vmalloc.h b/arch/arm/mach-integrator/include/mach/vmalloc.h index e056e7cf5645..2f5a2bafb11f 100644 --- a/arch/arm/mach-integrator/include/mach/vmalloc.h +++ b/arch/arm/mach-integrator/include/mach/vmalloc.h | |||
@@ -17,4 +17,4 @@ | |||
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | */ | 19 | */ |
20 | #define VMALLOC_END 0xd0000000 | 20 | #define VMALLOC_END 0xd0000000UL |
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig index dbbcfeb919db..31e5fd63ec9a 100644 --- a/arch/arm/mach-msm/Kconfig +++ b/arch/arm/mach-msm/Kconfig | |||
@@ -49,6 +49,8 @@ endchoice | |||
49 | 49 | ||
50 | config MSM_SOC_REV_A | 50 | config MSM_SOC_REV_A |
51 | bool | 51 | bool |
52 | config ARCH_MSM_SCORPIONMP | ||
53 | bool | ||
52 | 54 | ||
53 | config ARCH_MSM_ARM11 | 55 | config ARCH_MSM_ARM11 |
54 | bool | 56 | bool |
diff --git a/arch/arm/mach-msm/include/mach/vmalloc.h b/arch/arm/mach-msm/include/mach/vmalloc.h index 31a32ad062dc..d138448eff16 100644 --- a/arch/arm/mach-msm/include/mach/vmalloc.h +++ b/arch/arm/mach-msm/include/mach/vmalloc.h | |||
@@ -16,7 +16,7 @@ | |||
16 | #ifndef __ASM_ARCH_MSM_VMALLOC_H | 16 | #ifndef __ASM_ARCH_MSM_VMALLOC_H |
17 | #define __ASM_ARCH_MSM_VMALLOC_H | 17 | #define __ASM_ARCH_MSM_VMALLOC_H |
18 | 18 | ||
19 | #define VMALLOC_END 0xd0000000 | 19 | #define VMALLOC_END 0xd0000000UL |
20 | 20 | ||
21 | #endif | 21 | #endif |
22 | 22 | ||
diff --git a/arch/arm/mach-mx25/devices-imx25.h b/arch/arm/mach-mx25/devices-imx25.h index 93afa10b13cf..d94d282fa676 100644 --- a/arch/arm/mach-mx25/devices-imx25.h +++ b/arch/arm/mach-mx25/devices-imx25.h | |||
@@ -42,9 +42,9 @@ extern const struct imx_mxc_nand_data imx25_mxc_nand_data __initconst; | |||
42 | #define imx25_add_mxc_nand(pdata) \ | 42 | #define imx25_add_mxc_nand(pdata) \ |
43 | imx_add_mxc_nand(&imx25_mxc_nand_data, pdata) | 43 | imx_add_mxc_nand(&imx25_mxc_nand_data, pdata) |
44 | 44 | ||
45 | extern const struct imx_spi_imx_data imx25_spi_imx_data[] __initconst; | 45 | extern const struct imx_spi_imx_data imx25_cspi_data[] __initconst; |
46 | #define imx25_add_spi_imx(id, pdata) \ | 46 | #define imx25_add_spi_imx(id, pdata) \ |
47 | imx_add_spi_imx(&imx25_spi_imx_data[id], pdata) | 47 | imx_add_spi_imx(&imx25_cspi_data[id], pdata) |
48 | #define imx25_add_spi_imx0(pdata) imx25_add_spi_imx(0, pdata) | 48 | #define imx25_add_spi_imx0(pdata) imx25_add_spi_imx(0, pdata) |
49 | #define imx25_add_spi_imx1(pdata) imx25_add_spi_imx(1, pdata) | 49 | #define imx25_add_spi_imx1(pdata) imx25_add_spi_imx(1, pdata) |
50 | #define imx25_add_spi_imx2(pdata) imx25_add_spi_imx(2, pdata) | 50 | #define imx25_add_spi_imx2(pdata) imx25_add_spi_imx(2, pdata) |
diff --git a/arch/arm/mach-mx3/mach-pcm037_eet.c b/arch/arm/mach-mx3/mach-pcm037_eet.c index 99e0894e07db..fda56545d2fd 100644 --- a/arch/arm/mach-mx3/mach-pcm037_eet.c +++ b/arch/arm/mach-mx3/mach-pcm037_eet.c | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <mach/common.h> | 15 | #include <mach/common.h> |
16 | #include <mach/iomux-mx3.h> | 16 | #include <mach/iomux-mx3.h> |
17 | #include <mach/spi.h> | ||
17 | 18 | ||
18 | #include <asm/mach-types.h> | 19 | #include <asm/mach-types.h> |
19 | 20 | ||
@@ -59,14 +60,12 @@ static struct spi_board_info pcm037_spi_dev[] = { | |||
59 | }; | 60 | }; |
60 | 61 | ||
61 | /* Platform Data for MXC CSPI */ | 62 | /* Platform Data for MXC CSPI */ |
62 | #if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE) | ||
63 | static int pcm037_spi1_cs[] = {MXC_SPI_CS(1), IOMUX_TO_GPIO(MX31_PIN_KEY_COL7)}; | 63 | static int pcm037_spi1_cs[] = {MXC_SPI_CS(1), IOMUX_TO_GPIO(MX31_PIN_KEY_COL7)}; |
64 | 64 | ||
65 | static const struct spi_imx_master pcm037_spi1_pdata __initconst = { | 65 | static const struct spi_imx_master pcm037_spi1_pdata __initconst = { |
66 | .chipselect = pcm037_spi1_cs, | 66 | .chipselect = pcm037_spi1_cs, |
67 | .num_chipselect = ARRAY_SIZE(pcm037_spi1_cs), | 67 | .num_chipselect = ARRAY_SIZE(pcm037_spi1_cs), |
68 | }; | 68 | }; |
69 | #endif | ||
70 | 69 | ||
71 | /* GPIO-keys input device */ | 70 | /* GPIO-keys input device */ |
72 | static struct gpio_keys_button pcm037_gpio_keys[] = { | 71 | static struct gpio_keys_button pcm037_gpio_keys[] = { |
@@ -171,7 +170,7 @@ static struct platform_device pcm037_gpio_keys_device = { | |||
171 | }, | 170 | }, |
172 | }; | 171 | }; |
173 | 172 | ||
174 | static int eet_init_devices(void) | 173 | static int __init eet_init_devices(void) |
175 | { | 174 | { |
176 | if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET) | 175 | if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET) |
177 | return 0; | 176 | return 0; |
diff --git a/arch/arm/mach-netx/include/mach/vmalloc.h b/arch/arm/mach-netx/include/mach/vmalloc.h index 7cca3574308f..871f1ef7bff5 100644 --- a/arch/arm/mach-netx/include/mach/vmalloc.h +++ b/arch/arm/mach-netx/include/mach/vmalloc.h | |||
@@ -16,4 +16,4 @@ | |||
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ | 18 | */ |
19 | #define VMALLOC_END 0xd0000000 | 19 | #define VMALLOC_END 0xd0000000UL |
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c index ea0d80a89da7..e7f9ee63dce5 100644 --- a/arch/arm/mach-omap1/devices.c +++ b/arch/arm/mach-omap1/devices.c | |||
@@ -321,10 +321,9 @@ static struct platform_device omap_wdt_device = { | |||
321 | static int __init omap_init_wdt(void) | 321 | static int __init omap_init_wdt(void) |
322 | { | 322 | { |
323 | if (!cpu_is_omap16xx()) | 323 | if (!cpu_is_omap16xx()) |
324 | return; | 324 | return -ENODEV; |
325 | 325 | ||
326 | platform_device_register(&omap_wdt_device); | 326 | return platform_device_register(&omap_wdt_device); |
327 | return 0; | ||
328 | } | 327 | } |
329 | subsys_initcall(omap_init_wdt); | 328 | subsys_initcall(omap_init_wdt); |
330 | #endif | 329 | #endif |
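
The omap_init_wdt() change above follows from the initcall contract: subsys_initcall routines return int, so the bare "return;" was invalid in an int function, and the result of platform_device_register() was being discarded. A minimal sketch of the same pattern under assumed names (example_hw_present() stands in for cpu_is_omap16xx()):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/types.h>

static struct platform_device example_wdt_device = {
	.name	= "example_wdt",
	.id	= -1,
};

static bool example_hw_present(void)
{
	return true;		/* stand-in for the real SoC check */
}

static int __init example_init_wdt(void)
{
	if (!example_hw_present())
		return -ENODEV;		/* skip on unsupported hardware */

	return platform_device_register(&example_wdt_device);
}
subsys_initcall(example_init_wdt);
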
diff --git a/arch/arm/mach-omap1/include/mach/camera.h b/arch/arm/mach-omap1/include/mach/camera.h index fd54b452eb22..847d00f0bb0a 100644 --- a/arch/arm/mach-omap1/include/mach/camera.h +++ b/arch/arm/mach-omap1/include/mach/camera.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __ASM_ARCH_CAMERA_H_ | 1 | #ifndef __ASM_ARCH_CAMERA_H_ |
2 | #define __ASM_ARCH_CAMERA_H_ | 2 | #define __ASM_ARCH_CAMERA_H_ |
3 | 3 | ||
4 | #include <media/omap1_camera.h> | ||
5 | |||
4 | void omap1_camera_init(void *); | 6 | void omap1_camera_init(void *); |
5 | 7 | ||
6 | static inline void omap1_set_camera_info(struct omap1_cam_platform_data *info) | 8 | static inline void omap1_set_camera_info(struct omap1_cam_platform_data *info) |
diff --git a/arch/arm/mach-omap1/include/mach/vmalloc.h b/arch/arm/mach-omap1/include/mach/vmalloc.h index b001f67d695b..22ec4a479577 100644 --- a/arch/arm/mach-omap1/include/mach/vmalloc.h +++ b/arch/arm/mach-omap1/include/mach/vmalloc.h | |||
@@ -17,4 +17,4 @@ | |||
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | */ | 19 | */ |
20 | #define VMALLOC_END 0xd8000000 | 20 | #define VMALLOC_END 0xd8000000UL |
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c index 067f4379c87f..53ac762518bd 100644 --- a/arch/arm/mach-omap2/board-devkit8000.c +++ b/arch/arm/mach-omap2/board-devkit8000.c | |||
@@ -242,9 +242,6 @@ static int devkit8000_twl_gpio_setup(struct device *dev, | |||
242 | mmc[0].gpio_cd = gpio + 0; | 242 | mmc[0].gpio_cd = gpio + 0; |
243 | omap2_hsmmc_init(mmc); | 243 | omap2_hsmmc_init(mmc); |
244 | 244 | ||
245 | /* link regulators to MMC adapters */ | ||
246 | devkit8000_vmmc1_supply.dev = mmc[0].dev; | ||
247 | |||
248 | /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ | 245 | /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ |
249 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; | 246 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; |
250 | 247 | ||
diff --git a/arch/arm/mach-omap2/include/mach/vmalloc.h b/arch/arm/mach-omap2/include/mach/vmalloc.h index 4da31e997efe..866319947760 100644 --- a/arch/arm/mach-omap2/include/mach/vmalloc.h +++ b/arch/arm/mach-omap2/include/mach/vmalloc.h | |||
@@ -17,4 +17,4 @@ | |||
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | */ | 19 | */ |
20 | #define VMALLOC_END 0xf8000000 | 20 | #define VMALLOC_END 0xf8000000UL |
diff --git a/arch/arm/mach-pnx4008/include/mach/vmalloc.h b/arch/arm/mach-pnx4008/include/mach/vmalloc.h index 31b65ee07b0b..184913c71141 100644 --- a/arch/arm/mach-pnx4008/include/mach/vmalloc.h +++ b/arch/arm/mach-pnx4008/include/mach/vmalloc.h | |||
@@ -17,4 +17,4 @@ | |||
17 | * The vmalloc() routines leaves a hole of 4kB between each vmalloced | 17 | * The vmalloc() routines leaves a hole of 4kB between each vmalloced |
18 | * area for the same reason. ;) | 18 | * area for the same reason. ;) |
19 | */ | 19 | */ |
20 | #define VMALLOC_END 0xd0000000 | 20 | #define VMALLOC_END 0xd0000000UL |
diff --git a/arch/arm/mach-rpc/include/mach/vmalloc.h b/arch/arm/mach-rpc/include/mach/vmalloc.h index 3bcd86fadb81..fb700228637a 100644 --- a/arch/arm/mach-rpc/include/mach/vmalloc.h +++ b/arch/arm/mach-rpc/include/mach/vmalloc.h | |||
@@ -7,4 +7,4 @@ | |||
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #define VMALLOC_END 0xdc000000 | 10 | #define VMALLOC_END 0xdc000000UL |
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig index 1ca7bdc6485c..579d2f0f4dd0 100644 --- a/arch/arm/mach-s3c64xx/Kconfig +++ b/arch/arm/mach-s3c64xx/Kconfig | |||
@@ -143,7 +143,7 @@ config MACH_SMDK6410 | |||
143 | select S3C_DEV_USB_HSOTG | 143 | select S3C_DEV_USB_HSOTG |
144 | select S3C_DEV_WDT | 144 | select S3C_DEV_WDT |
145 | select SAMSUNG_DEV_KEYPAD | 145 | select SAMSUNG_DEV_KEYPAD |
146 | select HAVE_S3C2410_WATCHDOG | 146 | select HAVE_S3C2410_WATCHDOG if WATCHDOG |
147 | select S3C64XX_SETUP_SDHCI | 147 | select S3C64XX_SETUP_SDHCI |
148 | select S3C64XX_SETUP_I2C1 | 148 | select S3C64XX_SETUP_I2C1 |
149 | select S3C64XX_SETUP_IDE | 149 | select S3C64XX_SETUP_IDE |
diff --git a/arch/arm/mach-shark/include/mach/vmalloc.h b/arch/arm/mach-shark/include/mach/vmalloc.h index 8e845b6a7cb5..b10df988526d 100644 --- a/arch/arm/mach-shark/include/mach/vmalloc.h +++ b/arch/arm/mach-shark/include/mach/vmalloc.h | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/arm/mach-shark/include/mach/vmalloc.h | 2 | * arch/arm/mach-shark/include/mach/vmalloc.h |
3 | */ | 3 | */ |
4 | #define VMALLOC_END 0xd0000000 | 4 | #define VMALLOC_END 0xd0000000UL |
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 32d9e2816e56..d3260542b943 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c | |||
@@ -163,11 +163,13 @@ static struct mtd_partition nor_flash_partitions[] = { | |||
163 | .name = "loader", | 163 | .name = "loader", |
164 | .offset = 0x00000000, | 164 | .offset = 0x00000000, |
165 | .size = 512 * 1024, | 165 | .size = 512 * 1024, |
166 | .mask_flags = MTD_WRITEABLE, | ||
166 | }, | 167 | }, |
167 | { | 168 | { |
168 | .name = "bootenv", | 169 | .name = "bootenv", |
169 | .offset = MTDPART_OFS_APPEND, | 170 | .offset = MTDPART_OFS_APPEND, |
170 | .size = 512 * 1024, | 171 | .size = 512 * 1024, |
172 | .mask_flags = MTD_WRITEABLE, | ||
171 | }, | 173 | }, |
172 | { | 174 | { |
173 | .name = "kernel_ro", | 175 | .name = "kernel_ro", |
@@ -581,6 +583,10 @@ static int fsi_set_rate(int is_porta, int rate) | |||
581 | return -EINVAL; | 583 | return -EINVAL; |
582 | 584 | ||
583 | switch (rate) { | 585 | switch (rate) { |
586 | case 44100: | ||
587 | clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 11283000)); | ||
588 | ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64; | ||
589 | break; | ||
584 | case 48000: | 590 | case 48000: |
585 | clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000)); | 591 | clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000)); |
586 | clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000)); | 592 | clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000)); |
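
Two details above are easy to misread. In an mtd_partition, .mask_flags lists capabilities that are masked out of the parent device for that partition, so adding MTD_WRITEABLE makes "loader" and "bootenv" read-only rather than writeable. And the new 44100 case gives the FSI port a clock setup for 44.1 kHz family rates alongside the existing 48 kHz path. A hedged partition sketch with illustrative values:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Illustrative partition: listing MTD_WRITEABLE in .mask_flags strips
 * the write capability, exposing the partition read-only. */
static struct mtd_partition example_parts[] = {
	{
		.name		= "loader",
		.offset		= 0,
		.size		= 512 * 1024,
		.mask_flags	= MTD_WRITEABLE,	/* read-only */
	},
};
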
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c index 7db31e6c6bf2..b25ce90a346e 100644 --- a/arch/arm/mach-shmobile/clock-sh7372.c +++ b/arch/arm/mach-shmobile/clock-sh7372.c | |||
@@ -220,8 +220,7 @@ static void pllc2_disable(struct clk *clk) | |||
220 | __raw_writel(__raw_readl(PLLC2CR) & ~0x80000000, PLLC2CR); | 220 | __raw_writel(__raw_readl(PLLC2CR) & ~0x80000000, PLLC2CR); |
221 | } | 221 | } |
222 | 222 | ||
223 | static int pllc2_set_rate(struct clk *clk, | 223 | static int pllc2_set_rate(struct clk *clk, unsigned long rate) |
224 | unsigned long rate, int algo_id) | ||
225 | { | 224 | { |
226 | unsigned long value; | 225 | unsigned long value; |
227 | int idx; | 226 | int idx; |
@@ -463,8 +462,7 @@ static int fsidiv_enable(struct clk *clk) | |||
463 | return 0; | 462 | return 0; |
464 | } | 463 | } |
465 | 464 | ||
466 | static int fsidiv_set_rate(struct clk *clk, | 465 | static int fsidiv_set_rate(struct clk *clk, unsigned long rate) |
467 | unsigned long rate, int algo_id) | ||
468 | { | 466 | { |
469 | int idx; | 467 | int idx; |
470 | 468 | ||
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c index 4cd3cae38e72..30b2f400666a 100644 --- a/arch/arm/mach-shmobile/intc-sh7372.c +++ b/arch/arm/mach-shmobile/intc-sh7372.c | |||
@@ -98,7 +98,7 @@ static struct intc_vect intca_vectors[] __initdata = { | |||
98 | INTC_VECT(IRQ14A, 0x03c0), INTC_VECT(IRQ15A, 0x03e0), | 98 | INTC_VECT(IRQ14A, 0x03c0), INTC_VECT(IRQ15A, 0x03e0), |
99 | INTC_VECT(IRQ16A, 0x3200), INTC_VECT(IRQ17A, 0x3220), | 99 | INTC_VECT(IRQ16A, 0x3200), INTC_VECT(IRQ17A, 0x3220), |
100 | INTC_VECT(IRQ18A, 0x3240), INTC_VECT(IRQ19A, 0x3260), | 100 | INTC_VECT(IRQ18A, 0x3240), INTC_VECT(IRQ19A, 0x3260), |
101 | INTC_VECT(IRQ20A, 0x3280), INTC_VECT(IRQ31A, 0x32a0), | 101 | INTC_VECT(IRQ20A, 0x3280), INTC_VECT(IRQ21A, 0x32a0), |
102 | INTC_VECT(IRQ22A, 0x32c0), INTC_VECT(IRQ23A, 0x32e0), | 102 | INTC_VECT(IRQ22A, 0x32c0), INTC_VECT(IRQ23A, 0x32e0), |
103 | INTC_VECT(IRQ24A, 0x3300), INTC_VECT(IRQ25A, 0x3320), | 103 | INTC_VECT(IRQ24A, 0x3300), INTC_VECT(IRQ25A, 0x3320), |
104 | INTC_VECT(IRQ26A, 0x3340), INTC_VECT(IRQ27A, 0x3360), | 104 | INTC_VECT(IRQ26A, 0x3340), INTC_VECT(IRQ27A, 0x3360), |
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c index 73fb1a551ec6..608a1372b172 100644 --- a/arch/arm/mach-ux500/cpu.c +++ b/arch/arm/mach-ux500/cpu.c | |||
@@ -75,14 +75,14 @@ void __init ux500_init_irq(void) | |||
75 | static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask) | 75 | static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask) |
76 | { | 76 | { |
77 | /* wait for the operation to complete */ | 77 | /* wait for the operation to complete */ |
78 | while (readl(reg) & mask) | 78 | while (readl_relaxed(reg) & mask) |
79 | ; | 79 | ; |
80 | } | 80 | } |
81 | 81 | ||
82 | static inline void ux500_cache_sync(void) | 82 | static inline void ux500_cache_sync(void) |
83 | { | 83 | { |
84 | void __iomem *base = __io_address(UX500_L2CC_BASE); | 84 | void __iomem *base = __io_address(UX500_L2CC_BASE); |
85 | writel(0, base + L2X0_CACHE_SYNC); | 85 | writel_relaxed(0, base + L2X0_CACHE_SYNC); |
86 | ux500_cache_wait(base + L2X0_CACHE_SYNC, 1); | 86 | ux500_cache_wait(base + L2X0_CACHE_SYNC, 1); |
87 | } | 87 | } |
88 | 88 | ||
@@ -107,7 +107,7 @@ static void ux500_l2x0_inv_all(void) | |||
107 | uint32_t l2x0_way_mask = (1<<16) - 1; /* Bitmask of active ways */ | 107 | uint32_t l2x0_way_mask = (1<<16) - 1; /* Bitmask of active ways */ |
108 | 108 | ||
109 | /* invalidate all ways */ | 109 | /* invalidate all ways */ |
110 | writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); | 110 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); |
111 | ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); | 111 | ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); |
112 | ux500_cache_sync(); | 112 | ux500_cache_sync(); |
113 | } | 113 | } |
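
The switch to readl_relaxed()/writel_relaxed() above matters because the ordinary ARM accessors include barriers, and the write barrier can itself invoke the outer-cache sync hook; inside code that implements that very L2 cache sync this is at best redundant and at worst recursive. The relaxed variants are the raw accesses without the barriers. A simplified sketch of the relationship (not the real ARM definitions):

#include <linux/io.h>

/* Simplified, for illustration only: roughly how the barrier-full and
 * relaxed accessors relate on ARM of this era. */
#define example_writel_relaxed(v, a)	__raw_writel((v), (a))
#define example_writel(v, a)		({ wmb(); example_writel_relaxed(v, a); })
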
diff --git a/arch/arm/mach-versatile/include/mach/vmalloc.h b/arch/arm/mach-versatile/include/mach/vmalloc.h index ebd8a2543d3b..7d8e069ad51b 100644 --- a/arch/arm/mach-versatile/include/mach/vmalloc.h +++ b/arch/arm/mach-versatile/include/mach/vmalloc.h | |||
@@ -18,4 +18,4 @@ | |||
18 | * along with this program; if not, write to the Free Software | 18 | * along with this program; if not, write to the Free Software |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
20 | */ | 20 | */ |
21 | #define VMALLOC_END 0xd8000000 | 21 | #define VMALLOC_END 0xd8000000UL |
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 17e7b0b57e49..55c17a6fb22f 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -206,8 +206,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |||
206 | */ | 206 | */ |
207 | if (pfn_valid(pfn)) { | 207 | if (pfn_valid(pfn)) { |
208 | printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n" | 208 | printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n" |
209 | KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n" | 209 | "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n" |
210 | KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n"); | 210 | "will fail in the next kernel release. Please fix your driver.\n"); |
211 | WARN_ON(1); | 211 | WARN_ON(1); |
212 | } | 212 | } |
213 | 213 | ||
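
The printk() fix above reflects how loglevels work: the KERN_WARNING marker is interpreted only at the start of a message, and adjacent string literals in one call are concatenated by the compiler, so repeating KERN_WARNING on the continuation literals embedded the loglevel escape sequence in the middle of the text instead of prefixing new lines. A minimal illustration:

#include <linux/kernel.h>

/* Illustrative only: one call, one loglevel prefix; the compiler
 * concatenates the string literals into a single message. */
static void example_multi_line_warning(void)
{
	printk(KERN_WARNING "first line of the warning\n"
			    "second line, no extra KERN_WARNING\n");
}
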
diff --git a/arch/arm/plat-mxc/devices/platform-imx-dma.c b/arch/arm/plat-mxc/devices/platform-imx-dma.c index 02d989018059..3a705c7877dd 100644 --- a/arch/arm/plat-mxc/devices/platform-imx-dma.c +++ b/arch/arm/plat-mxc/devices/platform-imx-dma.c | |||
@@ -12,15 +12,7 @@ | |||
12 | 12 | ||
13 | #include <mach/hardware.h> | 13 | #include <mach/hardware.h> |
14 | #include <mach/devices-common.h> | 14 | #include <mach/devices-common.h> |
15 | #ifdef SDMA_IS_MERGED | ||
16 | #include <mach/sdma.h> | 15 | #include <mach/sdma.h> |
17 | #else | ||
18 | struct sdma_platform_data { | ||
19 | int sdma_version; | ||
20 | char *cpu_name; | ||
21 | int to_version; | ||
22 | }; | ||
23 | #endif | ||
24 | 16 | ||
25 | struct imx_imx_sdma_data { | 17 | struct imx_imx_sdma_data { |
26 | resource_size_t iobase; | 18 | resource_size_t iobase; |
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c index e48340ec331e..17f724c9452d 100644 --- a/arch/arm/plat-mxc/devices/platform-spi_imx.c +++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c | |||
@@ -27,6 +27,7 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = { | |||
27 | imx_spi_imx_data_entry(MX21, CSPI, "imx21-cspi", _id, _hwid, SZ_4K) | 27 | imx_spi_imx_data_entry(MX21, CSPI, "imx21-cspi", _id, _hwid, SZ_4K) |
28 | imx21_cspi_data_entry(0, 1), | 28 | imx21_cspi_data_entry(0, 1), |
29 | imx21_cspi_data_entry(1, 2), | 29 | imx21_cspi_data_entry(1, 2), |
30 | }; | ||
30 | #endif | 31 | #endif |
31 | 32 | ||
32 | #ifdef CONFIG_ARCH_MX25 | 33 | #ifdef CONFIG_ARCH_MX25 |
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c index aedf9c1d645e..63cdc6025bd7 100644 --- a/arch/arm/plat-nomadik/timer.c +++ b/arch/arm/plat-nomadik/timer.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2008 STMicroelectronics | 4 | * Copyright (C) 2008 STMicroelectronics |
5 | * Copyright (C) 2010 Alessandro Rubini | 5 | * Copyright (C) 2010 Alessandro Rubini |
6 | * Copyright (C) 2010 Linus Walleij for ST-Ericsson | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2, as | 9 | * it under the terms of the GNU General Public License version 2, as |
@@ -16,11 +17,13 @@ | |||
16 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
17 | #include <linux/jiffies.h> | 18 | #include <linux/jiffies.h> |
18 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/cnt32_to_63.h> | ||
21 | #include <linux/timer.h> | ||
19 | #include <asm/mach/time.h> | 22 | #include <asm/mach/time.h> |
20 | 23 | ||
21 | #include <plat/mtu.h> | 24 | #include <plat/mtu.h> |
22 | 25 | ||
23 | void __iomem *mtu_base; /* ssigned by machine code */ | 26 | void __iomem *mtu_base; /* Assigned by machine code */ |
24 | 27 | ||
25 | /* | 28 | /* |
26 | * Kernel assumes that sched_clock can be called early | 29 | * Kernel assumes that sched_clock can be called early |
@@ -48,16 +51,82 @@ static struct clocksource nmdk_clksrc = { | |||
48 | /* | 51 | /* |
49 | * Override the global weak sched_clock symbol with this | 52 | * Override the global weak sched_clock symbol with this |
50 | * local implementation which uses the clocksource to get some | 53 | * local implementation which uses the clocksource to get some |
51 | * better resolution when scheduling the kernel. We accept that | 54 | * better resolution when scheduling the kernel. |
52 | * this wraps around for now, since it is just a relative time | 55 | * |
53 | * stamp. (Inspired by OMAP implementation.) | 56 | * Because the hardware timer period may be quite short |
57 | * (32.3 secs on the 133 MHz MTU timer selection on ux500) | ||
58 | * and because cnt32_to_63() needs to be called at least once per | ||
59 | * half period to work properly, a kernel keepwarm() timer is set up | ||
60 | * to ensure this requirement is always met. | ||
61 | * | ||
62 | * Also the sched_clock timer will wrap around at some point, | ||
63 | * here we set it to run continously for a year. | ||
54 | */ | 64 | */ |
65 | #define SCHED_CLOCK_MIN_WRAP 3600*24*365 | ||
66 | static struct timer_list cnt32_to_63_keepwarm_timer; | ||
67 | static u32 sched_mult; | ||
68 | static u32 sched_shift; | ||
69 | |||
55 | unsigned long long notrace sched_clock(void) | 70 | unsigned long long notrace sched_clock(void) |
56 | { | 71 | { |
57 | return clocksource_cyc2ns(nmdk_clksrc.read( | 72 | u64 cycles; |
58 | &nmdk_clksrc), | 73 | |
59 | nmdk_clksrc.mult, | 74 | if (unlikely(!mtu_base)) |
60 | nmdk_clksrc.shift); | 75 | return 0; |
76 | |||
77 | cycles = cnt32_to_63(-readl(mtu_base + MTU_VAL(0))); | ||
78 | /* | ||
79 | * sched_mult is guaranteed to be even so will | ||
80 | * shift out bit 63 | ||
81 | */ | ||
82 | return (cycles * sched_mult) >> sched_shift; | ||
83 | } | ||
84 | |||
85 | /* Just kick sched_clock every so often */ | ||
86 | static void cnt32_to_63_keepwarm(unsigned long data) | ||
87 | { | ||
88 | mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + data)); | ||
89 | (void) sched_clock(); | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * Set up a timer to keep sched_clock():s 32_to_63 algorithm warm | ||
94 | * once in half a 32bit timer wrap interval. | ||
95 | */ | ||
96 | static void __init nmdk_sched_clock_init(unsigned long rate) | ||
97 | { | ||
98 | u32 v; | ||
99 | unsigned long delta; | ||
100 | u64 days; | ||
101 | |||
102 | /* Find the apropriate mult and shift factors */ | ||
103 | clocks_calc_mult_shift(&sched_mult, &sched_shift, | ||
104 | rate, NSEC_PER_SEC, SCHED_CLOCK_MIN_WRAP); | ||
105 | /* We need to multiply by an even number to get rid of bit 63 */ | ||
106 | if (sched_mult & 1) | ||
107 | sched_mult++; | ||
108 | |||
109 | /* Let's see what we get, take max counter and scale it */ | ||
110 | days = (0xFFFFFFFFFFFFFFFFLLU * sched_mult) >> sched_shift; | ||
111 | do_div(days, NSEC_PER_SEC); | ||
112 | do_div(days, (3600*24)); | ||
113 | |||
114 | pr_info("sched_clock: using %d bits @ %lu Hz wrap in %lu days\n", | ||
115 | (64 - sched_shift), rate, (unsigned long) days); | ||
116 | |||
117 | /* | ||
118 | * Program a timer to kick us at half 32bit wraparound | ||
119 | * Formula: seconds per wrap = (2^32) / f | ||
120 | */ | ||
121 | v = 0xFFFFFFFFUL / rate; | ||
122 | /* We want half of the wrap time to keep cnt32_to_63 warm */ | ||
123 | v /= 2; | ||
124 | pr_debug("sched_clock: prescaled timer rate: %lu Hz, " | ||
125 | "initialize keepwarm timer every %d seconds\n", rate, v); | ||
126 | /* Convert seconds to jiffies */ | ||
127 | delta = msecs_to_jiffies(v*1000); | ||
128 | setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm, delta); | ||
129 | mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + delta)); | ||
61 | } | 130 | } |
62 | 131 | ||
63 | /* Clockevent device: use one-shot mode */ | 132 | /* Clockevent device: use one-shot mode */ |
@@ -161,13 +230,15 @@ void __init nmdk_timer_init(void) | |||
161 | writel(0, mtu_base + MTU_BGLR(0)); | 230 | writel(0, mtu_base + MTU_BGLR(0)); |
162 | writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0)); | 231 | writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0)); |
163 | 232 | ||
164 | /* Now the scheduling clock is ready */ | 233 | /* Now the clock source is ready */ |
165 | nmdk_clksrc.read = nmdk_read_timer; | 234 | nmdk_clksrc.read = nmdk_read_timer; |
166 | 235 | ||
167 | if (clocksource_register(&nmdk_clksrc)) | 236 | if (clocksource_register(&nmdk_clksrc)) |
168 | pr_err("timer: failed to initialize clock source %s\n", | 237 | pr_err("timer: failed to initialize clock source %s\n", |
169 | nmdk_clksrc.name); | 238 | nmdk_clksrc.name); |
170 | 239 | ||
240 | nmdk_sched_clock_init(rate); | ||
241 | |||
171 | /* Timer 1 is used for events */ | 242 | /* Timer 1 is used for events */ |
172 | 243 | ||
173 | clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); | 244 | clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); |
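
The sched_clock() scheme above has two moving parts worth restating. cnt32_to_63() extends the free-running 32-bit MTU count to 63 bits, but only stays correct if it is sampled at least once per half counter period, hence the keepwarm timer; at the 133 MHz rate quoted in the comment a full wrap is about 2^32 / 133 MHz ≈ 32 s, so the timer refires roughly every 16 s. The 63-bit count is then scaled to nanoseconds with a clocks_calc_mult_shift() pair, and mult is forced even so the multiplication shifts the (possibly set) top bit out. A hedged restatement of the interval arithmetic:

/* Hedged restatement of the keepwarm interval computed above:
 * a 32-bit counter at `rate` Hz wraps every ~2^32 / rate seconds
 * (about 32 s at 133 MHz), and cnt32_to_63() needs one call per
 * half wrap, so the timer period is half of that (about 16 s). */
static unsigned long example_keepwarm_seconds(unsigned long rate)
{
	unsigned long wrap = 0xFFFFFFFFUL / rate;	/* ~ full wrap, in s */

	return wrap / 2;				/* half wrap */
}
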
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index f5c5b8da9a87..2c2826571d45 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c | |||
@@ -1983,6 +1983,8 @@ static int omap2_dma_handle_ch(int ch) | |||
1983 | 1983 | ||
1984 | dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch)); | 1984 | dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch)); |
1985 | dma_write(1 << ch, IRQSTATUS_L0); | 1985 | dma_write(1 << ch, IRQSTATUS_L0); |
1986 | /* read back the register to flush the write */ | ||
1987 | dma_read(IRQSTATUS_L0); | ||
1986 | 1988 | ||
1987 | /* If the ch is not chained then chain_id will be -1 */ | 1989 | /* If the ch is not chained then chain_id will be -1 */ |
1988 | if (dma_chan[ch].chain_id != -1) { | 1990 | if (dma_chan[ch].chain_id != -1) { |
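
The added dma_read() is the usual posted-write flush: writes to the DMA controller can be buffered on the interconnect, so reading a register back from the same peripheral forces the status-clear write to reach the hardware before the interrupt handler returns, preventing the just-acked interrupt from firing again. A generic sketch of the idiom with made-up offsets:

#include <linux/io.h>

/* Illustrative only; register offsets are invented. */
static void example_ack_irq(void __iomem *base)
{
	writel(1, base + 0x08);		/* clear the interrupt status */
	(void)readl(base + 0x08);	/* read back: flush the posted write */
}
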
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index cd0c090ebc54..b407bc8ad918 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c | |||
@@ -7,7 +7,6 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/smp_lock.h> | ||
11 | #include <linux/unistd.h> | 10 | #include <linux/unistd.h> |
12 | #include <linux/user.h> | 11 | #include <linux/user.h> |
13 | #include <linux/uaccess.h> | 12 | #include <linux/uaccess.h> |
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c index 2b63b0191f52..efad12071c2e 100644 --- a/arch/frv/kernel/process.c +++ b/arch/frv/kernel/process.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/stddef.h> | 19 | #include <linux/stddef.h> |
21 | #include <linux/unistd.h> | 20 | #include <linux/unistd.h> |
22 | #include <linux/ptrace.h> | 21 | #include <linux/ptrace.h> |
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index 97478138e361..933bd388efb2 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
30 | #include <linux/smp.h> | 30 | #include <linux/smp.h> |
31 | #include <linux/smp_lock.h> | ||
32 | #include <linux/stddef.h> | 31 | #include <linux/stddef.h> |
33 | #include <linux/unistd.h> | 32 | #include <linux/unistd.h> |
34 | #include <linux/ptrace.h> | 33 | #include <linux/ptrace.h> |
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c index 3a078ad3aa44..331de723c676 100644 --- a/arch/ia64/hp/sim/simscsi.c +++ b/arch/ia64/hp/sim/simscsi.c | |||
@@ -202,7 +202,7 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode) | |||
202 | } | 202 | } |
203 | 203 | ||
204 | static int | 204 | static int |
205 | simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | 205 | simscsi_queuecommand_lck (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) |
206 | { | 206 | { |
207 | unsigned int target_id = sc->device->id; | 207 | unsigned int target_id = sc->device->id; |
208 | char fname[MAX_ROOT_LEN+16]; | 208 | char fname[MAX_ROOT_LEN+16]; |
@@ -326,6 +326,8 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | |||
326 | return 0; | 326 | return 0; |
327 | } | 327 | } |
328 | 328 | ||
329 | static DEF_SCSI_QCMD(simscsi_queuecommand) | ||
330 | |||
329 | static int | 331 | static int |
330 | simscsi_host_reset (struct scsi_cmnd *sc) | 332 | simscsi_host_reset (struct scsi_cmnd *sc) |
331 | { | 333 | { |
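
The simscsi change is part of the SCSI host_lock push-down: the driver's queuecommand body is renamed with an _lck suffix and DEF_SCSI_QCMD() generates the exported entry point, which takes the Scsi_Host lock around the old body so the midlayer no longer has to. From memory of that era's <scsi/scsi_host.h>, the macro expands to roughly the following; treat the exact body as an approximation:

/* Approximate expansion of DEF_SCSI_QCMD(simscsi_queuecommand). */
int simscsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(shost->host_lock, flags);
	rc = simscsi_queuecommand_lck(cmd, cmd->scsi_done);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return rc;
}
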
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 18732ab23292..c2a1fc23dd75 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/fs.h> | 19 | #include <linux/fs.h> |
20 | #include <linux/smp.h> | 20 | #include <linux/smp.h> |
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/stddef.h> | 21 | #include <linux/stddef.h> |
23 | #include <linux/unistd.h> | 22 | #include <linux/unistd.h> |
24 | #include <linux/ptrace.h> | 23 | #include <linux/ptrace.h> |
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c index 6d3390590e5b..e2a63af5d517 100644 --- a/arch/m68knommu/kernel/process.c +++ b/arch/m68knommu/kernel/process.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
21 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
22 | #include <linux/smp_lock.h> | ||
23 | #include <linux/stddef.h> | 22 | #include <linux/stddef.h> |
24 | #include <linux/unistd.h> | 23 | #include <linux/unistd.h> |
25 | #include <linux/ptrace.h> | 24 | #include <linux/ptrace.h> |
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index 0d0f8049a17b..e1b14a6ed544 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
16 | #include <linux/smp.h> | 16 | #include <linux/smp.h> |
17 | #include <linux/smp_lock.h> | ||
18 | #include <linux/stddef.h> | 17 | #include <linux/stddef.h> |
19 | #include <linux/unistd.h> | 18 | #include <linux/unistd.h> |
20 | #include <linux/ptrace.h> | 19 | #include <linux/ptrace.h> |
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c index ba430a03bc7a..30394081d9b6 100644 --- a/arch/parisc/hpux/sys_hpux.c +++ b/arch/parisc/hpux/sys_hpux.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/namei.h> | 28 | #include <linux/namei.h> |
29 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include <linux/smp_lock.h> | ||
32 | #include <linux/syscalls.h> | 31 | #include <linux/syscalls.h> |
33 | #include <linux/utsname.h> | 32 | #include <linux/utsname.h> |
34 | #include <linux/vfs.h> | 33 | #include <linux/vfs.h> |
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index 9779ece2b070..88a0ad14a9c9 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/times.h> | 20 | #include <linux/times.h> |
21 | #include <linux/time.h> | 21 | #include <linux/time.h> |
22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
23 | #include <linux/smp_lock.h> | ||
24 | #include <linux/sem.h> | 23 | #include <linux/sem.h> |
25 | #include <linux/msg.h> | 24 | #include <linux/msg.h> |
26 | #include <linux/shm.h> | 25 | #include <linux/shm.h> |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b6447190e1a2..e625e9e034ae 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -4,6 +4,10 @@ config PPC32 | |||
4 | bool | 4 | bool |
5 | default y if !PPC64 | 5 | default y if !PPC64 |
6 | 6 | ||
7 | config 32BIT | ||
8 | bool | ||
9 | default y if PPC32 | ||
10 | |||
7 | config 64BIT | 11 | config 64BIT |
8 | bool | 12 | bool |
9 | default y if PPC64 | 13 | default y if PPC64 |
diff --git a/arch/powerpc/boot/div64.S b/arch/powerpc/boot/div64.S index 722f360a32a9..d271ab542673 100644 --- a/arch/powerpc/boot/div64.S +++ b/arch/powerpc/boot/div64.S | |||
@@ -33,9 +33,10 @@ __div64_32: | |||
33 | cntlzw r0,r5 # we are shifting the dividend right | 33 | cntlzw r0,r5 # we are shifting the dividend right |
34 | li r10,-1 # to make it < 2^32, and shifting | 34 | li r10,-1 # to make it < 2^32, and shifting |
35 | srw r10,r10,r0 # the divisor right the same amount, | 35 | srw r10,r10,r0 # the divisor right the same amount, |
36 | add r9,r4,r10 # rounding up (so the estimate cannot | 36 | addc r9,r4,r10 # rounding up (so the estimate cannot |
37 | andc r11,r6,r10 # ever be too large, only too small) | 37 | andc r11,r6,r10 # ever be too large, only too small) |
38 | andc r9,r9,r10 | 38 | andc r9,r9,r10 |
39 | addze r9,r9 | ||
39 | or r11,r5,r11 | 40 | or r11,r5,r11 |
40 | rotlw r9,r9,r0 | 41 | rotlw r9,r9,r0 |
41 | rotlw r11,r11,r0 | 42 | rotlw r11,r11,r0 |
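
The div64 fix is about a lost carry: the addition involved in rounding the shifted operands can carry out of 32 bits, and a plain add silently drops that carry, so the division result comes out wrong for some inputs. addc records the carry and the new addze folds it back into r9 after the masking step. A loose C analogue of the addc/andc/addze sequence (illustrative, not a translation of the routine):

#include <linux/types.h>

/* Loose analogue: preserve the carry of a 32-bit add across a mask. */
static u32 example_round_and_mask(u32 hi, u32 round, u32 mask)
{
	u32 sum   = hi + round;		/* addc: may wrap around       */
	u32 carry = sum < hi;		/* carry out of the 32-bit add */

	sum &= ~mask;			/* andc                        */
	return sum + carry;		/* addze: fold the carry back  */
}
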
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 7a9db64f3f04..42850ee00ada 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c | |||
@@ -337,7 +337,7 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) | |||
337 | /* FP registers 32 -> 63 */ | 337 | /* FP registers 32 -> 63 */ |
338 | #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE) | 338 | #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE) |
339 | if (current) | 339 | if (current) |
340 | memcpy(mem, current->thread.evr[regno-32], | 340 | memcpy(mem, ¤t->thread.evr[regno-32], |
341 | dbg_reg_def[regno].size); | 341 | dbg_reg_def[regno].size); |
342 | #else | 342 | #else |
343 | /* fp registers not used by kernel, leave zero */ | 343 | /* fp registers not used by kernel, leave zero */ |
@@ -362,7 +362,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) | |||
362 | if (regno >= 32 && regno < 64) { | 362 | if (regno >= 32 && regno < 64) { |
363 | /* FP registers 32 -> 63 */ | 363 | /* FP registers 32 -> 63 */ |
364 | #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE) | 364 | #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE) |
365 | memcpy(current->thread.evr[regno-32], mem, | 365 | memcpy(¤t->thread.evr[regno-32], mem, |
366 | dbg_reg_def[regno].size); | 366 | dbg_reg_def[regno].size); |
367 | #else | 367 | #else |
368 | /* fp registers not used by kernel, leave zero */ | 368 | /* fp registers not used by kernel, leave zero */ |
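
The kgdb fix is a pointer-vs-value bug: thread.evr[] stores the SPE register high words as integers, so indexing it yields a value, and passing that value to memcpy() treats the register contents as a source address. Taking the element's address with & copies the register contents instead. A tiny illustration with a local array standing in for current->thread.evr:

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: copy one element of an integer register array. */
static void example_copy_reg(void *mem, unsigned long *evr, int idx, size_t size)
{
	memcpy(mem, &evr[idx], size);	/* address of the element */
	/* memcpy(mem, evr[idx], size) would misuse the stored value
	 * as a source pointer, which is what the hunk above fixes. */
}
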
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 2a178b0ebcdf..ce6f61c6f871 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -497,9 +497,8 @@ static void __init emergency_stack_init(void) | |||
497 | } | 497 | } |
498 | 498 | ||
499 | /* | 499 | /* |
500 | * Called into from start_kernel, after lock_kernel has been called. | 500 | * Called into from start_kernel this initializes bootmem, which is used |
501 | * Initializes bootmem, which is unsed to manage page allocation until | 501 | * to manage page allocation until mem_init is called. |
502 | * mem_init is called. | ||
503 | */ | 502 | */ |
504 | void __init setup_arch(char **cmdline_p) | 503 | void __init setup_arch(char **cmdline_p) |
505 | { | 504 | { |
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index b1b6043a56c4..4e5bf1edc0f2 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/resource.h> | 23 | #include <linux/resource.h> |
24 | #include <linux/times.h> | 24 | #include <linux/times.h> |
25 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
26 | #include <linux/smp_lock.h> | ||
27 | #include <linux/sem.h> | 26 | #include <linux/sem.h> |
28 | #include <linux/msg.h> | 27 | #include <linux/msg.h> |
29 | #include <linux/shm.h> | 28 | #include <linux/shm.h> |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 83f534d862db..5e9584405c45 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -1123,7 +1123,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, | |||
1123 | else | 1123 | else |
1124 | #endif /* CONFIG_PPC_HAS_HASH_64K */ | 1124 | #endif /* CONFIG_PPC_HAS_HASH_64K */ |
1125 | rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, | 1125 | rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, |
1126 | subpage_protection(pgdir, ea)); | 1126 | subpage_protection(mm, ea)); |
1127 | 1127 | ||
1128 | /* Dump some info in case of hash insertion failure, they should | 1128 | /* Dump some info in case of hash insertion failure, they should |
1129 | * never happen so it is really useful to know if/when they do | 1129 | * never happen so it is really useful to know if/when they do |
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index 8b04c54e596f..8526bd9d2aa3 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S | |||
@@ -138,8 +138,11 @@ | |||
138 | cmpldi cr0,r15,0 /* Check for user region */ | 138 | cmpldi cr0,r15,0 /* Check for user region */ |
139 | std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */ | 139 | std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */ |
140 | beq normal_tlb_miss | 140 | beq normal_tlb_miss |
141 | |||
142 | li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */ | ||
143 | oris r11,r11,_PAGE_ACCESSED@h | ||
141 | /* XXX replace the RMW cycles with immediate loads + writes */ | 144 | /* XXX replace the RMW cycles with immediate loads + writes */ |
142 | 1: mfspr r10,SPRN_MAS1 | 145 | mfspr r10,SPRN_MAS1 |
143 | cmpldi cr0,r15,8 /* Check for vmalloc region */ | 146 | cmpldi cr0,r15,8 /* Check for vmalloc region */ |
144 | rlwinm r10,r10,0,16,1 /* Clear TID */ | 147 | rlwinm r10,r10,0,16,1 /* Clear TID */ |
145 | mtspr SPRN_MAS1,r10 | 148 | mtspr SPRN_MAS1,r10 |
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 36c0c449a899..2a030d89bbc6 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c | |||
@@ -585,6 +585,6 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, | |||
585 | ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); | 585 | ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); |
586 | 586 | ||
587 | /* Finally limit subsequent allocations */ | 587 | /* Finally limit subsequent allocations */ |
588 | memblock_set_current_limit(ppc64_memblock_base + ppc64_rma_size); | 588 | memblock_set_current_limit(first_memblock_base + ppc64_rma_size); |
589 | } | 589 | } |
590 | #endif /* CONFIG_PPC64 */ | 590 | #endif /* CONFIG_PPC64 */ |
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index c667f0f02c34..3139814f6439 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig | |||
@@ -47,6 +47,12 @@ config LPARCFG | |||
47 | config PPC_PSERIES_DEBUG | 47 | config PPC_PSERIES_DEBUG |
48 | depends on PPC_PSERIES && PPC_EARLY_DEBUG | 48 | depends on PPC_PSERIES && PPC_EARLY_DEBUG |
49 | bool "Enable extra debug logging in platforms/pseries" | 49 | bool "Enable extra debug logging in platforms/pseries" |
50 | help | ||
51 | Say Y here if you want the pseries core to produce a bunch of | ||
52 | debug messages to the system log. Select this if you are having a | ||
53 | problem with the pseries core and want to see more of what is | ||
54 | going on. This does not enable debugging in lpar.c, which must | ||
55 | be manually done due to its verbosity. | ||
50 | default y | 56 | default y |
51 | 57 | ||
52 | config PPC_SMLPAR | 58 | config PPC_SMLPAR |
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 34b7dc12e731..17a11c82e6f8 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c | |||
@@ -21,8 +21,6 @@ | |||
21 | * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com> | 21 | * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com> |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #undef DEBUG | ||
25 | |||
26 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
27 | #include <linux/init.h> | 25 | #include <linux/init.h> |
28 | #include <linux/list.h> | 26 | #include <linux/list.h> |
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 4b7a062dee15..5fcc92a12d3e 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c | |||
@@ -25,8 +25,6 @@ | |||
25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #undef DEBUG | ||
29 | |||
30 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
31 | #include <asm/pci-bridge.h> | 29 | #include <asm/pci-bridge.h> |
32 | #include <asm/ppc-pci.h> | 30 | #include <asm/ppc-pci.h> |
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug index 45e0c6199f36..05221b13ffb1 100644 --- a/arch/s390/Kconfig.debug +++ b/arch/s390/Kconfig.debug | |||
@@ -6,6 +6,18 @@ config TRACE_IRQFLAGS_SUPPORT | |||
6 | 6 | ||
7 | source "lib/Kconfig.debug" | 7 | source "lib/Kconfig.debug" |
8 | 8 | ||
9 | config STRICT_DEVMEM | ||
10 | def_bool y | ||
11 | prompt "Filter access to /dev/mem" | ||
12 | ---help--- | ||
13 | This option restricts access to /dev/mem. If this option is | ||
14 | disabled, you allow userspace access to all memory, including | ||
15 | kernel and userspace memory. Accidental memory access is likely | ||
16 | to be disastrous. | ||
17 | Memory access is required for experts who want to debug the kernel. | ||
18 | |||
19 | If you are unsure, say Y. | ||
20 | |||
9 | config DEBUG_STRICT_USER_COPY_CHECKS | 21 | config DEBUG_STRICT_USER_COPY_CHECKS |
10 | bool "Strict user copy size checks" | 22 | bool "Strict user copy size checks" |
11 | ---help--- | 23 | ---help--- |
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index a8729ea7e9ac..3c987e9ec8d6 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h | |||
@@ -130,6 +130,11 @@ struct page; | |||
130 | void arch_free_page(struct page *page, int order); | 130 | void arch_free_page(struct page *page, int order); |
131 | void arch_alloc_page(struct page *page, int order); | 131 | void arch_alloc_page(struct page *page, int order); |
132 | 132 | ||
133 | static inline int devmem_is_allowed(unsigned long pfn) | ||
134 | { | ||
135 | return 0; | ||
136 | } | ||
137 | |||
133 | #define HAVE_ARCH_FREE_PAGE | 138 | #define HAVE_ARCH_FREE_PAGE |
134 | #define HAVE_ARCH_ALLOC_PAGE | 139 | #define HAVE_ARCH_ALLOC_PAGE |
135 | 140 | ||
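
The new devmem_is_allowed() ties into the STRICT_DEVMEM option added above: when the option is set, the /dev/mem driver checks every page of a requested range with devmem_is_allowed() and refuses the access if any page is denied, so an implementation that always returns 0 in effect blocks /dev/mem access to memory on s390. Paraphrased from the STRICT_DEVMEM path in drivers/char/mem.c of this era; details are approximate:

/* Approximate sketch of how the per-pfn check is consumed. */
static inline int example_range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = (u64)pfn << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;	/* refuse the whole request */
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
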
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 1e6449c79ab6..53acaa86dd94 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/resource.h> | 25 | #include <linux/resource.h> |
26 | #include <linux/times.h> | 26 | #include <linux/times.h> |
27 | #include <linux/smp.h> | 27 | #include <linux/smp.h> |
28 | #include <linux/smp_lock.h> | ||
29 | #include <linux/sem.h> | 28 | #include <linux/sem.h> |
30 | #include <linux/msg.h> | 29 | #include <linux/msg.h> |
31 | #include <linux/shm.h> | 30 | #include <linux/shm.h> |
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index d60fc4398516..2564793ec2b6 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/sections.h> | 30 | #include <asm/sections.h> |
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/hardirq.h> | ||
33 | 34 | ||
34 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | 35 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; |
35 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | 36 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); |
@@ -212,7 +213,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | |||
212 | /* Set the PER control regs, turns on single step for this address */ | 213 | /* Set the PER control regs, turns on single step for this address */ |
213 | __ctl_load(kprobe_per_regs, 9, 11); | 214 | __ctl_load(kprobe_per_regs, 9, 11); |
214 | regs->psw.mask |= PSW_MASK_PER; | 215 | regs->psw.mask |= PSW_MASK_PER; |
215 | regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK); | 216 | regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); |
216 | } | 217 | } |
217 | 218 | ||
218 | static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) | 219 | static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) |
@@ -239,7 +240,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | |||
239 | __get_cpu_var(current_kprobe) = p; | 240 | __get_cpu_var(current_kprobe) = p; |
240 | /* Save the interrupt and per flags */ | 241 | /* Save the interrupt and per flags */ |
241 | kcb->kprobe_saved_imask = regs->psw.mask & | 242 | kcb->kprobe_saved_imask = regs->psw.mask & |
242 | (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK); | 243 | (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT); |
243 | /* Save the control regs that govern PER */ | 244 | /* Save the control regs that govern PER */ |
244 | __ctl_store(kcb->kprobe_saved_ctl, 9, 11); | 245 | __ctl_store(kcb->kprobe_saved_ctl, 9, 11); |
245 | } | 246 | } |
@@ -316,8 +317,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
316 | return 1; | 317 | return 1; |
317 | 318 | ||
318 | ss_probe: | 319 | ss_probe: |
319 | if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO)) | ||
320 | local_irq_disable(); | ||
321 | prepare_singlestep(p, regs); | 320 | prepare_singlestep(p, regs); |
322 | kcb->kprobe_status = KPROBE_HIT_SS; | 321 | kcb->kprobe_status = KPROBE_HIT_SS; |
323 | return 1; | 322 | return 1; |
@@ -350,6 +349,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, | |||
350 | struct hlist_node *node, *tmp; | 349 | struct hlist_node *node, *tmp; |
351 | unsigned long flags, orig_ret_address = 0; | 350 | unsigned long flags, orig_ret_address = 0; |
352 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; | 351 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; |
352 | kprobe_opcode_t *correct_ret_addr = NULL; | ||
353 | 353 | ||
354 | INIT_HLIST_HEAD(&empty_rp); | 354 | INIT_HLIST_HEAD(&empty_rp); |
355 | kretprobe_hash_lock(current, &head, &flags); | 355 | kretprobe_hash_lock(current, &head, &flags); |
@@ -372,10 +372,32 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, | |||
372 | /* another task is sharing our hash bucket */ | 372 | /* another task is sharing our hash bucket */ |
373 | continue; | 373 | continue; |
374 | 374 | ||
375 | if (ri->rp && ri->rp->handler) | 375 | orig_ret_address = (unsigned long)ri->ret_addr; |
376 | ri->rp->handler(ri, regs); | 376 | |
377 | if (orig_ret_address != trampoline_address) | ||
378 | /* | ||
379 | * This is the real return address. Any other | ||
380 | * instances associated with this task are for | ||
381 | * other calls deeper on the call stack | ||
382 | */ | ||
383 | break; | ||
384 | } | ||
385 | |||
386 | kretprobe_assert(ri, orig_ret_address, trampoline_address); | ||
387 | |||
388 | correct_ret_addr = ri->ret_addr; | ||
389 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | ||
390 | if (ri->task != current) | ||
391 | /* another task is sharing our hash bucket */ | ||
392 | continue; | ||
377 | 393 | ||
378 | orig_ret_address = (unsigned long)ri->ret_addr; | 394 | orig_ret_address = (unsigned long)ri->ret_addr; |
395 | |||
396 | if (ri->rp && ri->rp->handler) { | ||
397 | ri->ret_addr = correct_ret_addr; | ||
398 | ri->rp->handler(ri, regs); | ||
399 | } | ||
400 | |||
379 | recycle_rp_inst(ri, &empty_rp); | 401 | recycle_rp_inst(ri, &empty_rp); |
380 | 402 | ||
381 | if (orig_ret_address != trampoline_address) { | 403 | if (orig_ret_address != trampoline_address) { |
@@ -387,7 +409,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, | |||
387 | break; | 409 | break; |
388 | } | 410 | } |
389 | } | 411 | } |
390 | kretprobe_assert(ri, orig_ret_address, trampoline_address); | 412 | |
391 | regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; | 413 | regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; |
392 | 414 | ||
393 | reset_current_kprobe(); | 415 | reset_current_kprobe(); |
@@ -465,8 +487,6 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs) | |||
465 | goto out; | 487 | goto out; |
466 | } | 488 | } |
467 | reset_current_kprobe(); | 489 | reset_current_kprobe(); |
468 | if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO)) | ||
469 | local_irq_enable(); | ||
470 | out: | 490 | out: |
471 | preempt_enable_no_resched(); | 491 | preempt_enable_no_resched(); |
472 | 492 | ||
@@ -482,7 +502,7 @@ out: | |||
482 | return 1; | 502 | return 1; |
483 | } | 503 | } |
484 | 504 | ||
485 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | 505 | static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) |
486 | { | 506 | { |
487 | struct kprobe *cur = kprobe_running(); | 507 | struct kprobe *cur = kprobe_running(); |
488 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 508 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
@@ -508,8 +528,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |||
508 | restore_previous_kprobe(kcb); | 528 | restore_previous_kprobe(kcb); |
509 | else { | 529 | else { |
510 | reset_current_kprobe(); | 530 | reset_current_kprobe(); |
511 | if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO)) | ||
512 | local_irq_enable(); | ||
513 | } | 531 | } |
514 | preempt_enable_no_resched(); | 532 | preempt_enable_no_resched(); |
515 | break; | 533 | break; |
@@ -553,6 +571,18 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |||
553 | return 0; | 571 | return 0; |
554 | } | 572 | } |
555 | 573 | ||
574 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | ||
575 | { | ||
576 | int ret; | ||
577 | |||
578 | if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT)) | ||
579 | local_irq_disable(); | ||
580 | ret = kprobe_trap_handler(regs, trapnr); | ||
581 | if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT)) | ||
582 | local_irq_restore(regs->psw.mask & ~PSW_MASK_PER); | ||
583 | return ret; | ||
584 | } | ||
585 | |||
556 | /* | 586 | /* |
557 | * Wrapper routine for handling exceptions. | 587 | * Wrapper routine for handling exceptions. |
558 | */ | 588 | */ |
@@ -560,8 +590,12 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
560 | unsigned long val, void *data) | 590 | unsigned long val, void *data) |
561 | { | 591 | { |
562 | struct die_args *args = (struct die_args *)data; | 592 | struct die_args *args = (struct die_args *)data; |
593 | struct pt_regs *regs = args->regs; | ||
563 | int ret = NOTIFY_DONE; | 594 | int ret = NOTIFY_DONE; |
564 | 595 | ||
596 | if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT)) | ||
597 | local_irq_disable(); | ||
598 | |||
565 | switch (val) { | 599 | switch (val) { |
566 | case DIE_BPT: | 600 | case DIE_BPT: |
567 | if (kprobe_handler(args->regs)) | 601 | if (kprobe_handler(args->regs)) |
@@ -572,16 +606,17 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
572 | ret = NOTIFY_STOP; | 606 | ret = NOTIFY_STOP; |
573 | break; | 607 | break; |
574 | case DIE_TRAP: | 608 | case DIE_TRAP: |
575 | /* kprobe_running() needs smp_processor_id() */ | 609 | if (!preemptible() && kprobe_running() && |
576 | preempt_disable(); | 610 | kprobe_trap_handler(args->regs, args->trapnr)) |
577 | if (kprobe_running() && | ||
578 | kprobe_fault_handler(args->regs, args->trapnr)) | ||
579 | ret = NOTIFY_STOP; | 611 | ret = NOTIFY_STOP; |
580 | preempt_enable(); | ||
581 | break; | 612 | break; |
582 | default: | 613 | default: |
583 | break; | 614 | break; |
584 | } | 615 | } |
616 | |||
617 | if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT)) | ||
618 | local_irq_restore(regs->psw.mask & ~PSW_MASK_PER); | ||
619 | |||
585 | return ret; | 620 | return ret; |
586 | } | 621 | } |
587 | 622 | ||
@@ -595,6 +630,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
595 | 630 | ||
596 | /* setup return addr to the jprobe handler routine */ | 631 | /* setup return addr to the jprobe handler routine */ |
597 | regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE; | 632 | regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE; |
633 | regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); | ||
598 | 634 | ||
599 | /* r14 is the function return address */ | 635 | /* r14 is the function return address */ |
600 | kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14]; | 636 | kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14]; |
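Editor's sketch: the kretprobe trampoline hunk above replaces the old single pass over the per-task instance list with two passes. The first walk only locates the real (non-trampoline) return address; the second walk hands that corrected address to each registered handler before recycling the instance, so handlers no longer see the trampoline address in ri->ret_addr. A minimal sketch of that control flow, using simplified stand-in types rather than the kernel's kretprobe_instance/hlist machinery:

    struct ri {                      /* stand-in for struct kretprobe_instance */
            unsigned long ret_addr;
            void (*handler)(struct ri *);
            struct ri *next;
    };

    static unsigned long run_return_handlers(struct ri *head, unsigned long trampoline)
    {
            unsigned long orig = 0, correct;
            struct ri *i;

            /* Pass 1: locate the first real return address for this task. */
            for (i = head; i; i = i->next) {
                    orig = i->ret_addr;
                    if (orig != trampoline)
                            break;   /* deeper instances belong to other calls */
            }
            correct = orig;          /* kretprobe_assert() checks this in the kernel */

            /* Pass 2: run handlers with the corrected address, then recycle. */
            for (i = head; i; i = i->next) {
                    orig = i->ret_addr;
                    if (i->handler) {
                            i->ret_addr = correct;
                            i->handler(i);
                    }
                    /* recycle_rp_inst() would go here in the real code */
                    if (orig != trampoline)
                            break;
            }
            return orig;             /* the PSW is then pointed at this address */
    }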
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 38e641cdd977..45b405ca2567 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c | |||
@@ -20,18 +20,17 @@ | |||
20 | static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | 20 | static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, |
21 | unsigned long end, int write, struct page **pages, int *nr) | 21 | unsigned long end, int write, struct page **pages, int *nr) |
22 | { | 22 | { |
23 | unsigned long mask, result; | 23 | unsigned long mask; |
24 | pte_t *ptep, pte; | 24 | pte_t *ptep, pte; |
25 | struct page *page; | 25 | struct page *page; |
26 | 26 | ||
27 | result = write ? 0 : _PAGE_RO; | 27 | mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL; |
28 | mask = result | _PAGE_INVALID | _PAGE_SPECIAL; | ||
29 | 28 | ||
30 | ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr); | 29 | ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr); |
31 | do { | 30 | do { |
32 | pte = *ptep; | 31 | pte = *ptep; |
33 | barrier(); | 32 | barrier(); |
34 | if ((pte_val(pte) & mask) != result) | 33 | if ((pte_val(pte) & mask) != 0) |
35 | return 0; | 34 | return 0; |
36 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); | 35 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); |
37 | page = pte_page(pte); | 36 | page = pte_page(pte); |
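Editor's sketch: the gup_pte_range() change above folds the old mask/result pair into a single mask whose expected value is zero. _PAGE_RO is only added when a writable mapping was requested, so one "!= 0" test rejects any pte that is invalid, special, or read-only while a write is wanted. A tiny userspace sketch of the new check; the flag values are made up for illustration and the real s390 definitions differ:

    #include <stdio.h>

    #define _PAGE_RO        0x1     /* illustrative values only */
    #define _PAGE_INVALID   0x2
    #define _PAGE_SPECIAL   0x4

    static int pte_usable(unsigned long pte_val, int write)
    {
            unsigned long mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

            return (pte_val & mask) == 0;   /* no disqualifying bit may be set */
    }

    int main(void)
    {
            printf("%d\n", pte_usable(_PAGE_RO, 0));      /* 1: RO pte is fine for a read   */
            printf("%d\n", pte_usable(_PAGE_RO, 1));      /* 0: RO pte rejected for a write */
            printf("%d\n", pte_usable(_PAGE_INVALID, 1)); /* 0: invalid pte always rejected */
            return 0;
    }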
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 46d5179c9f49..e3c73cdd8c90 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h | |||
@@ -199,10 +199,13 @@ extern unsigned long get_wchan(struct task_struct *p); | |||
199 | #define ARCH_HAS_PREFETCHW | 199 | #define ARCH_HAS_PREFETCHW |
200 | static inline void prefetch(void *x) | 200 | static inline void prefetch(void *x) |
201 | { | 201 | { |
202 | __asm__ __volatile__ ("pref @%0\n\t" : : "r" (x) : "memory"); | 202 | __builtin_prefetch(x, 0, 3); |
203 | } | 203 | } |
204 | 204 | ||
205 | #define prefetchw(x) prefetch(x) | 205 | static inline void prefetchw(void *x) |
206 | { | ||
207 | __builtin_prefetch(x, 1, 3); | ||
208 | } | ||
206 | #endif | 209 | #endif |
207 | 210 | ||
208 | #endif /* __KERNEL__ */ | 211 | #endif /* __KERNEL__ */ |
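Editor's sketch: the SH prefetch()/prefetchw() helpers above now delegate to GCC's __builtin_prefetch(addr, rw, locality), where rw 0 is a read prefetch, rw 1 a write prefetch, and locality 3 asks for maximum temporal locality; the compiler emits the target's pref instruction or nothing if it has none. A small illustrative walk that hints the next element while working on the current one; node and work_on are hypothetical names:

    struct node {
            struct node *next;
            int payload;
    };

    static void walk(struct node *n, void (*work_on)(struct node *))
    {
            while (n) {
                    if (n->next)
                            __builtin_prefetch(n->next, 0, 3);  /* read hint, keep cached */
                    work_on(n);
                    n = n->next;
            }
    }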
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c index 4eabc68cd753..b601fa3978d1 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c | |||
@@ -110,7 +110,7 @@ static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate) | |||
110 | return 0; | 110 | return 0; |
111 | } | 111 | } |
112 | 112 | ||
113 | static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id) | 113 | static int shoc_clk_set_rate(struct clk *clk, unsigned long rate) |
114 | { | 114 | { |
115 | unsigned long frqcr3; | 115 | unsigned long frqcr3; |
116 | unsigned int tmp; | 116 | unsigned int tmp; |
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c index 81f58371613d..8c6a350df751 100644 --- a/arch/sh/kernel/sys_sh.c +++ b/arch/sh/kernel/sys_sh.c | |||
@@ -88,7 +88,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op) | |||
88 | } | 88 | } |
89 | 89 | ||
90 | if (op & CACHEFLUSH_I) | 90 | if (op & CACHEFLUSH_I) |
91 | flush_cache_all(); | 91 | flush_icache_range(addr, addr+len); |
92 | 92 | ||
93 | up_read(¤t->mm->mmap_sem); | 93 | up_read(¤t->mm->mmap_sem); |
94 | return 0; | 94 | return 0; |
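Editor's sketch: the sys_cacheflush() hunk narrows the CACHEFLUSH_I case from flush_cache_all() to flush_icache_range(addr, addr + len), so only the instruction-cache lines covering the requested window are invalidated instead of the whole cache. Kernel code that rewrites instructions relies on the same call; a hedged sketch under that assumption, with an invented helper name:

    #include <linux/string.h>
    #include <asm/cacheflush.h>

    /* Hypothetical helper: copy new instructions and make them visible to I-fetch. */
    static void patch_text_sketch(void *dst, const void *src, size_t len)
    {
            memcpy(dst, src, len);
            flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
    }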
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S index 3b6eb34c43fa..3e70f851cdc6 100644 --- a/arch/sh/kernel/vsyscall/vsyscall-trapa.S +++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S | |||
@@ -8,9 +8,9 @@ __kernel_vsyscall: | |||
8 | * fill out .eh_frame -- PFM. */ | 8 | * fill out .eh_frame -- PFM. */ |
9 | .LEND_vsyscall: | 9 | .LEND_vsyscall: |
10 | .size __kernel_vsyscall,.-.LSTART_vsyscall | 10 | .size __kernel_vsyscall,.-.LSTART_vsyscall |
11 | .previous | ||
12 | 11 | ||
13 | .section .eh_frame,"a",@progbits | 12 | .section .eh_frame,"a",@progbits |
13 | .previous | ||
14 | .LCIE: | 14 | .LCIE: |
15 | .ualong .LCIE_end - .LCIE_start | 15 | .ualong .LCIE_end - .LCIE_start |
16 | .LCIE_start: | 16 | .LCIE_start: |
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index 7524689b03d2..16582d85368a 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/threads.h> | 13 | #include <linux/threads.h> |
14 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
15 | #include <linux/smp_lock.h> | ||
16 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
17 | #include <linux/kernel_stat.h> | 16 | #include <linux/kernel_stat.h> |
18 | #include <linux/init.h> | 17 | #include <linux/init.h> |
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c index e6375a750d9a..6db18c6927fb 100644 --- a/arch/sparc/kernel/sys_sparc32.c +++ b/arch/sparc/kernel/sys_sparc32.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/resource.h> | 17 | #include <linux/resource.h> |
18 | #include <linux/times.h> | 18 | #include <linux/times.h> |
19 | #include <linux/smp.h> | 19 | #include <linux/smp.h> |
20 | #include <linux/smp_lock.h> | ||
21 | #include <linux/sem.h> | 20 | #include <linux/sem.h> |
22 | #include <linux/msg.h> | 21 | #include <linux/msg.h> |
23 | #include <linux/shm.h> | 22 | #include <linux/shm.h> |
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 675c9e11ada5..42b282fa6112 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/mman.h> | 19 | #include <linux/mman.h> |
20 | #include <linux/utsname.h> | 20 | #include <linux/utsname.h> |
21 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
22 | #include <linux/smp_lock.h> | ||
23 | #include <linux/ipc.h> | 22 | #include <linux/ipc.h> |
24 | 23 | ||
25 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index 12b9f352595f..4491f4cb2695 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <asm/system.h> | 16 | #include <asm/system.h> |
17 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/perf_event.h> | 19 | #include <linux/perf_event.h> |
21 | 20 | ||
22 | enum direction { | 21 | enum direction { |
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c index b351770cbdd6..3107381e576d 100644 --- a/arch/sparc/kernel/windows.c +++ b/arch/sparc/kernel/windows.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/string.h> | 9 | #include <linux/string.h> |
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
12 | #include <linux/smp_lock.h> | ||
13 | 12 | ||
14 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
15 | 14 | ||
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 07ec8a865c1d..e11b5fcb70eb 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig | |||
@@ -329,6 +329,18 @@ endmenu # Tilera-specific configuration | |||
329 | 329 | ||
330 | menu "Bus options" | 330 | menu "Bus options" |
331 | 331 | ||
332 | config PCI | ||
333 | bool "PCI support" | ||
334 | default y | ||
335 | select PCI_DOMAINS | ||
336 | ---help--- | ||
337 | Enable PCI root complex support, so PCIe endpoint devices can | ||
338 | be attached to the Tile chip. Many, but not all, PCI devices | ||
339 | are supported under Tilera's root complex driver. | ||
340 | |||
341 | config PCI_DOMAINS | ||
342 | bool | ||
343 | |||
332 | config NO_IOMEM | 344 | config NO_IOMEM |
333 | def_bool !PCI | 345 | def_bool !PCI |
334 | 346 | ||
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h index c5741da4eeac..14a3f8556ace 100644 --- a/arch/tile/include/asm/cacheflush.h +++ b/arch/tile/include/asm/cacheflush.h | |||
@@ -137,4 +137,56 @@ static inline void finv_buffer(void *buffer, size_t size) | |||
137 | mb_incoherent(); | 137 | mb_incoherent(); |
138 | } | 138 | } |
139 | 139 | ||
140 | /* | ||
141 | * Flush & invalidate a VA range that is homed remotely on a single core, | ||
142 | * waiting until the memory controller holds the flushed values. | ||
143 | */ | ||
144 | static inline void finv_buffer_remote(void *buffer, size_t size) | ||
145 | { | ||
146 | char *p; | ||
147 | int i; | ||
148 | |||
149 | /* | ||
150 | * Flush and invalidate the buffer out of the local L1/L2 | ||
151 | * and request the home cache to flush and invalidate as well. | ||
152 | */ | ||
153 | __finv_buffer(buffer, size); | ||
154 | |||
155 | /* | ||
156 | * Wait for the home cache to acknowledge that it has processed | ||
157 | * all the flush-and-invalidate requests. This does not mean | ||
158 | * that the flushed data has reached the memory controller yet, | ||
159 | * but it does mean the home cache is processing the flushes. | ||
160 | */ | ||
161 | __insn_mf(); | ||
162 | |||
163 | /* | ||
164 | * Issue a load to the last cache line, which can't complete | ||
165 | * until all the previously-issued flushes to the same memory | ||
166 | * controller have also completed. If we weren't striping | ||
167 | * memory, that one load would be sufficient, but since we may | ||
168 | * be, we also need to back up to the last load issued to | ||
169 | * another memory controller, which would be the point where | ||
170 | * we crossed an 8KB boundary (the granularity of striping | ||
171 | * across memory controllers). Keep backing up and doing this | ||
172 | * until we are before the beginning of the buffer, or have | ||
173 | * hit all the controllers. | ||
174 | */ | ||
175 | for (i = 0, p = (char *)buffer + size - 1; | ||
176 | i < (1 << CHIP_LOG_NUM_MSHIMS()) && p >= (char *)buffer; | ||
177 | ++i) { | ||
178 | const unsigned long STRIPE_WIDTH = 8192; | ||
179 | |||
180 | /* Force a load instruction to issue. */ | ||
181 | *(volatile char *)p; | ||
182 | |||
183 | /* Jump to end of previous stripe. */ | ||
184 | p -= STRIPE_WIDTH; | ||
185 | p = (char *)((unsigned long)p | (STRIPE_WIDTH - 1)); | ||
186 | } | ||
187 | |||
188 | /* Wait for the loads (and thus flushes) to have completed. */ | ||
189 | __insn_mf(); | ||
190 | } | ||
191 | |||
140 | #endif /* _ASM_TILE_CACHEFLUSH_H */ | 192 | #endif /* _ASM_TILE_CACHEFLUSH_H */ |
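Editor's sketch: finv_buffer_remote() above targets buffers homed on a single remote core. It flushes and invalidates locally, fences, then issues one dummy load per 8KB memory-controller stripe (walking back from the end of the buffer) so that a final fence can guarantee the flushed data has reached the controllers. A hypothetical caller handing such a buffer to a device for DMA might look like the sketch below; my_dev and my_dev_start_dma() are invented names, not a real tile API:

    struct my_dev;                                       /* invented device type */
    void my_dev_start_dma(struct my_dev *dev, unsigned long pa, unsigned long len);

    static void hand_buffer_to_device(struct my_dev *dev, void *buf, unsigned long len)
    {
            /* Push the buffer out of its remote home cache down to memory... */
            finv_buffer_remote(buf, len);

            /* ...and only then let the device read it via DMA. */
            my_dev_start_dma(dev, __pa(buf), len);
    }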
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index ee43328713ab..d3cbb9b14cbe 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h | |||
@@ -55,9 +55,6 @@ extern void iounmap(volatile void __iomem *addr); | |||
55 | #define ioremap_writethrough(physaddr, size) ioremap(physaddr, size) | 55 | #define ioremap_writethrough(physaddr, size) ioremap(physaddr, size) |
56 | #define ioremap_fullcache(physaddr, size) ioremap(physaddr, size) | 56 | #define ioremap_fullcache(physaddr, size) ioremap(physaddr, size) |
57 | 57 | ||
58 | void __iomem *ioport_map(unsigned long port, unsigned int len); | ||
59 | extern inline void ioport_unmap(void __iomem *addr) {} | ||
60 | |||
61 | #define mmiowb() | 58 | #define mmiowb() |
62 | 59 | ||
63 | /* Conversion between virtual and physical mappings. */ | 60 | /* Conversion between virtual and physical mappings. */ |
@@ -189,12 +186,22 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, | |||
189 | * we never run, uses them unconditionally. | 186 | * we never run, uses them unconditionally. |
190 | */ | 187 | */ |
191 | 188 | ||
192 | static inline int ioport_panic(void) | 189 | static inline long ioport_panic(void) |
193 | { | 190 | { |
194 | panic("inb/outb and friends do not exist on tile"); | 191 | panic("inb/outb and friends do not exist on tile"); |
195 | return 0; | 192 | return 0; |
196 | } | 193 | } |
197 | 194 | ||
195 | static inline void __iomem *ioport_map(unsigned long port, unsigned int len) | ||
196 | { | ||
197 | return (void __iomem *) ioport_panic(); | ||
198 | } | ||
199 | |||
200 | static inline void ioport_unmap(void __iomem *addr) | ||
201 | { | ||
202 | ioport_panic(); | ||
203 | } | ||
204 | |||
198 | static inline u8 inb(unsigned long addr) | 205 | static inline u8 inb(unsigned long addr) |
199 | { | 206 | { |
200 | return ioport_panic(); | 207 | return ioport_panic(); |
diff --git a/arch/tile/include/asm/pci-bridge.h b/arch/tile/include/asm/pci-bridge.h deleted file mode 100644 index e853b0e2793b..000000000000 --- a/arch/tile/include/asm/pci-bridge.h +++ /dev/null | |||
@@ -1,117 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PCI_BRIDGE_H | ||
16 | #define _ASM_TILE_PCI_BRIDGE_H | ||
17 | |||
18 | #include <linux/ioport.h> | ||
19 | #include <linux/pci.h> | ||
20 | |||
21 | struct device_node; | ||
22 | struct pci_controller; | ||
23 | |||
24 | /* | ||
25 | * pci_io_base returns the memory address at which you can access | ||
26 | * the I/O space for PCI bus number `bus' (or NULL on error). | ||
27 | */ | ||
28 | extern void __iomem *pci_bus_io_base(unsigned int bus); | ||
29 | extern unsigned long pci_bus_io_base_phys(unsigned int bus); | ||
30 | extern unsigned long pci_bus_mem_base_phys(unsigned int bus); | ||
31 | |||
32 | /* Allocate a new PCI host bridge structure */ | ||
33 | extern struct pci_controller *pcibios_alloc_controller(void); | ||
34 | |||
35 | /* Helper function for setting up resources */ | ||
36 | extern void pci_init_resource(struct resource *res, unsigned long start, | ||
37 | unsigned long end, int flags, char *name); | ||
38 | |||
39 | /* Get the PCI host controller for a bus */ | ||
40 | extern struct pci_controller *pci_bus_to_hose(int bus); | ||
41 | |||
42 | /* | ||
43 | * Structure of a PCI controller (host bridge) | ||
44 | */ | ||
45 | struct pci_controller { | ||
46 | int index; /* PCI domain number */ | ||
47 | struct pci_bus *root_bus; | ||
48 | |||
49 | int first_busno; | ||
50 | int last_busno; | ||
51 | |||
52 | int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */ | ||
53 | int hv_mem_fd; /* fd to Hypervisor for MMIO operations */ | ||
54 | |||
55 | struct pci_ops *ops; | ||
56 | |||
57 | int irq_base; /* Base IRQ from the Hypervisor */ | ||
58 | int plx_gen1; /* flag for PLX Gen 1 configuration */ | ||
59 | |||
60 | /* Address ranges that are routed to this controller/bridge. */ | ||
61 | struct resource mem_resources[3]; | ||
62 | }; | ||
63 | |||
64 | static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus) | ||
65 | { | ||
66 | return bus->sysdata; | ||
67 | } | ||
68 | |||
69 | extern void setup_indirect_pci_nomap(struct pci_controller *hose, | ||
70 | void __iomem *cfg_addr, void __iomem *cfg_data); | ||
71 | extern void setup_indirect_pci(struct pci_controller *hose, | ||
72 | u32 cfg_addr, u32 cfg_data); | ||
73 | extern void setup_grackle(struct pci_controller *hose); | ||
74 | |||
75 | extern unsigned char common_swizzle(struct pci_dev *, unsigned char *); | ||
76 | |||
77 | /* | ||
78 | * The following code swizzles for exactly one bridge. The routine | ||
 79 | * common_swizzle below handles multiple bridges. But there are | ||
80 | * some boards that don't follow the PCI spec's suggestion so we | ||
81 | * break this piece out separately. | ||
82 | */ | ||
83 | static inline unsigned char bridge_swizzle(unsigned char pin, | ||
84 | unsigned char idsel) | ||
85 | { | ||
86 | return (((pin-1) + idsel) % 4) + 1; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * The following macro is used to lookup irqs in a standard table | ||
91 | * format for those PPC systems that do not already have PCI | ||
92 | * interrupts properly routed. | ||
93 | */ | ||
94 | /* FIXME - double check this */ | ||
95 | #define PCI_IRQ_TABLE_LOOKUP ({ \ | ||
96 | long _ctl_ = -1; \ | ||
97 | if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \ | ||
98 | _ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \ | ||
99 | _ctl_; \ | ||
100 | }) | ||
101 | |||
102 | /* | ||
103 | * Scan the buses below a given PCI host bridge and assign suitable | ||
104 | * resources to all devices found. | ||
105 | */ | ||
106 | extern int pciauto_bus_scan(struct pci_controller *, int); | ||
107 | |||
108 | #ifdef CONFIG_PCI | ||
109 | extern unsigned long pci_address_to_pio(phys_addr_t address); | ||
110 | #else | ||
111 | static inline unsigned long pci_address_to_pio(phys_addr_t address) | ||
112 | { | ||
113 | return (unsigned long)-1; | ||
114 | } | ||
115 | #endif | ||
116 | |||
117 | #endif /* _ASM_TILE_PCI_BRIDGE_H */ | ||
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h index b0c15da2d5d5..c3fc458a0d32 100644 --- a/arch/tile/include/asm/pci.h +++ b/arch/tile/include/asm/pci.h | |||
@@ -15,7 +15,29 @@ | |||
15 | #ifndef _ASM_TILE_PCI_H | 15 | #ifndef _ASM_TILE_PCI_H |
16 | #define _ASM_TILE_PCI_H | 16 | #define _ASM_TILE_PCI_H |
17 | 17 | ||
18 | #include <asm/pci-bridge.h> | 18 | #include <linux/pci.h> |
19 | |||
20 | /* | ||
21 | * Structure of a PCI controller (host bridge) | ||
22 | */ | ||
23 | struct pci_controller { | ||
24 | int index; /* PCI domain number */ | ||
25 | struct pci_bus *root_bus; | ||
26 | |||
27 | int first_busno; | ||
28 | int last_busno; | ||
29 | |||
30 | int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */ | ||
31 | int hv_mem_fd; /* fd to Hypervisor for MMIO operations */ | ||
32 | |||
33 | struct pci_ops *ops; | ||
34 | |||
35 | int irq_base; /* Base IRQ from the Hypervisor */ | ||
36 | int plx_gen1; /* flag for PLX Gen 1 configuration */ | ||
37 | |||
38 | /* Address ranges that are routed to this controller/bridge. */ | ||
39 | struct resource mem_resources[3]; | ||
40 | }; | ||
19 | 41 | ||
20 | /* | 42 | /* |
21 | * The hypervisor maps the entirety of CPA-space as bus addresses, so | 43 | * The hypervisor maps the entirety of CPA-space as bus addresses, so |
@@ -24,56 +46,12 @@ | |||
24 | */ | 46 | */ |
25 | #define PCI_DMA_BUS_IS_PHYS 1 | 47 | #define PCI_DMA_BUS_IS_PHYS 1 |
26 | 48 | ||
27 | struct pci_controller *pci_bus_to_hose(int bus); | ||
28 | unsigned char __init common_swizzle(struct pci_dev *dev, unsigned char *pinp); | ||
29 | int __init tile_pci_init(void); | 49 | int __init tile_pci_init(void); |
30 | void pci_iounmap(struct pci_dev *dev, void __iomem *addr); | ||
31 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); | ||
32 | void __devinit pcibios_fixup_bus(struct pci_bus *bus); | ||
33 | 50 | ||
34 | int __devinit _tile_cfg_read(struct pci_controller *hose, | 51 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); |
35 | int bus, | 52 | static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} |
36 | int slot, | ||
37 | int function, | ||
38 | int offset, | ||
39 | int size, | ||
40 | u32 *val); | ||
41 | int __devinit _tile_cfg_write(struct pci_controller *hose, | ||
42 | int bus, | ||
43 | int slot, | ||
44 | int function, | ||
45 | int offset, | ||
46 | int size, | ||
47 | u32 val); | ||
48 | 53 | ||
49 | /* | 54 | void __devinit pcibios_fixup_bus(struct pci_bus *bus); |
 50 | * These are used to do config reads and writes in the early stages of | ||
51 | * setup before the driver infrastructure has been set up enough to be | ||
52 | * able to do config reads and writes. | ||
53 | */ | ||
54 | #define early_cfg_read(where, size, value) \ | ||
55 | _tile_cfg_read(controller, \ | ||
56 | current_bus, \ | ||
57 | pci_slot, \ | ||
58 | pci_fn, \ | ||
59 | where, \ | ||
60 | size, \ | ||
61 | value) | ||
62 | |||
63 | #define early_cfg_write(where, size, value) \ | ||
64 | _tile_cfg_write(controller, \ | ||
65 | current_bus, \ | ||
66 | pci_slot, \ | ||
67 | pci_fn, \ | ||
68 | where, \ | ||
69 | size, \ | ||
70 | value) | ||
71 | |||
72 | |||
73 | |||
74 | #define PCICFG_BYTE 1 | ||
75 | #define PCICFG_WORD 2 | ||
76 | #define PCICFG_DWORD 4 | ||
77 | 55 | ||
78 | #define TILE_NUM_PCIE 2 | 56 | #define TILE_NUM_PCIE 2 |
79 | 57 | ||
@@ -88,33 +66,33 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
88 | } | 66 | } |
89 | 67 | ||
90 | /* | 68 | /* |
91 | * I/O space is currently not supported. | 69 | * pcibios_assign_all_busses() tells whether or not the bus numbers |
70 | * should be reassigned, in case the BIOS didn't do it correctly, or | ||
71 | * in case we don't have a BIOS and we want to let Linux do it. | ||
92 | */ | 72 | */ |
73 | static inline int pcibios_assign_all_busses(void) | ||
74 | { | ||
75 | return 1; | ||
76 | } | ||
93 | 77 | ||
94 | #define TILE_PCIE_LOWER_IO 0x0 | 78 | /* |
95 | #define TILE_PCIE_UPPER_IO 0x10000 | 79 | * No special bus mastering setup handling. |
96 | #define TILE_PCIE_PCIE_IO_SIZE 0x0000FFFF | 80 | */ |
97 | |||
98 | #define _PAGE_NO_CACHE 0 | ||
99 | #define _PAGE_GUARDED 0 | ||
100 | |||
101 | |||
102 | #define pcibios_assign_all_busses() pci_assign_all_buses | ||
103 | extern int pci_assign_all_buses; | ||
104 | |||
105 | static inline void pcibios_set_master(struct pci_dev *dev) | 81 | static inline void pcibios_set_master(struct pci_dev *dev) |
106 | { | 82 | { |
107 | /* No special bus mastering setup handling */ | ||
108 | } | 83 | } |
109 | 84 | ||
110 | #define PCIBIOS_MIN_MEM 0 | 85 | #define PCIBIOS_MIN_MEM 0 |
111 | #define PCIBIOS_MIN_IO TILE_PCIE_LOWER_IO | 86 | #define PCIBIOS_MIN_IO 0 |
112 | 87 | ||
113 | /* | 88 | /* |
114 | * This flag tells whether the platform is a TILEmpower, which needs | 89 | * This flag tells whether the platform is a TILEmpower, which needs |
115 | * special configuration for the PLX switch chip. | 90 | * special configuration for the PLX switch chip. |
116 | */ | 91 | */ |
117 | extern int blade_pci; | 92 | extern int tile_plx_gen1; |
93 | |||
94 | /* Use any cpu for PCI. */ | ||
95 | #define cpumask_of_pcibus(bus) cpu_online_mask | ||
118 | 96 | ||
119 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | 97 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ |
120 | #include <asm-generic/pci-dma-compat.h> | 98 | #include <asm-generic/pci-dma-compat.h> |
@@ -122,7 +100,4 @@ extern int blade_pci; | |||
122 | /* generic pci stuff */ | 100 | /* generic pci stuff */ |
123 | #include <asm-generic/pci.h> | 101 | #include <asm-generic/pci.h> |
124 | 102 | ||
125 | /* Use any cpu for PCI. */ | ||
126 | #define cpumask_of_pcibus(bus) cpu_online_mask | ||
127 | |||
128 | #endif /* _ASM_TILE_PCI_H */ | 103 | #endif /* _ASM_TILE_PCI_H */ |
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h index 1747ff3946b2..a9e7c8760334 100644 --- a/arch/tile/include/asm/processor.h +++ b/arch/tile/include/asm/processor.h | |||
@@ -292,8 +292,18 @@ extern int kstack_hash; | |||
292 | /* Are we using huge pages in the TLB for kernel data? */ | 292 | /* Are we using huge pages in the TLB for kernel data? */ |
293 | extern int kdata_huge; | 293 | extern int kdata_huge; |
294 | 294 | ||
295 | /* Support standard Linux prefetching. */ | ||
296 | #define ARCH_HAS_PREFETCH | ||
297 | #define prefetch(x) __builtin_prefetch(x) | ||
295 | #define PREFETCH_STRIDE CHIP_L2_LINE_SIZE() | 298 | #define PREFETCH_STRIDE CHIP_L2_LINE_SIZE() |
296 | 299 | ||
300 | /* Bring a value into the L1D, faulting the TLB if necessary. */ | ||
301 | #ifdef __tilegx__ | ||
302 | #define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x)) | ||
303 | #else | ||
304 | #define prefetch_L1(x) __insn_prefetch_L1((void *)(x)) | ||
305 | #endif | ||
306 | |||
297 | #else /* __ASSEMBLY__ */ | 307 | #else /* __ASSEMBLY__ */ |
298 | 308 | ||
299 | /* Do some slow action (e.g. read a slow SPR). */ | 309 | /* Do some slow action (e.g. read a slow SPR). */ |
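Editor's sketch: with ARCH_HAS_PREFETCH defined above, prefetch() maps onto __builtin_prefetch() and PREFETCH_STRIDE is the L2 line size, which is the step the generic prefetch helpers use; prefetch_L1() additionally faults the TLB so the line really lands in the L1D. A short sketch of striding a buffer by cache line before a loop that will read it, in the spirit of the generic prefetch_range() helper:

    /* Touch every cache line of a buffer ahead of the loop that will read it. */
    static inline void prefetch_buffer(const void *addr, unsigned long len)
    {
            const char *p = addr;
            const char *end = p + len;

            for (; p < end; p += PREFETCH_STRIDE)
                    prefetch((void *)p);
    }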
diff --git a/arch/tile/include/hv/drv_xgbe_impl.h b/arch/tile/include/hv/drv_xgbe_impl.h new file mode 100644 index 000000000000..3a73b2b44913 --- /dev/null +++ b/arch/tile/include/hv/drv_xgbe_impl.h | |||
@@ -0,0 +1,300 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file drivers/xgbe/impl.h | ||
17 | * Implementation details for the NetIO library. | ||
18 | */ | ||
19 | |||
20 | #ifndef __DRV_XGBE_IMPL_H__ | ||
21 | #define __DRV_XGBE_IMPL_H__ | ||
22 | |||
23 | #include <hv/netio_errors.h> | ||
24 | #include <hv/netio_intf.h> | ||
25 | #include <hv/drv_xgbe_intf.h> | ||
26 | |||
27 | |||
28 | /** How many groups we have (log2). */ | ||
29 | #define LOG2_NUM_GROUPS (12) | ||
30 | /** How many groups we have. */ | ||
31 | #define NUM_GROUPS (1 << LOG2_NUM_GROUPS) | ||
32 | |||
33 | /** Number of output requests we'll buffer per tile. */ | ||
34 | #define EPP_REQS_PER_TILE (32) | ||
35 | |||
36 | /** Words used in an eDMA command without checksum acceleration. */ | ||
37 | #define EDMA_WDS_NO_CSUM 8 | ||
38 | /** Words used in an eDMA command with checksum acceleration. */ | ||
39 | #define EDMA_WDS_CSUM 10 | ||
40 | /** Total available words in the eDMA command FIFO. */ | ||
41 | #define EDMA_WDS_TOTAL 128 | ||
42 | |||
43 | |||
44 | /* | ||
45 | * FIXME: These definitions are internal and should have underscores! | ||
46 | * NOTE: The actual numeric values here are intentional and allow us to | ||
47 | * optimize the concept "if small ... else if large ... else ...", by | ||
48 | * checking for the low bit being set, and then for non-zero. | ||
49 | * These are used as array indices, so they must have the values (0, 1, 2) | ||
50 | * in some order. | ||
51 | */ | ||
52 | #define SIZE_SMALL (1) /**< Small packet queue. */ | ||
53 | #define SIZE_LARGE (2) /**< Large packet queue. */ | ||
54 | #define SIZE_JUMBO (0) /**< Jumbo packet queue. */ | ||
55 | |||
56 | /** The number of "SIZE_xxx" values. */ | ||
57 | #define NETIO_NUM_SIZES 3 | ||
58 | |||
59 | |||
60 | /* | ||
61 | * Default numbers of packets for IPP drivers. These values are chosen | ||
62 | * such that CIPP1 will not overflow its L2 cache. | ||
63 | */ | ||
64 | |||
65 | /** The default number of small packets. */ | ||
66 | #define NETIO_DEFAULT_SMALL_PACKETS 2750 | ||
67 | /** The default number of large packets. */ | ||
68 | #define NETIO_DEFAULT_LARGE_PACKETS 2500 | ||
69 | /** The default number of jumbo packets. */ | ||
70 | #define NETIO_DEFAULT_JUMBO_PACKETS 250 | ||
71 | |||
72 | |||
73 | /** Log2 of the size of a memory arena. */ | ||
74 | #define NETIO_ARENA_SHIFT 24 /* 16 MB */ | ||
75 | /** Size of a memory arena. */ | ||
76 | #define NETIO_ARENA_SIZE (1 << NETIO_ARENA_SHIFT) | ||
77 | |||
78 | |||
79 | /** A queue of packets. | ||
80 | * | ||
81 | * This structure partially defines a queue of packets waiting to be | ||
82 | * processed. The queue as a whole is written to by an interrupt handler and | ||
83 | * read by non-interrupt code; this data structure is what's touched by the | ||
84 | * interrupt handler. The other part of the queue state, the read offset, is | ||
85 | * kept in user space, not in hypervisor space, so it is in a separate data | ||
86 | * structure. | ||
87 | * | ||
88 | * The read offset (__packet_receive_read in the user part of the queue | ||
89 | * structure) points to the next packet to be read. When the read offset is | ||
90 | * equal to the write offset, the queue is empty; therefore the queue must | ||
91 | * contain one more slot than the required maximum queue size. | ||
92 | * | ||
93 | * Here's an example of all 3 state variables and what they mean. All | ||
94 | * pointers move left to right. | ||
95 | * | ||
96 | * @code | ||
97 | * I I V V V V I I I I | ||
98 | * 0 1 2 3 4 5 6 7 8 9 10 | ||
99 | * ^ ^ ^ ^ | ||
100 | * | | | | ||
101 | * | | __last_packet_plus_one | ||
102 | * | __buffer_write | ||
103 | * __packet_receive_read | ||
104 | * @endcode | ||
105 | * | ||
106 | * This queue has 10 slots, and thus can hold 9 packets (_last_packet_plus_one | ||
107 | * = 10). The read pointer is at 2, and the write pointer is at 6; thus, | ||
108 | * there are valid, unread packets in slots 2, 3, 4, and 5. The remaining | ||
109 | * slots are invalid (do not contain a packet). | ||
110 | */ | ||
111 | typedef struct { | ||
112 | /** Byte offset of the next notify packet to be written: zero for the first | ||
113 | * packet on the queue, sizeof (netio_pkt_t) for the second packet on the | ||
114 | * queue, etc. */ | ||
115 | volatile uint32_t __packet_write; | ||
116 | |||
117 | /** Offset of the packet after the last valid packet (i.e., when any | ||
118 | * pointer is incremented to this value, it wraps back to zero). */ | ||
119 | uint32_t __last_packet_plus_one; | ||
120 | } | ||
121 | __netio_packet_queue_t; | ||
122 | |||
123 | |||
124 | /** A queue of buffers. | ||
125 | * | ||
126 | * This structure partially defines a queue of empty buffers which have been | ||
127 | * obtained via requests to the IPP. (The elements of the queue are packet | ||
128 | * handles, which are transformed into a full netio_pkt_t when the buffer is | ||
129 | * retrieved.) The queue as a whole is written to by an interrupt handler and | ||
130 | * read by non-interrupt code; this data structure is what's touched by the | ||
131 | * interrupt handler. The other parts of the queue state, the read offset and | ||
132 | * requested write offset, are kept in user space, not in hypervisor space, so | ||
133 | * they are in a separate data structure. | ||
134 | * | ||
135 | * The read offset (__buffer_read in the user part of the queue structure) | ||
136 | * points to the next buffer to be read. When the read offset is equal to the | ||
137 | * write offset, the queue is empty; therefore the queue must contain one more | ||
138 | * slot than the required maximum queue size. | ||
139 | * | ||
140 | * The requested write offset (__buffer_requested_write in the user part of | ||
141 | * the queue structure) points to the slot which will hold the next buffer we | ||
142 | * request from the IPP, once we get around to sending such a request. When | ||
143 | * the requested write offset is equal to the write offset, no requests for | ||
144 | * new buffers are outstanding; when the requested write offset is one greater | ||
145 | * than the read offset, no more requests may be sent. | ||
146 | * | ||
147 | * Note that, unlike the packet_queue, the buffer_queue places incoming | ||
148 | * buffers at decreasing addresses. This makes the check for "is it time to | ||
149 | * wrap the buffer pointer" cheaper in the assembly code which receives new | ||
150 | * buffers, and means that the value which defines the queue size, | ||
151 | * __last_buffer, is different than in the packet queue. Also, the offset | ||
152 | * used in the packet_queue is already scaled by the size of a packet; here we | ||
153 | * use unscaled slot indices for the offsets. (These differences are | ||
154 | * historical, and in the future it's possible that the packet_queue will look | ||
155 | * more like this queue.) | ||
156 | * | ||
157 | * @code | ||
158 | * Here's an example of all 4 state variables and what they mean. Remember: | ||
159 | * all pointers move right to left. | ||
160 | * | ||
161 | * V V V I I R R V V V | ||
162 | * 0 1 2 3 4 5 6 7 8 9 | ||
163 | * ^ ^ ^ ^ | ||
164 | * | | | | | ||
165 | * | | | __last_buffer | ||
166 | * | | __buffer_write | ||
167 | * | __buffer_requested_write | ||
168 | * __buffer_read | ||
169 | * @endcode | ||
170 | * | ||
171 | * This queue has 10 slots, and thus can hold 9 buffers (_last_buffer = 9). | ||
172 | * The read pointer is at 2, and the write pointer is at 6; thus, there are | ||
173 | * valid, unread buffers in slots 2, 1, 0, 9, 8, and 7. The requested write | ||
174 | * pointer is at 4; thus, requests have been made to the IPP for buffers which | ||
175 | * will be placed in slots 6 and 5 when they arrive. Finally, the remaining | ||
176 | * slots are invalid (do not contain a buffer). | ||
177 | */ | ||
178 | typedef struct | ||
179 | { | ||
180 | /** Ordinal number of the next buffer to be written: 0 for the first slot in | ||
181 | * the queue, 1 for the second slot in the queue, etc. */ | ||
182 | volatile uint32_t __buffer_write; | ||
183 | |||
184 | /** Ordinal number of the last buffer (i.e., when any pointer is decremented | ||
185 | * below zero, it is reloaded with this value). */ | ||
186 | uint32_t __last_buffer; | ||
187 | } | ||
188 | __netio_buffer_queue_t; | ||
189 | |||
190 | |||
191 | /** | ||
192 | * An object for providing Ethernet packets to a process. | ||
193 | */ | ||
194 | typedef struct __netio_queue_impl_t | ||
195 | { | ||
196 | /** The queue of packets waiting to be received. */ | ||
197 | __netio_packet_queue_t __packet_receive_queue; | ||
198 | /** The intr bit mask that IDs this device. */ | ||
199 | unsigned int __intr_id; | ||
200 | /** Offset to queues of empty buffers, one per size. */ | ||
201 | uint32_t __buffer_queue[NETIO_NUM_SIZES]; | ||
202 | /** The address of the first EPP tile, or -1 if no EPP. */ | ||
203 | /* ISSUE: Actually this is always "0" or "~0". */ | ||
204 | uint32_t __epp_location; | ||
205 | /** The queue ID that this queue represents. */ | ||
206 | unsigned int __queue_id; | ||
207 | /** Number of acknowledgements received. */ | ||
208 | volatile uint32_t __acks_received; | ||
209 | /** Last completion number received for packet_sendv. */ | ||
210 | volatile uint32_t __last_completion_rcv; | ||
211 | /** Number of packets allowed to be outstanding. */ | ||
212 | uint32_t __max_outstanding; | ||
213 | /** First VA available for packets. */ | ||
214 | void* __va_0; | ||
215 | /** First VA in second range available for packets. */ | ||
216 | void* __va_1; | ||
217 | /** Padding to align the "__packets" field to the size of a netio_pkt_t. */ | ||
218 | uint32_t __padding[3]; | ||
219 | /** The packets themselves. */ | ||
220 | netio_pkt_t __packets[0]; | ||
221 | } | ||
222 | netio_queue_impl_t; | ||
223 | |||
224 | |||
225 | /** | ||
226 | * An object for managing the user end of a NetIO queue. | ||
227 | */ | ||
228 | typedef struct __netio_queue_user_impl_t | ||
229 | { | ||
230 | /** The next incoming packet to be read. */ | ||
231 | uint32_t __packet_receive_read; | ||
232 | /** The next empty buffers to be read, one index per size. */ | ||
233 | uint8_t __buffer_read[NETIO_NUM_SIZES]; | ||
234 | /** Where the empty buffer we next request from the IPP will go, one index | ||
235 | * per size. */ | ||
236 | uint8_t __buffer_requested_write[NETIO_NUM_SIZES]; | ||
237 | /** PCIe interface flag. */ | ||
238 | uint8_t __pcie; | ||
239 | /** Number of packets left to be received before we send a credit update. */ | ||
240 | uint32_t __receive_credit_remaining; | ||
241 | /** Value placed in __receive_credit_remaining when it reaches zero. */ | ||
242 | uint32_t __receive_credit_interval; | ||
243 | /** First fast I/O routine index. */ | ||
244 | uint32_t __fastio_index; | ||
245 | /** Number of acknowledgements expected. */ | ||
246 | uint32_t __acks_outstanding; | ||
247 | /** Last completion number requested. */ | ||
248 | uint32_t __last_completion_req; | ||
249 | /** File descriptor for driver. */ | ||
250 | int __fd; | ||
251 | } | ||
252 | netio_queue_user_impl_t; | ||
253 | |||
254 | |||
255 | #define NETIO_GROUP_CHUNK_SIZE 64 /**< Max # groups in one IPP request */ | ||
256 | #define NETIO_BUCKET_CHUNK_SIZE 64 /**< Max # buckets in one IPP request */ | ||
257 | |||
258 | |||
259 | /** Internal structure used to convey packet send information to the | ||
260 | * hypervisor. FIXME: Actually, it's not used for that anymore, but | ||
261 | * netio_packet_send() still uses it internally. | ||
262 | */ | ||
263 | typedef struct | ||
264 | { | ||
265 | uint16_t flags; /**< Packet flags (__NETIO_SEND_FLG_xxx) */ | ||
266 | uint16_t transfer_size; /**< Size of packet */ | ||
267 | uint32_t va; /**< VA of start of packet */ | ||
268 | __netio_pkt_handle_t handle; /**< Packet handle */ | ||
269 | uint32_t csum0; /**< First checksum word */ | ||
270 | uint32_t csum1; /**< Second checksum word */ | ||
271 | } | ||
272 | __netio_send_cmd_t; | ||
273 | |||
274 | |||
275 | /** Flags used in two contexts: | ||
276 | * - As the "flags" member in the __netio_send_cmd_t, above; used only | ||
277 | * for netio_pkt_send_{prepare,commit}. | ||
278 | * - As part of the flags passed to the various send packet fast I/O calls. | ||
279 | */ | ||
280 | |||
281 | /** Need acknowledgement on this packet. Note that some code in the | ||
282 | * normal send_pkt fast I/O handler assumes that this is equal to 1. */ | ||
283 | #define __NETIO_SEND_FLG_ACK 0x1 | ||
284 | |||
285 | /** Do checksum on this packet. (Only used with the __netio_send_cmd_t; | ||
286 | * normal packet sends use a special fast I/O index to denote checksumming, | ||
287 | * and multi-segment sends test the checksum descriptor.) */ | ||
288 | #define __NETIO_SEND_FLG_CSUM 0x2 | ||
289 | |||
290 | /** Get a completion on this packet. Only used with multi-segment sends. */ | ||
291 | #define __NETIO_SEND_FLG_COMPLETION 0x4 | ||
292 | |||
293 | /** Position of the number-of-extra-segments value in the flags word. | ||
294 | Only used with multi-segment sends. */ | ||
295 | #define __NETIO_SEND_FLG_XSEG_SHIFT 3 | ||
296 | |||
297 | /** Width of the number-of-extra-segments value in the flags word. */ | ||
298 | #define __NETIO_SEND_FLG_XSEG_WIDTH 2 | ||
299 | |||
300 | #endif /* __DRV_XGBE_IMPL_H__ */ | ||
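Editor's sketch: the NetIO queue comments above reduce to two invariants: a queue is empty exactly when its read offset equals its write offset, and it therefore carries one more slot than its usable capacity, with __last_packet_plus_one as the wrap point for offsets that are pre-scaled by sizeof(netio_pkt_t) (the buffer queue runs in the opposite direction and uses unscaled indices). A minimal sketch of those checks with simplified fields; in the real layout the state is split between the hypervisor-side and user-side structures:

    #include <stdint.h>

    struct pkt_queue {                       /* simplified: both offsets in one place    */
            volatile uint32_t write;         /* byte offset of next packet to be written */
            uint32_t read;                   /* byte offset of next packet to be read    */
            uint32_t last_plus_one;          /* wrap point: (capacity + 1) * slot_size   */
            uint32_t slot_size;              /* sizeof(netio_pkt_t) in the real layout   */
    };

    static int queue_empty(const struct pkt_queue *q)
    {
            return q->read == q->write;      /* equal offsets means nothing to consume */
    }

    static uint32_t advance(const struct pkt_queue *q, uint32_t off)
    {
            off += q->slot_size;
            return off == q->last_plus_one ? 0 : off;   /* wrap back to slot 0 */
    }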
diff --git a/arch/tile/include/hv/drv_xgbe_intf.h b/arch/tile/include/hv/drv_xgbe_intf.h new file mode 100644 index 000000000000..146e47d5334b --- /dev/null +++ b/arch/tile/include/hv/drv_xgbe_intf.h | |||
@@ -0,0 +1,615 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file drv_xgbe_intf.h | ||
17 | * Interface to the hypervisor XGBE driver. | ||
18 | */ | ||
19 | |||
20 | #ifndef __DRV_XGBE_INTF_H__ | ||
21 | #define __DRV_XGBE_INTF_H__ | ||
22 | |||
23 | /** | ||
24 | * An object for forwarding VAs and PAs to the hypervisor. | ||
25 | * @ingroup types | ||
26 | * | ||
27 | * This allows the supervisor to specify a number of areas of memory to | ||
28 | * store packet buffers. | ||
29 | */ | ||
30 | typedef struct | ||
31 | { | ||
32 | /** The physical address of the memory. */ | ||
33 | HV_PhysAddr pa; | ||
34 | /** Page table entry for the memory. This is only used to derive the | ||
35 | * memory's caching mode; the PA bits are ignored. */ | ||
36 | HV_PTE pte; | ||
37 | /** The virtual address of the memory. */ | ||
38 | HV_VirtAddr va; | ||
39 | /** Size (in bytes) of the memory area. */ | ||
40 | int size; | ||
41 | |||
42 | } | ||
43 | netio_ipp_address_t; | ||
44 | |||
45 | /** The various pread/pwrite offsets into the hypervisor-level driver. | ||
46 | * @ingroup types | ||
47 | */ | ||
48 | typedef enum | ||
49 | { | ||
50 | /** Inform the Linux driver of the address of the NetIO arena memory. | ||
51 | * This offset is actually only used to convey information from netio | ||
52 | * to the Linux driver; it never makes it from there to the hypervisor. | ||
53 | * Write-only; takes a uint32_t specifying the VA address. */ | ||
54 | NETIO_FIXED_ADDR = 0x5000000000000000ULL, | ||
55 | |||
56 | /** Inform the Linux driver of the size of the NetIO arena memory. | ||
57 | * This offset is actually only used to convey information from netio | ||
58 | * to the Linux driver; it never makes it from there to the hypervisor. | ||
59 | * Write-only; takes a uint32_t specifying the VA size. */ | ||
60 | NETIO_FIXED_SIZE = 0x5100000000000000ULL, | ||
61 | |||
62 | /** Register current tile with IPP. Write then read: write, takes a | ||
63 | * netio_input_config_t, read returns a pointer to a netio_queue_impl_t. */ | ||
64 | NETIO_IPP_INPUT_REGISTER_OFF = 0x6000000000000000ULL, | ||
65 | |||
66 | /** Unregister current tile from IPP. Write-only, takes a dummy argument. */ | ||
67 | NETIO_IPP_INPUT_UNREGISTER_OFF = 0x6100000000000000ULL, | ||
68 | |||
69 | /** Start packets flowing. Write-only, takes a dummy argument. */ | ||
70 | NETIO_IPP_INPUT_INIT_OFF = 0x6200000000000000ULL, | ||
71 | |||
72 | /** Stop packets flowing. Write-only, takes a dummy argument. */ | ||
73 | NETIO_IPP_INPUT_UNINIT_OFF = 0x6300000000000000ULL, | ||
74 | |||
75 | /** Configure group (typically we group on VLAN). Write-only: takes an | ||
76 | * array of netio_group_t's, low 24 bits of the offset is the base group | ||
77 | * number times the size of a netio_group_t. */ | ||
78 | NETIO_IPP_INPUT_GROUP_CFG_OFF = 0x6400000000000000ULL, | ||
79 | |||
80 | /** Configure bucket. Write-only: takes an array of netio_bucket_t's, low | ||
81 | * 24 bits of the offset is the base bucket number times the size of a | ||
82 | * netio_bucket_t. */ | ||
83 | NETIO_IPP_INPUT_BUCKET_CFG_OFF = 0x6500000000000000ULL, | ||
84 | |||
85 | /** Get/set a parameter. Read or write: read or write data is the parameter | ||
86 | * value, low 32 bits of the offset is a __netio_getset_offset_t. */ | ||
87 | NETIO_IPP_PARAM_OFF = 0x6600000000000000ULL, | ||
88 | |||
89 | /** Get fast I/O index. Read-only; returns a 4-byte base index value. */ | ||
90 | NETIO_IPP_GET_FASTIO_OFF = 0x6700000000000000ULL, | ||
91 | |||
92 | /** Configure hijack IP address. Packets with this IPv4 dest address | ||
93 | * go to bucket NETIO_NUM_BUCKETS - 1. Write-only: takes an IP address | ||
94 | * in some standard form. FIXME: Define the form! */ | ||
95 | NETIO_IPP_INPUT_HIJACK_CFG_OFF = 0x6800000000000000ULL, | ||
96 | |||
97 | /** | ||
98 | * Offsets beyond this point are reserved for the supervisor (although that | ||
99 | * enforcement must be done by the supervisor driver itself). | ||
100 | */ | ||
101 | NETIO_IPP_USER_MAX_OFF = 0x6FFFFFFFFFFFFFFFULL, | ||
102 | |||
103 | /** Register I/O memory. Write-only, takes a netio_ipp_address_t. */ | ||
104 | NETIO_IPP_IOMEM_REGISTER_OFF = 0x7000000000000000ULL, | ||
105 | |||
106 | /** Unregister I/O memory. Write-only, takes a netio_ipp_address_t. */ | ||
107 | NETIO_IPP_IOMEM_UNREGISTER_OFF = 0x7100000000000000ULL, | ||
108 | |||
109 | /* Offsets greater than 0x7FFFFFFF can't be used directly from Linux | ||
110 | * userspace code due to limitations in the pread/pwrite syscalls. */ | ||
111 | |||
112 | /** Drain LIPP buffers. */ | ||
113 | NETIO_IPP_DRAIN_OFF = 0xFA00000000000000ULL, | ||
114 | |||
115 | /** Supply a netio_ipp_address_t to be used as shared memory for the | ||
116 | * LEPP command queue. */ | ||
117 | NETIO_EPP_SHM_OFF = 0xFB00000000000000ULL, | ||
118 | |||
119 | /* 0xFC... is currently unused. */ | ||
120 | |||
121 | /** Stop IPP/EPP tiles. Write-only, takes a dummy argument. */ | ||
122 | NETIO_IPP_STOP_SHIM_OFF = 0xFD00000000000000ULL, | ||
123 | |||
124 | /** Start IPP/EPP tiles. Write-only, takes a dummy argument. */ | ||
125 | NETIO_IPP_START_SHIM_OFF = 0xFE00000000000000ULL, | ||
126 | |||
127 | /** Supply packet arena. Write-only, takes an array of | ||
128 | * netio_ipp_address_t values. */ | ||
129 | NETIO_IPP_ADDRESS_OFF = 0xFF00000000000000ULL, | ||
130 | } netio_hv_offset_t; | ||
131 | |||
132 | /** Extract the base offset from an offset */ | ||
133 | #define NETIO_BASE_OFFSET(off) ((off) & 0xFF00000000000000ULL) | ||
134 | /** Extract the local offset from an offset */ | ||
135 | #define NETIO_LOCAL_OFFSET(off) ((off) & 0x00FFFFFFFFFFFFFFULL) | ||
136 | |||
137 | |||
138 | /** | ||
139 | * Get/set offset. | ||
140 | */ | ||
141 | typedef union | ||
142 | { | ||
143 | struct | ||
144 | { | ||
145 | uint64_t addr:48; /**< Class-specific address */ | ||
146 | unsigned int class:8; /**< Class (e.g., NETIO_PARAM) */ | ||
147 | unsigned int opcode:8; /**< High 8 bits of NETIO_IPP_PARAM_OFF */ | ||
148 | } | ||
149 | bits; /**< Bitfields */ | ||
150 | uint64_t word; /**< Aggregated value to use as the offset */ | ||
151 | } | ||
152 | __netio_getset_offset_t; | ||
153 | |||
154 | /** | ||
155 | * Fast I/O index offsets (must be contiguous). | ||
156 | */ | ||
157 | typedef enum | ||
158 | { | ||
159 | NETIO_FASTIO_ALLOCATE = 0, /**< Get empty packet buffer */ | ||
160 | NETIO_FASTIO_FREE_BUFFER = 1, /**< Give buffer back to IPP */ | ||
161 | NETIO_FASTIO_RETURN_CREDITS = 2, /**< Give credits to IPP */ | ||
162 | NETIO_FASTIO_SEND_PKT_NOCK = 3, /**< Send a packet, no checksum */ | ||
163 | NETIO_FASTIO_SEND_PKT_CK = 4, /**< Send a packet, with checksum */ | ||
164 | NETIO_FASTIO_SEND_PKT_VEC = 5, /**< Send a vector of packets */ | ||
165 | NETIO_FASTIO_SENDV_PKT = 6, /**< Sendv one packet */ | ||
166 | NETIO_FASTIO_NUM_INDEX = 7, /**< Total number of fast I/O indices */ | ||
167 | } netio_fastio_index_t; | ||
168 | |||
169 | /** 3-word return type for Fast I/O call. */ | ||
170 | typedef struct | ||
171 | { | ||
172 | int err; /**< Error code. */ | ||
173 | uint32_t val0; /**< Value. Meaning depends upon the specific call. */ | ||
174 | uint32_t val1; /**< Value. Meaning depends upon the specific call. */ | ||
175 | } netio_fastio_rv3_t; | ||
176 | |||
177 | /** 0-argument fast I/O call */ | ||
178 | int __netio_fastio0(uint32_t fastio_index); | ||
179 | /** 1-argument fast I/O call */ | ||
180 | int __netio_fastio1(uint32_t fastio_index, uint32_t arg0); | ||
181 | /** 3-argument fast I/O call, 3-word return value */ | ||
182 | netio_fastio_rv3_t __netio_fastio3_rv3(uint32_t fastio_index, uint32_t arg0, | ||
183 | uint32_t arg1, uint32_t arg2); | ||
184 | /** 4-argument fast I/O call */ | ||
185 | int __netio_fastio4(uint32_t fastio_index, uint32_t arg0, uint32_t arg1, | ||
186 | uint32_t arg2, uint32_t arg3); | ||
187 | /** 6-argument fast I/O call */ | ||
188 | int __netio_fastio6(uint32_t fastio_index, uint32_t arg0, uint32_t arg1, | ||
189 | uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5); | ||
190 | /** 9-argument fast I/O call */ | ||
191 | int __netio_fastio9(uint32_t fastio_index, uint32_t arg0, uint32_t arg1, | ||
192 | uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5, | ||
193 | uint32_t arg6, uint32_t arg7, uint32_t arg8); | ||
194 | |||
195 | /** Allocate an empty packet. | ||
196 | * @param fastio_index Fast I/O index. | ||
197 | * @param size Size of the packet to allocate. | ||
198 | */ | ||
199 | #define __netio_fastio_allocate(fastio_index, size) \ | ||
200 | __netio_fastio1((fastio_index) + NETIO_FASTIO_ALLOCATE, size) | ||
201 | |||
202 | /** Free a buffer. | ||
203 | * @param fastio_index Fast I/O index. | ||
204 | * @param handle Handle for the packet to free. | ||
205 | */ | ||
206 | #define __netio_fastio_free_buffer(fastio_index, handle) \ | ||
207 | __netio_fastio1((fastio_index) + NETIO_FASTIO_FREE_BUFFER, handle) | ||
208 | |||
209 | /** Increment our receive credits. | ||
210 | * @param fastio_index Fast I/O index. | ||
211 | * @param credits Number of credits to add. | ||
212 | */ | ||
213 | #define __netio_fastio_return_credits(fastio_index, credits) \ | ||
214 | __netio_fastio1((fastio_index) + NETIO_FASTIO_RETURN_CREDITS, credits) | ||
215 | |||
216 | /** Send packet, no checksum. | ||
217 | * @param fastio_index Fast I/O index. | ||
218 | * @param ackflag Nonzero if we want an ack. | ||
219 | * @param size Size of the packet. | ||
220 | * @param va Virtual address of start of packet. | ||
221 | * @param handle Packet handle. | ||
222 | */ | ||
223 | #define __netio_fastio_send_pkt_nock(fastio_index, ackflag, size, va, handle) \ | ||
224 | __netio_fastio4((fastio_index) + NETIO_FASTIO_SEND_PKT_NOCK, ackflag, \ | ||
225 | size, va, handle) | ||
226 | |||
227 | /** Send packet, calculate checksum. | ||
228 | * @param fastio_index Fast I/O index. | ||
229 | * @param ackflag Nonzero if we want an ack. | ||
230 | * @param size Size of the packet. | ||
231 | * @param va Virtual address of start of packet. | ||
232 | * @param handle Packet handle. | ||
233 | * @param csum0 Shim checksum header. | ||
234 | * @param csum1 Checksum seed. | ||
235 | */ | ||
236 | #define __netio_fastio_send_pkt_ck(fastio_index, ackflag, size, va, handle, \ | ||
237 | csum0, csum1) \ | ||
238 | __netio_fastio6((fastio_index) + NETIO_FASTIO_SEND_PKT_CK, ackflag, \ | ||
239 | size, va, handle, csum0, csum1) | ||
240 | |||
241 | |||
242 | /** Format for the "csum0" argument to the __netio_fastio_send routines | ||
243 | * and LEPP. Note that this is currently exactly identical to the | ||
244 | * ShimProtocolOffloadHeader. | ||
245 | */ | ||
246 | typedef union | ||
247 | { | ||
248 | struct | ||
249 | { | ||
250 | unsigned int start_byte:7; /**< The first byte to be checksummed */ | ||
251 | unsigned int count:14; /**< Number of bytes to be checksummed. */ | ||
252 | unsigned int destination_byte:7; /**< The byte to write the checksum to. */ | ||
253 | unsigned int reserved:4; /**< Reserved. */ | ||
254 | } bits; /**< Decomposed method of access. */ | ||
255 | unsigned int word; /**< To send out the IDN. */ | ||
256 | } __netio_checksum_header_t; | ||
257 | |||
258 | |||
259 | /** Sendv packet with 1 or 2 segments. | ||
260 | * @param fastio_index Fast I/O index. | ||
261 | * @param flags Ack/csum/notify flags in low 3 bits; number of segments minus | ||
262 | * 1 in next 2 bits; expected checksum in high 16 bits. | ||
263 | * @param confno Confirmation number to request, if notify flag set. | ||
264 | * @param csum0 Checksum descriptor; if zero, no checksum. | ||
265 | * @param va_F Virtual address of first segment. | ||
266 | * @param va_L Virtual address of last segment, if 2 segments. | ||
267 | * @param len_F_L Length of first segment in low 16 bits; length of last | ||
268 | * segment, if 2 segments, in high 16 bits. | ||
269 | */ | ||
270 | #define __netio_fastio_sendv_pkt_1_2(fastio_index, flags, confno, csum0, \ | ||
271 | va_F, va_L, len_F_L) \ | ||
272 | __netio_fastio6((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \ | ||
273 | csum0, va_F, va_L, len_F_L) | ||
274 | |||
275 | /** Send packet on PCIe interface. | ||
276 | * @param fastio_index Fast I/O index. | ||
277 | * @param flags Ack/csum/notify flags in low 3 bits. | ||
278 | * @param confno Confirmation number to request, if notify flag set. | ||
279 | * @param csum0 Checksum descriptor; Hard wired 0, not needed for PCIe. | ||
280 | * @param va_F Virtual address of the packet buffer. | ||
281 | * @param va_L Virtual address of last segment, if 2 segments. Hard wired 0. | ||
282 | * @param len_F_L Length of the packet buffer in low 16 bits. | ||
283 | */ | ||
284 | #define __netio_fastio_send_pcie_pkt(fastio_index, flags, confno, csum0, \ | ||
285 | va_F, va_L, len_F_L) \ | ||
286 | __netio_fastio6((fastio_index) + PCIE_FASTIO_SENDV_PKT, flags, confno, \ | ||
287 | csum0, va_F, va_L, len_F_L) | ||
288 | |||
289 | /** Sendv packet with 3 or 4 segments. | ||
290 | * @param fastio_index Fast I/O index. | ||
291 | * @param flags Ack/csum/notify flags in low 3 bits; number of segments minus | ||
292 | * 1 in next 2 bits; expected checksum in high 16 bits. | ||
293 | * @param confno Confirmation number to request, if notify flag set. | ||
294 | * @param csum0 Checksum descriptor; if zero, no checksum. | ||
295 | * @param va_F Virtual address of first segment. | ||
296 | * @param va_L Virtual address of last segment (third segment if 3 segments, | ||
297 | * fourth segment if 4 segments). | ||
298 | * @param len_F_L Length of first segment in low 16 bits; length of last | ||
299 | * segment in high 16 bits. | ||
300 | * @param va_M0 Virtual address of "middle 0" segment; this segment is sent | ||
301 | * second when there are three segments, and third if there are four. | ||
302 | * @param va_M1 Virtual address of "middle 1" segment; this segment is sent | ||
303 | * second when there are four segments. | ||
304 | * @param len_M0_M1 Length of middle 0 segment in low 16 bits; length of middle | ||
305 | * 1 segment, if 4 segments, in high 16 bits. | ||
306 | */ | ||
307 | #define __netio_fastio_sendv_pkt_3_4(fastio_index, flags, confno, csum0, va_F, \ | ||
308 | va_L, len_F_L, va_M0, va_M1, len_M0_M1) \ | ||
309 | __netio_fastio9((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \ | ||
310 | csum0, va_F, va_L, len_F_L, va_M0, va_M1, len_M0_M1) | ||
311 | |||
312 | /** Send vector of packets. | ||
313 | * @param fastio_index Fast I/O index. | ||
314 | * @param seqno Number of packets transmitted so far on this interface; | ||
315 | * used to decide which packets should be acknowledged. | ||
316 | * @param nentries Number of entries in vector. | ||
317 | * @param va Virtual address of start of vector entry array. | ||
318 | * @return 3-word netio_fastio_rv3_t structure. The structure's err member | ||
319 | * is an error code, or zero if no error. The val0 member is the | ||
320 | * updated value of seqno; it has been incremented by 1 for each | ||
321 | * packet sent. That increment may be less than nentries if an | ||
322 | * error occurred, or if some of the entries in the vector contain | ||
323 | * handles equal to NETIO_PKT_HANDLE_NONE. The val1 member is the | ||
324 | * updated value of nentries; it has been decremented by 1 for each | ||
325 | * vector entry processed. Again, that decrement may be less than | ||
326 | * nentries (leaving the returned value positive) if an error | ||
327 | * occurred. | ||
328 | */ | ||
329 | #define __netio_fastio_send_pkt_vec(fastio_index, seqno, nentries, va) \ | ||
330 | __netio_fastio3_rv3((fastio_index) + NETIO_FASTIO_SEND_PKT_VEC, seqno, \ | ||
331 | nentries, va) | ||
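A sketch of consuming the documented return values. The netio_fastio_rv3_t type and its err/val0/val1 members are taken from the comment above (the type is defined earlier in this header); the wrapper name and parameter types are illustrative only.

static inline int
example_send_vector(int fastio_index, unsigned int *seqno,
                    unsigned int nentries, unsigned long va)
{
    netio_fastio_rv3_t rv =
        __netio_fastio_send_pkt_vec(fastio_index, *seqno, nentries, va);

    *seqno = rv.val0;           /* advanced once per packet actually sent */

    if (rv.err != 0)
        return rv.err;          /* error code, per the comment above */
    return (int)rv.val1;        /* vector entries left unprocessed */
}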
332 | |||
333 | |||
334 | /** An egress DMA command for LEPP. */ | ||
335 | typedef struct | ||
336 | { | ||
337 | /** Is this a TSO transfer? | ||
338 | * | ||
339 | * NOTE: This field is always 0, to distinguish it from | ||
340 | * lepp_tso_cmd_t. It must come first! | ||
341 | */ | ||
342 | uint8_t tso : 1; | ||
343 | |||
344 | /** Unused padding bits. */ | ||
345 | uint8_t _unused : 3; | ||
346 | |||
347 | /** Should this packet be sent directly from caches instead of DRAM, | ||
348 | * using hash-for-home to locate the packet data? | ||
349 | */ | ||
350 | uint8_t hash_for_home : 1; | ||
351 | |||
352 | /** Should we compute a checksum? */ | ||
353 | uint8_t compute_checksum : 1; | ||
354 | |||
355 | /** Is this the final buffer for this packet? | ||
356 | * | ||
357 | * A single packet can be split over several input buffers (a "gather" | ||
358 | * operation). This flag indicates that this is the last buffer | ||
359 | * in a packet. | ||
360 | */ | ||
361 | uint8_t end_of_packet : 1; | ||
362 | |||
363 | /** Should LEPP advance 'comp_busy' when this DMA is fully finished? */ | ||
364 | uint8_t send_completion : 1; | ||
365 | |||
366 | /** High bits of Client Physical Address of the start of the buffer | ||
367 | * to be egressed. | ||
368 | * | ||
369 | * NOTE: Only 6 bits are actually needed here, as CPAs are | ||
370 | * currently 38 bits. So two bits could be scavenged from this. | ||
371 | */ | ||
372 | uint8_t cpa_hi; | ||
373 | |||
374 | /** The number of bytes to be egressed. */ | ||
375 | uint16_t length; | ||
376 | |||
377 | /** Low 32 bits of Client Physical Address of the start of the buffer | ||
378 | * to be egressed. | ||
379 | */ | ||
380 | uint32_t cpa_lo; | ||
381 | |||
382 | /** Checksum information (only used if 'compute_checksum'). */ | ||
383 | __netio_checksum_header_t checksum_data; | ||
384 | |||
385 | } lepp_cmd_t; | ||
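A sketch of filling in a non-TSO egress command, based only on the field descriptions above; the helper name and the particular flag choices are illustrative, not taken from the driver.

static inline void
example_fill_lepp_cmd(lepp_cmd_t *cmd, unsigned long long cpa,
                      unsigned int len, int last_buffer)
{
    cmd->tso = 0;                        /* must be 0 for a non-TSO command */
    cmd->_unused = 0;
    cmd->hash_for_home = 0;              /* egress from DRAM, not caches */
    cmd->compute_checksum = 0;           /* checksum_data then stays unused */
    cmd->end_of_packet = last_buffer ? 1 : 0;
    cmd->send_completion = last_buffer ? 1 : 0;
    cmd->cpa_hi = (uint8_t)(cpa >> 32);  /* CPAs are currently 38 bits wide */
    cmd->length = (uint16_t)len;
    cmd->cpa_lo = (uint32_t)cpa;
}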
386 | |||
387 | |||
388 | /** A chunk of physical memory for a TSO egress. */ | ||
389 | typedef struct | ||
390 | { | ||
391 | /** The low bits of the CPA. */ | ||
392 | uint32_t cpa_lo; | ||
393 | /** The high bits of the CPA. */ | ||
394 | uint16_t cpa_hi : 15; | ||
395 | /** Should this packet be sent directly from caches instead of DRAM, | ||
396 | * using hash-for-home to locate the packet data? | ||
397 | */ | ||
398 | uint16_t hash_for_home : 1; | ||
399 | /** The length in bytes. */ | ||
400 | uint16_t length; | ||
401 | } lepp_frag_t; | ||
402 | |||
403 | |||
404 | /** An LEPP command that handles TSO. */ | ||
405 | typedef struct | ||
406 | { | ||
407 | /** Is this a TSO transfer? | ||
408 | * | ||
409 | * NOTE: This field is always 1, to distinguish it from | ||
410 | * lepp_cmd_t. It must come first! | ||
411 | */ | ||
412 | uint8_t tso : 1; | ||
413 | |||
414 | /** Unused padding bits. */ | ||
415 | uint8_t _unused : 7; | ||
416 | |||
417 | /** Size of the header[] array in bytes. It must be in the range | ||
418 | * [40, 127], which are the smallest header for a TCP packet over | ||
419 | * Ethernet and the maximum possible prepend size supported by | ||
420 | * hardware, respectively. Note that the array storage must be | ||
421 | * padded out to a multiple of four bytes so that the following | ||
422 | * LEPP command is aligned properly. | ||
423 | */ | ||
424 | uint8_t header_size; | ||
425 | |||
426 | /** Byte offset of the IP header in header[]. */ | ||
427 | uint8_t ip_offset; | ||
428 | |||
429 | /** Byte offset of the TCP header in header[]. */ | ||
430 | uint8_t tcp_offset; | ||
431 | |||
432 | /** The number of bytes to use for the payload of each packet, | ||
433 | * except of course the last one, which may not have enough bytes. | ||
434 | * This means that each Ethernet packet except the last will have a | ||
435 | * size of header_size + payload_size. | ||
436 | */ | ||
437 | uint16_t payload_size; | ||
438 | |||
439 | /** The length of the 'frags' array that follows this struct. */ | ||
440 | uint16_t num_frags; | ||
441 | |||
442 | /** The actual frags. */ | ||
443 | lepp_frag_t frags[0 /* Variable-sized; num_frags entries. */]; | ||
444 | |||
445 | /* | ||
446 | * The packet header template logically follows frags[], | ||
447 | * but you can't declare that in C. | ||
448 | * | ||
449 | * uint32_t header[header_size_in_words_rounded_up]; | ||
450 | */ | ||
451 | |||
452 | } lepp_tso_cmd_t; | ||
453 | |||
454 | |||
455 | /** An LEPP completion ring entry. */ | ||
456 | typedef void* lepp_comp_t; | ||
457 | |||
458 | |||
459 | /** Maximum number of frags for one TSO command. This is adapted from | ||
460 | * linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for | ||
461 | * our page size of exactly 65536. We add one for a "body" fragment. | ||
462 | */ | ||
463 | #define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1) | ||
464 | |||
465 | /** Total number of bytes needed for an lepp_tso_cmd_t. */ | ||
466 | #define LEPP_TSO_CMD_SIZE(num_frags, header_size) \ | ||
467 | (sizeof(lepp_tso_cmd_t) + \ | ||
468 | (num_frags) * sizeof(lepp_frag_t) + \ | ||
469 | (((header_size) + 3) & -4)) | ||
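A quick worked example of the size formula (values illustrative): a plain TCP/IPv4-over-Ethernet header template is 14 + 20 + 20 = 54 bytes, which the ((header_size) + 3) & -4 term rounds up to 56 so the command following the header template stays word-aligned.

static inline size_t
example_tso_cmd_bytes(void)
{
    /* A 3-fragment command with a 54-byte header template:
     * sizeof(lepp_tso_cmd_t) + 3 * sizeof(lepp_frag_t) + 56. */
    return LEPP_TSO_CMD_SIZE(3, 54);
}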
470 | |||
471 | /** The size of the lepp "cmd" queue. */ | ||
472 | #define LEPP_CMD_QUEUE_BYTES \ | ||
473 | (((CHIP_L2_CACHE_SIZE() - 2 * CHIP_L2_LINE_SIZE()) / \ | ||
474 | (sizeof(lepp_cmd_t) + sizeof(lepp_comp_t))) * sizeof(lepp_cmd_t)) | ||
475 | |||
476 | /** The largest possible command that can go in lepp_queue_t::cmds[]. */ | ||
477 | #define LEPP_MAX_CMD_SIZE LEPP_TSO_CMD_SIZE(LEPP_MAX_FRAGS, 128) | ||
478 | |||
479 | /** The largest possible value of lepp_queue_t::cmd_{head, tail} (inclusive). | ||
480 | */ | ||
481 | #define LEPP_CMD_LIMIT \ | ||
482 | (LEPP_CMD_QUEUE_BYTES - LEPP_MAX_CMD_SIZE) | ||
483 | |||
484 | /** The maximum number of completions in an LEPP queue. */ | ||
485 | #define LEPP_COMP_QUEUE_SIZE \ | ||
486 | ((LEPP_CMD_LIMIT + sizeof(lepp_cmd_t) - 1) / sizeof(lepp_cmd_t)) | ||
487 | |||
488 | /** Increment an index modulo the queue size. */ | ||
489 | #define LEPP_QINC(var) \ | ||
490 | (var = __insn_mnz(var - (LEPP_COMP_QUEUE_SIZE - 1), var + 1)) | ||
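LEPP_QINC() leans on the Tile mnz (mask-if-not-zero) instruction to avoid a branch; a portable sketch of the same modular increment, for clarity:

static inline void
example_qinc_portable(unsigned int *var)
{
    *var = (*var == LEPP_COMP_QUEUE_SIZE - 1) ? 0 : *var + 1;
}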
491 | |||
492 | /** A queue used to convey egress commands from the client to LEPP. */ | ||
493 | typedef struct | ||
494 | { | ||
495 | /** Index of first completion not yet processed by user code. | ||
496 | * If this is equal to comp_busy, there are no such completions. | ||
497 | * | ||
498 | * NOTE: This is only read/written by the user. | ||
499 | */ | ||
500 | unsigned int comp_head; | ||
501 | |||
502 | /** Index of first completion record not yet completed. | ||
503 | * If this is equal to comp_tail, there are no such completions. | ||
504 | * This index gets advanced (modulo LEPP_COMP_QUEUE_SIZE) whenever | ||
505 | * a command with the 'completion' bit set is finished. | ||
506 | * | ||
507 | * NOTE: This is only written by LEPP, only read by the user. | ||
508 | */ | ||
509 | volatile unsigned int comp_busy; | ||
510 | |||
511 | /** Index of the first empty slot in the completion ring. | ||
512 | * Entries from this up to but not including comp_head (in ring order) | ||
513 | * can be filled in with completion data. | ||
514 | * | ||
515 | * NOTE: This is only read/written by the user. | ||
516 | */ | ||
517 | unsigned int comp_tail; | ||
518 | |||
519 | /** Byte index of first command enqueued for LEPP but not yet processed. | ||
520 | * | ||
521 | * This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT. | ||
522 | * | ||
523 | * NOTE: LEPP advances this counter as soon as it no longer needs | ||
524 | * the cmds[] storage for this entry, but the transfer is not actually | ||
525 | * complete (i.e. the buffer pointed to by the command is no longer | ||
526 | * needed) until comp_busy advances. | ||
527 | * | ||
528 | * If this is equal to cmd_tail, the ring is empty. | ||
529 | * | ||
530 | * NOTE: This is only written by LEPP, only read by the user. | ||
531 | */ | ||
532 | volatile unsigned int cmd_head; | ||
533 | |||
534 | /** Byte index of first empty slot in the command ring. This field can | ||
535 | * be incremented up to but not equal to cmd_head (because that would | ||
536 | * mean the ring is empty). | ||
537 | * | ||
538 | * This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT. | ||
539 | * | ||
540 | * NOTE: This is read/written by the user, only read by LEPP. | ||
541 | */ | ||
542 | volatile unsigned int cmd_tail; | ||
543 | |||
544 | /** A ring of variable-sized egress DMA commands. | ||
545 | * | ||
546 | * NOTE: Only written by the user, only read by LEPP. | ||
547 | */ | ||
548 | char cmds[LEPP_CMD_QUEUE_BYTES] | ||
549 | __attribute__((aligned(CHIP_L2_LINE_SIZE()))); | ||
550 | |||
551 | /** A ring of user completion data. | ||
552 | * NOTE: Only read/written by the user. | ||
553 | */ | ||
554 | lepp_comp_t comps[LEPP_COMP_QUEUE_SIZE] | ||
555 | __attribute__((aligned(CHIP_L2_LINE_SIZE()))); | ||
556 | } lepp_queue_t; | ||
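Based on the field descriptions above, completions in [comp_head, comp_busy) are finished and waiting for the user; a minimal sketch of reaping them (the callback and function name are hypothetical):

static inline void
example_reap_completions(lepp_queue_t *q, void (*handle)(lepp_comp_t comp))
{
    unsigned int busy = q->comp_busy;    /* volatile; snapshot it once */

    while (q->comp_head != busy) {
        handle(q->comps[q->comp_head]);  /* completion data the user stored */
        LEPP_QINC(q->comp_head);
    }
}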
557 | |||
558 | |||
559 | /** An internal helper function for determining the number of entries | ||
560 | * available in a ring buffer, given that there is one sentinel. | ||
561 | */ | ||
562 | static inline unsigned int | ||
563 | _lepp_num_free_slots(unsigned int head, unsigned int tail) | ||
564 | { | ||
565 | /* | ||
566 | * One entry is reserved for use as a sentinel, to distinguish | ||
567 | * "empty" from "full". So we compute | ||
568 | * (head - tail - 1) % LEPP_COMP_QUEUE_SIZE, but without using a slow % operation. | ||
569 | */ | ||
570 | return (head - tail - 1) + ((head <= tail) ? LEPP_COMP_QUEUE_SIZE : 0); | ||
571 | } | ||
572 | |||
573 | |||
574 | /** Returns how many new comp entries can be enqueued. */ | ||
575 | static inline unsigned int | ||
576 | lepp_num_free_comp_slots(const lepp_queue_t* q) | ||
577 | { | ||
578 | return _lepp_num_free_slots(q->comp_head, q->comp_tail); | ||
579 | } | ||
580 | |||
581 | static inline int | ||
582 | lepp_qsub(int v1, int v2) | ||
583 | { | ||
584 | int delta = v1 - v2; | ||
585 | return delta + ((delta >> 31) & LEPP_COMP_QUEUE_SIZE); | ||
586 | } | ||
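For example, lepp_qsub() gives the ring distance between two completion indices, so the number of finished-but-unreaped completions would be (sketch; helper name hypothetical):

static inline int
example_completions_pending(const lepp_queue_t *q)
{
    return lepp_qsub(q->comp_busy, q->comp_head);
}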
587 | |||
588 | |||
589 | /** FIXME: Check this from linux, via a new "pwrite()" call. */ | ||
590 | #define LIPP_VERSION 1 | ||
591 | |||
592 | |||
593 | /** We use exactly two bytes of alignment padding. */ | ||
594 | #define LIPP_PACKET_PADDING 2 | ||
595 | |||
596 | /** The minimum size of a "small" buffer (including the padding). */ | ||
597 | #define LIPP_SMALL_PACKET_SIZE 128 | ||
598 | |||
599 | /* | ||
600 | * NOTE: The following two values should total to less than around | ||
601 | * 13582, to keep the total size used for "lipp_state_t" below 64K. | ||
602 | */ | ||
603 | |||
604 | /** The maximum number of "small" buffers. | ||
605 | * This is enough for 53 network cpus with 128 credits. Note that | ||
606 | * if these are exhausted, we will fall back to using large buffers. | ||
607 | */ | ||
608 | #define LIPP_SMALL_BUFFERS 6785 | ||
609 | |||
610 | /** The maximum number of "large" buffers. | ||
611 | * This is enough for 53 network cpus with 128 credits. | ||
612 | */ | ||
613 | #define LIPP_LARGE_BUFFERS 6785 | ||
614 | |||
615 | #endif /* __DRV_XGBE_INTF_H__ */ | ||
diff --git a/arch/tile/include/hv/netio_errors.h b/arch/tile/include/hv/netio_errors.h new file mode 100644 index 000000000000..e1591bff61b5 --- /dev/null +++ b/arch/tile/include/hv/netio_errors.h | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * Error codes returned from NetIO routines. | ||
17 | */ | ||
18 | |||
19 | #ifndef __NETIO_ERRORS_H__ | ||
20 | #define __NETIO_ERRORS_H__ | ||
21 | |||
22 | /** | ||
23 | * @addtogroup error | ||
24 | * | ||
25 | * @brief The error codes returned by NetIO functions. | ||
26 | * | ||
27 | * NetIO functions return 0 (defined as ::NETIO_NO_ERROR) on success, and | ||
28 | * a negative value if an error occurs. | ||
29 | * | ||
30 | * In cases where a NetIO function failed due to an error reported by | ||
31 | * system libraries, the error code will be the negation of the | ||
32 | * system errno at the time of failure. The @ref netio_strerror() | ||
33 | * function will deliver error strings for both NetIO and system error | ||
34 | * codes. | ||
35 | * | ||
36 | * @{ | ||
37 | */ | ||
38 | |||
39 | /** The set of all NetIO errors. */ | ||
40 | typedef enum | ||
41 | { | ||
42 | /** Operation successfully completed. */ | ||
43 | NETIO_NO_ERROR = 0, | ||
44 | |||
45 | /** A packet was successfully retrieved from an input queue. */ | ||
46 | NETIO_PKT = 0, | ||
47 | |||
48 | /** Largest NetIO error number. */ | ||
49 | NETIO_ERR_MAX = -701, | ||
50 | |||
51 | /** The tile is not registered with the IPP. */ | ||
52 | NETIO_NOT_REGISTERED = -701, | ||
53 | |||
54 | /** No packet was available to retrieve from the input queue. */ | ||
55 | NETIO_NOPKT = -702, | ||
56 | |||
57 | /** The requested function is not implemented. */ | ||
58 | NETIO_NOT_IMPLEMENTED = -703, | ||
59 | |||
60 | /** On a registration operation, the target queue already has the maximum | ||
61 | * number of tiles registered for it, and no more may be added. On a | ||
62 | * packet send operation, the output queue is full and nothing more can | ||
63 | * be queued until some of the queued packets are actually transmitted. */ | ||
64 | NETIO_QUEUE_FULL = -704, | ||
65 | |||
66 | /** The calling process or thread is not bound to exactly one CPU. */ | ||
67 | NETIO_BAD_AFFINITY = -705, | ||
68 | |||
69 | /** Cannot allocate memory on requested controllers. */ | ||
70 | NETIO_CANNOT_HOME = -706, | ||
71 | |||
72 | /** On a registration operation, the IPP specified is not configured | ||
73 | * to support the options requested; for instance, the application | ||
74 | * wants a specific type of tagged headers which the configured IPP | ||
75 | * doesn't support. Or, the supplied configuration information is | ||
76 | * not self-consistent, or is out of range; for instance, specifying | ||
77 | * both NETIO_RECV and NETIO_NO_RECV, or asking for more than | ||
78 | * NETIO_MAX_SEND_BUFFERS to be preallocated. On a VLAN or bucket | ||
79 | * configure operation, the number of items, or the base item, was | ||
80 | * out of range. | ||
81 | */ | ||
82 | NETIO_BAD_CONFIG = -707, | ||
83 | |||
84 | /** Too many tiles have registered to transmit packets. */ | ||
85 | NETIO_TOOMANY_XMIT = -708, | ||
86 | |||
87 | /** Packet transmission was attempted on a queue which was registered | ||
88 | with transmit disabled. */ | ||
89 | NETIO_UNREG_XMIT = -709, | ||
90 | |||
91 | /** This tile is already registered with the IPP. */ | ||
92 | NETIO_ALREADY_REGISTERED = -710, | ||
93 | |||
94 | /** The Ethernet link is down. The application should try again later. */ | ||
95 | NETIO_LINK_DOWN = -711, | ||
96 | |||
97 | /** An invalid memory buffer has been specified. This may be an unmapped | ||
98 | * virtual address, or one which does not meet alignment requirements. | ||
99 | * For netio_input_register(), this error may be returned when multiple | ||
100 | * processes specify different memory regions to be used for NetIO | ||
101 | * buffers. That can happen if these processes specify explicit memory | ||
102 | * regions with the ::NETIO_FIXED_BUFFER_VA flag, or if tmc_cmem_init() | ||
103 | * has not been called by a common ancestor of the processes. | ||
104 | */ | ||
105 | NETIO_FAULT = -712, | ||
106 | |||
107 | /** Cannot combine user-managed shared memory and cache coherence. */ | ||
108 | NETIO_BAD_CACHE_CONFIG = -713, | ||
109 | |||
110 | /** Smallest NetIO error number. */ | ||
111 | NETIO_ERR_MIN = -713, | ||
112 | |||
113 | #ifndef __DOXYGEN__ | ||
114 | /** Used internally to mean that no response is needed; never returned to | ||
115 | * an application. */ | ||
116 | NETIO_NO_RESPONSE = 1 | ||
117 | #endif | ||
118 | } netio_error_t; | ||
119 | |||
120 | /** @} */ | ||
121 | |||
122 | #endif /* __NETIO_ERRORS_H__ */ | ||
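A sketch of the calling convention documented in this header: anything in [NETIO_ERR_MIN, NETIO_ERR_MAX] is a NetIO-specific code, other negative values are negated system errnos, and netio_strerror() (mentioned above) can render either. The classification policy and helper name below are illustrative only.

static inline int
example_classify(int rc)
{
    if (rc == NETIO_NO_ERROR)
        return 0;                           /* success */

    if (rc <= NETIO_ERR_MAX && rc >= NETIO_ERR_MIN) {
        switch (rc) {
        case NETIO_QUEUE_FULL:              /* transient: try again later */
        case NETIO_LINK_DOWN:
            return 1;
        default:                            /* e.g. NETIO_BAD_CONFIG */
            return -1;
        }
    }

    return -1;                              /* negated system errno */
}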
diff --git a/arch/tile/include/hv/netio_intf.h b/arch/tile/include/hv/netio_intf.h new file mode 100644 index 000000000000..8d20972aba2c --- /dev/null +++ b/arch/tile/include/hv/netio_intf.h | |||
@@ -0,0 +1,2975 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * NetIO interface structures and macros. | ||
17 | */ | ||
18 | |||
19 | #ifndef __NETIO_INTF_H__ | ||
20 | #define __NETIO_INTF_H__ | ||
21 | |||
22 | #include <hv/netio_errors.h> | ||
23 | |||
24 | #ifdef __KERNEL__ | ||
25 | #include <linux/types.h> | ||
26 | #else | ||
27 | #include <stdint.h> | ||
28 | #endif | ||
29 | |||
30 | #if !defined(__HV__) && !defined(__BOGUX__) && !defined(__KERNEL__) | ||
31 | #include <assert.h> | ||
32 | #define netio_assert assert /**< Enable assertions from macros */ | ||
33 | #else | ||
34 | #define netio_assert(...) ((void)(0)) /**< Disable assertions from macros */ | ||
35 | #endif | ||
36 | |||
37 | /* | ||
38 | * If none of these symbols are defined, we're building libnetio in an | ||
39 | * environment where we have pthreads, so we'll enable locking. | ||
40 | */ | ||
41 | #if !defined(__HV__) && !defined(__BOGUX__) && !defined(__KERNEL__) && \ | ||
42 | !defined(__NEWLIB__) | ||
43 | #define _NETIO_PTHREAD /**< Include a mutex in netio_queue_t below */ | ||
44 | |||
45 | /* | ||
46 | * If NETIO_UNLOCKED is defined, we don't use per-cpu locks on | ||
47 | * per-packet NetIO operations. We still do pthread locking on things | ||
48 | * like netio_input_register, though. This is used for building | ||
49 | * libnetio_unlocked. | ||
50 | */ | ||
51 | #ifndef NETIO_UNLOCKED | ||
52 | |||
53 | /* Avoid PLT overhead by using our own inlined per-cpu lock. */ | ||
54 | #include <sched.h> | ||
55 | typedef int _netio_percpu_mutex_t; | ||
56 | |||
57 | static __inline int | ||
58 | _netio_percpu_mutex_init(_netio_percpu_mutex_t* lock) | ||
59 | { | ||
60 | *lock = 0; | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static __inline int | ||
65 | _netio_percpu_mutex_lock(_netio_percpu_mutex_t* lock) | ||
66 | { | ||
67 | while (__builtin_expect(__insn_tns(lock), 0)) | ||
68 | sched_yield(); | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static __inline int | ||
73 | _netio_percpu_mutex_unlock(_netio_percpu_mutex_t* lock) | ||
74 | { | ||
75 | *lock = 0; | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | #else /* NETIO_UNLOCKED */ | ||
80 | |||
81 | /* Don't do any locking for per-packet NetIO operations. */ | ||
82 | typedef int _netio_percpu_mutex_t; | ||
83 | #define _netio_percpu_mutex_init(L) | ||
84 | #define _netio_percpu_mutex_lock(L) | ||
85 | #define _netio_percpu_mutex_unlock(L) | ||
86 | |||
87 | #endif /* NETIO_UNLOCKED */ | ||
88 | #endif /* !__HV__, !__BOGUX, !__KERNEL__, !__NEWLIB__ */ | ||
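Usage sketch for the inlined per-cpu lock above, in the pthread build described by these conditionals (names hypothetical); with NETIO_UNLOCKED defined, the lock and unlock calls compile away entirely.

static _netio_percpu_mutex_t example_lock;   /* _netio_percpu_mutex_init() at setup */

static inline void
example_update_queue_state(void)
{
    _netio_percpu_mutex_lock(&example_lock);
    /* ... per-packet bookkeeping that must not be interleaved ... */
    _netio_percpu_mutex_unlock(&example_lock);
}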
89 | |||
90 | /** How many tiles can register for a given queue. | ||
91 | * @ingroup setup */ | ||
92 | #define NETIO_MAX_TILES_PER_QUEUE 64 | ||
93 | |||
94 | |||
95 | /** Largest permissible queue identifier. | ||
96 | * @ingroup setup */ | ||
97 | #define NETIO_MAX_QUEUE_ID 255 | ||
98 | |||
99 | |||
100 | #ifndef __DOXYGEN__ | ||
101 | |||
102 | /* Metadata packet checksum/ethertype flags. */ | ||
103 | |||
104 | /** The L4 checksum has not been calculated. */ | ||
105 | #define _NETIO_PKT_NO_L4_CSUM_SHIFT 0 | ||
106 | #define _NETIO_PKT_NO_L4_CSUM_RMASK 1 | ||
107 | #define _NETIO_PKT_NO_L4_CSUM_MASK \ | ||
108 | (_NETIO_PKT_NO_L4_CSUM_RMASK << _NETIO_PKT_NO_L4_CSUM_SHIFT) | ||
109 | |||
110 | /** The L3 checksum has not been calculated. */ | ||
111 | #define _NETIO_PKT_NO_L3_CSUM_SHIFT 1 | ||
112 | #define _NETIO_PKT_NO_L3_CSUM_RMASK 1 | ||
113 | #define _NETIO_PKT_NO_L3_CSUM_MASK \ | ||
114 | (_NETIO_PKT_NO_L3_CSUM_RMASK << _NETIO_PKT_NO_L3_CSUM_SHIFT) | ||
115 | |||
116 | /** The L3 checksum is incorrect (or perhaps has not been calculated). */ | ||
117 | #define _NETIO_PKT_BAD_L3_CSUM_SHIFT 2 | ||
118 | #define _NETIO_PKT_BAD_L3_CSUM_RMASK 1 | ||
119 | #define _NETIO_PKT_BAD_L3_CSUM_MASK \ | ||
120 | (_NETIO_PKT_BAD_L3_CSUM_RMASK << _NETIO_PKT_BAD_L3_CSUM_SHIFT) | ||
121 | |||
122 | /** The Ethernet packet type is unrecognized. */ | ||
123 | #define _NETIO_PKT_TYPE_UNRECOGNIZED_SHIFT 3 | ||
124 | #define _NETIO_PKT_TYPE_UNRECOGNIZED_RMASK 1 | ||
125 | #define _NETIO_PKT_TYPE_UNRECOGNIZED_MASK \ | ||
126 | (_NETIO_PKT_TYPE_UNRECOGNIZED_RMASK << \ | ||
127 | _NETIO_PKT_TYPE_UNRECOGNIZED_SHIFT) | ||
128 | |||
129 | /* Metadata packet type flags. */ | ||
130 | |||
131 | /** Where the packet type bits are; this field is the index into | ||
132 | * _netio_pkt_info. */ | ||
133 | #define _NETIO_PKT_TYPE_SHIFT 4 | ||
134 | #define _NETIO_PKT_TYPE_RMASK 0x3F | ||
135 | |||
136 | /** How many VLAN tags the packet has, and, if we have two, which one we | ||
137 | * actually grouped on. A VLAN within a proprietary (Marvell or Broadcom) | ||
138 | * tag is counted here. */ | ||
139 | #define _NETIO_PKT_VLAN_SHIFT 4 | ||
140 | #define _NETIO_PKT_VLAN_RMASK 0x3 | ||
141 | #define _NETIO_PKT_VLAN_MASK \ | ||
142 | (_NETIO_PKT_VLAN_RMASK << _NETIO_PKT_VLAN_SHIFT) | ||
143 | #define _NETIO_PKT_VLAN_NONE 0 /* No VLAN tag. */ | ||
144 | #define _NETIO_PKT_VLAN_ONE 1 /* One VLAN tag. */ | ||
145 | #define _NETIO_PKT_VLAN_TWO_OUTER 2 /* Two VLAN tags, outer one used. */ | ||
146 | #define _NETIO_PKT_VLAN_TWO_INNER 3 /* Two VLAN tags, inner one used. */ | ||
147 | |||
148 | /** Which proprietary tags the packet has. */ | ||
149 | #define _NETIO_PKT_TAG_SHIFT 6 | ||
150 | #define _NETIO_PKT_TAG_RMASK 0x3 | ||
151 | #define _NETIO_PKT_TAG_MASK \ | ||
152 | (_NETIO_PKT_TAG_RMASK << _NETIO_PKT_TAG_SHIFT) | ||
153 | #define _NETIO_PKT_TAG_NONE 0 /* No proprietary tags. */ | ||
154 | #define _NETIO_PKT_TAG_MRVL 1 /* Marvell HyperG.Stack tags. */ | ||
155 | #define _NETIO_PKT_TAG_MRVL_EXT 2 /* HyperG.Stack extended tags. */ | ||
156 | #define _NETIO_PKT_TAG_BRCM 3 /* Broadcom HiGig tags. */ | ||
157 | |||
158 | /** Whether a packet has an LLC + SNAP header. */ | ||
159 | #define _NETIO_PKT_SNAP_SHIFT 8 | ||
160 | #define _NETIO_PKT_SNAP_RMASK 0x1 | ||
161 | #define _NETIO_PKT_SNAP_MASK \ | ||
162 | (_NETIO_PKT_SNAP_RMASK << _NETIO_PKT_SNAP_SHIFT) | ||
163 | |||
164 | /* NOTE: Bits 9 and 10 are unused. */ | ||
165 | |||
166 | /** Length of any custom data before the L2 header, in words. */ | ||
167 | #define _NETIO_PKT_CUSTOM_LEN_SHIFT 11 | ||
168 | #define _NETIO_PKT_CUSTOM_LEN_RMASK 0x1F | ||
169 | #define _NETIO_PKT_CUSTOM_LEN_MASK \ | ||
170 | (_NETIO_PKT_CUSTOM_LEN_RMASK << _NETIO_PKT_CUSTOM_LEN_SHIFT) | ||
171 | |||
172 | /** The L4 checksum is incorrect (or perhaps has not been calculated). */ | ||
173 | #define _NETIO_PKT_BAD_L4_CSUM_SHIFT 16 | ||
174 | #define _NETIO_PKT_BAD_L4_CSUM_RMASK 0x1 | ||
175 | #define _NETIO_PKT_BAD_L4_CSUM_MASK \ | ||
176 | (_NETIO_PKT_BAD_L4_CSUM_RMASK << _NETIO_PKT_BAD_L4_CSUM_SHIFT) | ||
177 | |||
178 | /** Length of the L2 header, in words. */ | ||
179 | #define _NETIO_PKT_L2_LEN_SHIFT 17 | ||
180 | #define _NETIO_PKT_L2_LEN_RMASK 0x1F | ||
181 | #define _NETIO_PKT_L2_LEN_MASK \ | ||
182 | (_NETIO_PKT_L2_LEN_RMASK << _NETIO_PKT_L2_LEN_SHIFT) | ||
183 | |||
184 | |||
185 | /* Flags in minimal packet metadata. */ | ||
186 | |||
187 | /** We need an eDMA checksum on this packet. */ | ||
188 | #define _NETIO_PKT_NEED_EDMA_CSUM_SHIFT 0 | ||
189 | #define _NETIO_PKT_NEED_EDMA_CSUM_RMASK 1 | ||
190 | #define _NETIO_PKT_NEED_EDMA_CSUM_MASK \ | ||
191 | (_NETIO_PKT_NEED_EDMA_CSUM_RMASK << _NETIO_PKT_NEED_EDMA_CSUM_SHIFT) | ||
192 | |||
193 | /* Data within the packet information table. */ | ||
194 | |||
195 | /* Note that, for efficiency, code which uses these fields assumes that none | ||
196 | * of the shift values below are zero. See uses below for an explanation. */ | ||
197 | |||
198 | /** Offset within the L2 header of the innermost ethertype (in halfwords). */ | ||
199 | #define _NETIO_PKT_INFO_ETYPE_SHIFT 6 | ||
200 | #define _NETIO_PKT_INFO_ETYPE_RMASK 0x1F | ||
201 | |||
202 | /** Offset within the L2 header of the VLAN tag (in halfwords). */ | ||
203 | #define _NETIO_PKT_INFO_VLAN_SHIFT 11 | ||
204 | #define _NETIO_PKT_INFO_VLAN_RMASK 0x1F | ||
205 | |||
206 | #endif | ||
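These definitions all follow one convention: _RMASK is the field mask right-justified, _SHIFT is its bit position, and _MASK = _RMASK << _SHIFT. A field is therefore read from the metadata flags word as in this sketch (the accessor functions later in the header do exactly this, sometimes folding in a unit conversion):

static inline unsigned int
example_l2_len_words(unsigned int flags)
{
    /* L2 header length in words, straight from the flags word. */
    return (flags >> _NETIO_PKT_L2_LEN_SHIFT) & _NETIO_PKT_L2_LEN_RMASK;
}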
207 | |||
208 | |||
209 | /** The size of a memory buffer representing a small packet. | ||
210 | * @ingroup egress */ | ||
211 | #define SMALL_PACKET_SIZE 256 | ||
212 | |||
213 | /** The size of a memory buffer representing a large packet. | ||
214 | * @ingroup egress */ | ||
215 | #define LARGE_PACKET_SIZE 2048 | ||
216 | |||
217 | /** The size of a memory buffer representing a jumbo packet. | ||
218 | * @ingroup egress */ | ||
219 | #define JUMBO_PACKET_SIZE (12 * 1024) | ||
220 | |||
221 | |||
222 | /* Common ethertypes. | ||
223 | * @ingroup ingress */ | ||
224 | /** @{ */ | ||
225 | /** The ethertype of IPv4. */ | ||
226 | #define ETHERTYPE_IPv4 (0x0800) | ||
227 | /** The ethertype of ARP. */ | ||
228 | #define ETHERTYPE_ARP (0x0806) | ||
229 | /** The ethertype of VLANs. */ | ||
230 | #define ETHERTYPE_VLAN (0x8100) | ||
231 | /** The ethertype of a Q-in-Q header. */ | ||
232 | #define ETHERTYPE_Q_IN_Q (0x9100) | ||
233 | /** The ethertype of IPv6. */ | ||
234 | #define ETHERTYPE_IPv6 (0x86DD) | ||
235 | /** The ethertype of MPLS. */ | ||
236 | #define ETHERTYPE_MPLS (0x8847) | ||
237 | /** @} */ | ||
238 | |||
239 | |||
240 | /** The possible return values of NETIO_PKT_STATUS. | ||
241 | * @ingroup ingress | ||
242 | */ | ||
243 | typedef enum | ||
244 | { | ||
245 | /** No problems were detected with this packet. */ | ||
246 | NETIO_PKT_STATUS_OK, | ||
247 | /** The packet is undersized; this is expected behavior if the packet's | ||
248 | * ethertype is unrecognized, but otherwise the packet is likely corrupt. */ | ||
249 | NETIO_PKT_STATUS_UNDERSIZE, | ||
250 | /** The packet is oversized and some trailing bytes have been discarded. | ||
251 | This is expected behavior for short packets, since it's impossible to | ||
252 | precisely determine the amount of padding which may have been added to | ||
253 | them to make them meet the minimum Ethernet packet size. */ | ||
254 | NETIO_PKT_STATUS_OVERSIZE, | ||
255 | /** The packet was judged to be corrupt by hardware (for instance, it had | ||
256 | a bad CRC, or part of it was discarded due to lack of buffer space in | ||
257 | the I/O shim) and should be discarded. */ | ||
258 | NETIO_PKT_STATUS_BAD | ||
259 | } netio_pkt_status_t; | ||
260 | |||
261 | |||
262 | /** Log2 of how many buckets we have. */ | ||
263 | #define NETIO_LOG2_NUM_BUCKETS (10) | ||
264 | |||
265 | /** How many buckets we have. | ||
266 | * @ingroup ingress */ | ||
267 | #define NETIO_NUM_BUCKETS (1 << NETIO_LOG2_NUM_BUCKETS) | ||
268 | |||
269 | |||
270 | /** | ||
271 | * @brief A group-to-bucket identifier. | ||
272 | * | ||
273 | * @ingroup setup | ||
274 | * | ||
275 | * This tells us what to do with a given group. | ||
276 | */ | ||
277 | typedef union { | ||
278 | /** The header broken down into bits. */ | ||
279 | struct { | ||
280 | /** Whether we should balance on L4, if available */ | ||
281 | unsigned int __balance_on_l4:1; | ||
282 | /** Whether we should balance on L3, if available */ | ||
283 | unsigned int __balance_on_l3:1; | ||
284 | /** Whether we should balance on L2, if available */ | ||
285 | unsigned int __balance_on_l2:1; | ||
286 | /** Reserved for future use */ | ||
287 | unsigned int __reserved:1; | ||
288 | /** The base bucket to use to send traffic */ | ||
289 | unsigned int __bucket_base:NETIO_LOG2_NUM_BUCKETS; | ||
290 | /** The mask to apply to the balancing value. This must be one less | ||
291 | * than a power of two, e.g. 0x3 or 0xFF. | ||
292 | */ | ||
293 | unsigned int __bucket_mask:NETIO_LOG2_NUM_BUCKETS; | ||
294 | /** Pad to 32 bits */ | ||
295 | unsigned int __padding:(32 - 4 - 2 * NETIO_LOG2_NUM_BUCKETS); | ||
296 | } bits; | ||
297 | /** To send out the IDN. */ | ||
298 | unsigned int word; | ||
299 | } | ||
300 | netio_group_t; | ||
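A sketch of building a group-to-bucket word from the fields above: balance on L3/L4 across 16 buckets starting at bucket 0. The values and the helper name are illustrative; the call that actually delivers this word to the IPP is not part of this excerpt.

static inline unsigned int
example_group_word(void)
{
    netio_group_t g = {{ 0 }};

    g.bits.__balance_on_l4 = 1;    /* use L4 fields when present */
    g.bits.__balance_on_l3 = 1;    /* else fall back to L3 */
    g.bits.__bucket_base = 0;
    g.bits.__bucket_mask = 0xF;    /* 16 buckets; one less than a power of 2 */

    return g.word;                 /* the value sent out over the IDN */
}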
301 | |||
302 | |||
303 | /** | ||
304 | * @brief A VLAN-to-bucket identifier. | ||
305 | * | ||
306 | * @ingroup setup | ||
307 | * | ||
308 | * This tells us what to do with a given VLAN. | ||
309 | */ | ||
310 | typedef netio_group_t netio_vlan_t; | ||
311 | |||
312 | |||
313 | /** | ||
314 | * A bucket-to-queue mapping. | ||
315 | * @ingroup setup | ||
316 | */ | ||
317 | typedef unsigned char netio_bucket_t; | ||
318 | |||
319 | |||
320 | /** | ||
321 | * A packet size can always fit in a netio_size_t. | ||
322 | * @ingroup setup | ||
323 | */ | ||
324 | typedef unsigned int netio_size_t; | ||
325 | |||
326 | |||
327 | /** | ||
328 | * @brief Ethernet standard (ingress) packet metadata. | ||
329 | * | ||
330 | * @ingroup ingress | ||
331 | * | ||
332 | * This is additional data associated with each packet. | ||
333 | * This structure is opaque and accessed through the @ref ingress. | ||
334 | * | ||
335 | * Also, the buffer population operation currently assumes that standard | ||
336 | * metadata is at least as large as minimal metadata, and will need to be | ||
337 | * modified if that is no longer the case. | ||
338 | */ | ||
339 | typedef struct | ||
340 | { | ||
341 | #ifdef __DOXYGEN__ | ||
342 | /** This structure is opaque. */ | ||
343 | unsigned char opaque[24]; | ||
344 | #else | ||
345 | /** The overall ordinal of the packet */ | ||
346 | unsigned int __packet_ordinal; | ||
347 | /** The ordinal of the packet within the group */ | ||
348 | unsigned int __group_ordinal; | ||
349 | /** The best flow hash IPP could compute. */ | ||
350 | unsigned int __flow_hash; | ||
351 | /** Flags pertaining to checksum calculation, packet type, etc. */ | ||
352 | unsigned int __flags; | ||
353 | /** The first word of "user data". */ | ||
354 | unsigned int __user_data_0; | ||
355 | /** The second word of "user data". */ | ||
356 | unsigned int __user_data_1; | ||
357 | #endif | ||
358 | } | ||
359 | netio_pkt_metadata_t; | ||
360 | |||
361 | |||
362 | /** To ensure that the L3 header is aligned mod 4, the L2 header should be | ||
363 | * aligned mod 4 plus 2, since every supported L2 header is 4n + 2 bytes | ||
364 | * long. The standard way to do this is to simply add 2 bytes of padding | ||
365 | * before the L2 header. | ||
366 | */ | ||
367 | #define NETIO_PACKET_PADDING 2 | ||
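A quick check of the arithmetic behind NETIO_PACKET_PADDING (illustrative): a plain Ethernet header is 14 bytes and a single-VLAN header is 18, both of the form 4n + 2, so with the 2-byte pad the L3 header lands at offset 16 or 20, each a multiple of 4.

static inline int
example_l3_word_aligned(unsigned int l2_header_bytes)   /* 14, 18, 22, ... */
{
    return (NETIO_PACKET_PADDING + l2_header_bytes) % 4 == 0;
}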
368 | |||
369 | |||
370 | |||
371 | /** | ||
372 | * @brief Ethernet minimal (egress) packet metadata. | ||
373 | * | ||
374 | * @ingroup egress | ||
375 | * | ||
376 | * This structure represents information about packets which have | ||
377 | * been processed by @ref netio_populate_buffer() or | ||
378 | * @ref netio_populate_prepend_buffer(). This structure is opaque | ||
379 | * and accessed through the @ref egress. | ||
380 | * | ||
381 | * @internal This structure is actually copied into the memory used by | ||
382 | * standard metadata, which is assumed to be large enough. | ||
383 | */ | ||
384 | typedef struct | ||
385 | { | ||
386 | #ifdef __DOXYGEN__ | ||
387 | /** This structure is opaque. */ | ||
388 | unsigned char opaque[14]; | ||
389 | #else | ||
390 | /** The offset of the L2 header from the start of the packet data. */ | ||
391 | unsigned short l2_offset; | ||
392 | /** The offset of the L3 header from the start of the packet data. */ | ||
393 | unsigned short l3_offset; | ||
394 | /** Where to write the checksum. */ | ||
395 | unsigned char csum_location; | ||
396 | /** Where to start checksumming from. */ | ||
397 | unsigned char csum_start; | ||
398 | /** Flags pertaining to checksum calculation etc. */ | ||
399 | unsigned short flags; | ||
400 | /** The L2 length of the packet. */ | ||
401 | unsigned short l2_length; | ||
402 | /** The checksum with which to seed the checksum generator. */ | ||
403 | unsigned short csum_seed; | ||
404 | /** How much to checksum. */ | ||
405 | unsigned short csum_length; | ||
406 | #endif | ||
407 | } | ||
408 | netio_pkt_minimal_metadata_t; | ||
409 | |||
410 | |||
411 | #ifndef __DOXYGEN__ | ||
412 | |||
413 | /** | ||
414 | * @brief An I/O notification header. | ||
415 | * | ||
416 | * This is the first word of data received from an I/O shim in a notification | ||
417 | * packet. It contains framing and status information. | ||
418 | */ | ||
419 | typedef union | ||
420 | { | ||
421 | unsigned int word; /**< The whole word. */ | ||
422 | /** The various fields. */ | ||
423 | struct | ||
424 | { | ||
425 | unsigned int __channel:7; /**< Resource channel. */ | ||
426 | unsigned int __type:4; /**< Type. */ | ||
427 | unsigned int __ack:1; /**< Whether an acknowledgement is needed. */ | ||
428 | unsigned int __reserved:1; /**< Reserved. */ | ||
429 | unsigned int __protocol:1; /**< A protocol-specific word is added. */ | ||
430 | unsigned int __status:2; /**< Status of the transfer. */ | ||
431 | unsigned int __framing:2; /**< Framing of the transfer. */ | ||
432 | unsigned int __transfer_size:14; /**< Transfer size in bytes (total). */ | ||
433 | } bits; | ||
434 | } | ||
435 | __netio_pkt_notif_t; | ||
436 | |||
437 | |||
438 | /** | ||
439 | * Returns the base address of the packet. | ||
440 | */ | ||
441 | #define _NETIO_PKT_HANDLE_BASE(p) \ | ||
442 | ((unsigned char*)((p).word & 0xFFFFFFC0)) | ||
443 | |||
444 | /** | ||
445 | * Returns the base address of the packet. | ||
446 | */ | ||
447 | #define _NETIO_PKT_BASE(p) \ | ||
448 | _NETIO_PKT_HANDLE_BASE(p->__packet) | ||
449 | |||
450 | /** | ||
451 | * @brief An I/O notification packet (second word) | ||
452 | * | ||
453 | * This is the second word of data received from an I/O shim in a notification | ||
454 | * packet. This is the virtual address of the packet buffer, plus some flag | ||
455 | * bits. (The virtual address of the packet is always 256-byte aligned so we | ||
456 | * have room for 8 bits' worth of flags in the low 8 bits.) | ||
457 | * | ||
458 | * @internal | ||
459 | * NOTE: The low two bits must contain "__queue", so the "packet size" | ||
460 | * (SIZE_SMALL, SIZE_LARGE, or SIZE_JUMBO) can be determined quickly. | ||
461 | * | ||
462 | * If __addr or __offset are moved, _NETIO_PKT_BASE | ||
463 | * (defined just above) must be changed. | ||
464 | */ | ||
465 | typedef union | ||
466 | { | ||
467 | unsigned int word; /**< The whole word. */ | ||
468 | /** The various fields. */ | ||
469 | struct | ||
470 | { | ||
471 | /** Which queue the packet will be returned to once it is sent back to | ||
472 | the IPP. This is one of the SIZE_xxx values. */ | ||
473 | unsigned int __queue:2; | ||
474 | |||
475 | /** The IPP handle of the sending IPP. */ | ||
476 | unsigned int __ipp_handle:2; | ||
477 | |||
478 | /** Reserved for future use. */ | ||
479 | unsigned int __reserved:1; | ||
480 | |||
481 | /** If 1, this packet has minimal (egress) metadata; otherwise, it | ||
482 | has standard (ingress) metadata. */ | ||
483 | unsigned int __minimal:1; | ||
484 | |||
485 | /** Offset of the metadata within the packet. This value is multiplied | ||
486 | * by 64 and added to the base packet address to get the metadata | ||
487 | * address. Note that this field is aligned within the word such that | ||
488 | * you can easily extract the metadata address with a 26-bit mask. */ | ||
489 | unsigned int __offset:2; | ||
490 | |||
491 | /** The top 24 bits of the packet's virtual address. */ | ||
492 | unsigned int __addr:24; | ||
493 | } bits; | ||
494 | } | ||
495 | __netio_pkt_handle_t; | ||
496 | |||
497 | #endif /* !__DOXYGEN__ */ | ||
498 | |||
499 | |||
500 | /** | ||
501 | * @brief A handle for an I/O packet's storage. | ||
502 | * @ingroup ingress | ||
503 | * | ||
504 | * netio_pkt_handle_t encodes the concept of a ::netio_pkt_t with its | ||
505 | * packet metadata removed. It is a much smaller type that exists to | ||
506 | * facilitate applications where the full ::netio_pkt_t type is too | ||
507 | * large, such as those that cache enormous numbers of packets or wish | ||
508 | * to transmit packet descriptors over the UDN. | ||
509 | * | ||
510 | * Because there is no metadata, most ::netio_pkt_t operations cannot be | ||
511 | * performed on a netio_pkt_handle_t. It supports only | ||
512 | * netio_free_handle() (to free the buffer) and | ||
513 | * NETIO_PKT_CUSTOM_DATA_H() (to access a pointer to its contents). | ||
514 | * The application must acquire any additional metadata it wants from the | ||
515 | * original ::netio_pkt_t and record it separately. | ||
516 | * | ||
517 | * A netio_pkt_handle_t can be extracted from a ::netio_pkt_t by calling | ||
518 | * NETIO_PKT_HANDLE(). An invalid handle (analogous to NULL) can be | ||
519 | * created by assigning the value ::NETIO_PKT_HANDLE_NONE. A handle can | ||
520 | * be tested for validity with NETIO_PKT_HANDLE_IS_VALID(). | ||
521 | */ | ||
522 | typedef struct | ||
523 | { | ||
524 | unsigned int word; /**< Opaque bits. */ | ||
525 | } netio_pkt_handle_t; | ||
526 | |||
527 | /** | ||
528 | * @brief A packet descriptor. | ||
529 | * | ||
530 | * @ingroup ingress | ||
531 | * @ingroup egress | ||
532 | * | ||
533 | * This data structure represents a packet. The structure is manipulated | ||
534 | * through the @ref ingress and the @ref egress. | ||
535 | * | ||
536 | * While the contents of a netio_pkt_t are opaque, the structure itself is | ||
537 | * portable. This means that it may be shared between all tiles which have | ||
538 | * done a netio_input_register() call for the interface on which the pkt_t | ||
539 | * was initially received (via netio_get_packet()) or retrieved (via | ||
540 | * netio_get_buffer()). The contents of a netio_pkt_t can be transmitted to | ||
541 | * another tile via shared memory, or via a UDN message, or by other means. | ||
542 | * The destination tile may then use the pkt_t as if it had originally been | ||
543 | * received locally; it may read or write the packet's data, read its | ||
544 | * metadata, free the packet, send the packet, transfer the netio_pkt_t to | ||
545 | * yet another tile, and so forth. | ||
546 | * | ||
547 | * Once a netio_pkt_t has been transferred to a second tile, the first tile | ||
548 | * should not reference the original copy; in particular, if more than one | ||
549 | * tile frees or sends the same netio_pkt_t, the IPP's packet free lists will | ||
550 | * become corrupted. Note also that each tile which reads or modifies | ||
551 | * packet data must obey the memory coherency rules outlined in @ref input. | ||
552 | */ | ||
553 | typedef struct | ||
554 | { | ||
555 | #ifdef __DOXYGEN__ | ||
556 | /** This structure is opaque. */ | ||
557 | unsigned char opaque[32]; | ||
558 | #else | ||
559 | /** For an ingress packet (one with standard metadata), this is the | ||
560 | * notification header we got from the I/O shim. For an egress packet | ||
561 | * (one with minimal metadata), this word is zero if the packet has not | ||
562 | * been populated, and nonzero if it has. */ | ||
563 | __netio_pkt_notif_t __notif_header; | ||
564 | |||
565 | /** Virtual address of the packet buffer, plus state flags. */ | ||
566 | __netio_pkt_handle_t __packet; | ||
567 | |||
568 | /** Metadata associated with the packet. */ | ||
569 | netio_pkt_metadata_t __metadata; | ||
570 | #endif | ||
571 | } | ||
572 | netio_pkt_t; | ||
573 | |||
574 | |||
575 | #ifndef __DOXYGEN__ | ||
576 | |||
577 | #define __NETIO_PKT_NOTIF_HEADER(pkt) ((pkt)->__notif_header) | ||
578 | #define __NETIO_PKT_IPP_HANDLE(pkt) ((pkt)->__packet.bits.__ipp_handle) | ||
579 | #define __NETIO_PKT_QUEUE(pkt) ((pkt)->__packet.bits.__queue) | ||
580 | #define __NETIO_PKT_NOTIF_HEADER_M(mda, pkt) ((pkt)->__notif_header) | ||
581 | #define __NETIO_PKT_IPP_HANDLE_M(mda, pkt) ((pkt)->__packet.bits.__ipp_handle) | ||
582 | #define __NETIO_PKT_MINIMAL(pkt) ((pkt)->__packet.bits.__minimal) | ||
583 | #define __NETIO_PKT_QUEUE_M(mda, pkt) ((pkt)->__packet.bits.__queue) | ||
584 | #define __NETIO_PKT_FLAGS_M(mda, pkt) ((mda)->__flags) | ||
585 | |||
586 | /* Packet information table, used by the attribute access functions below. */ | ||
587 | extern const uint16_t _netio_pkt_info[]; | ||
588 | |||
589 | #endif /* __DOXYGEN__ */ | ||
590 | |||
591 | |||
592 | #ifndef __DOXYGEN__ | ||
593 | /* These macros are deprecated and will disappear in a future MDE release. */ | ||
594 | #define NETIO_PKT_GOOD_CHECKSUM(pkt) \ | ||
595 | NETIO_PKT_L4_CSUM_CORRECT(pkt) | ||
596 | #define NETIO_PKT_GOOD_CHECKSUM_M(mda, pkt) \ | ||
597 | NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt) | ||
598 | #endif /* __DOXYGEN__ */ | ||
599 | |||
600 | |||
601 | /* Packet attribute access functions. */ | ||
602 | |||
603 | /** Return a pointer to the metadata for a packet. | ||
604 | * @ingroup ingress | ||
605 | * | ||
606 | * Calling this function once and passing the result to other retrieval | ||
607 | * functions with a "_M" suffix usually improves performance. This | ||
608 | * function must be called on an 'ingress' packet (i.e. one retrieved | ||
609 | * by @ref netio_get_packet(), on which @ref netio_populate_buffer() or | ||
610 | * @ref netio_populate_prepend_buffer have not been called). Use of this | ||
611 | * function on an 'egress' packet will cause an assertion failure. | ||
612 | * | ||
613 | * @param[in] pkt Packet on which to operate. | ||
614 | * @return A pointer to the packet's standard metadata. | ||
615 | */ | ||
616 | static __inline netio_pkt_metadata_t* | ||
617 | NETIO_PKT_METADATA(netio_pkt_t* pkt) | ||
618 | { | ||
619 | netio_assert(!pkt->__packet.bits.__minimal); | ||
620 | return &pkt->__metadata; | ||
621 | } | ||
622 | |||
623 | |||
624 | /** Return a pointer to the minimal metadata for a packet. | ||
625 | * @ingroup egress | ||
626 | * | ||
627 | * Calling this function once and passing the result to other retrieval | ||
628 | * functions with a "_MM" suffix usually improves performance. This | ||
629 | * function must be called on an 'egress' packet (i.e. one on which | ||
630 | * @ref netio_populate_buffer() or @ref netio_populate_prepend_buffer() | ||
631 | * have been called, or one retrieved by @ref netio_get_buffer()). Use of | ||
632 | * this function on an 'ingress' packet will cause an assertion failure. | ||
633 | * | ||
634 | * @param[in] pkt Packet on which to operate. | ||
635 | * @return A pointer to the packet's minimal metadata. | ||
636 | */ | ||
637 | static __inline netio_pkt_minimal_metadata_t* | ||
638 | NETIO_PKT_MINIMAL_METADATA(netio_pkt_t* pkt) | ||
639 | { | ||
640 | netio_assert(pkt->__packet.bits.__minimal); | ||
641 | return (netio_pkt_minimal_metadata_t*) &pkt->__metadata; | ||
642 | } | ||
643 | |||
644 | |||
645 | /** Determine whether a packet has 'minimal' metadata. | ||
646 | * @ingroup pktfuncs | ||
647 | * | ||
648 | * This function will return nonzero if the packet is an 'egress' | ||
649 | * packet (i.e. one on which @ref netio_populate_buffer() or | ||
650 | * @ref netio_populate_prepend_buffer() have been called, or one | ||
651 | * retrieved by @ref netio_get_buffer()), and zero if the packet | ||
652 | * is an 'ingress' packet (i.e. one retrieved by @ref netio_get_packet(), | ||
653 | * which has not been converted into an 'egress' packet). | ||
654 | * | ||
655 | * @param[in] pkt Packet on which to operate. | ||
656 | * @return Nonzero if the packet has minimal metadata. | ||
657 | */ | ||
658 | static __inline unsigned int | ||
659 | NETIO_PKT_IS_MINIMAL(netio_pkt_t* pkt) | ||
660 | { | ||
661 | return pkt->__packet.bits.__minimal; | ||
662 | } | ||
663 | |||
664 | |||
665 | /** Return a handle for a packet's storage. | ||
666 | * @ingroup pktfuncs | ||
667 | * | ||
668 | * @param[in] pkt Packet on which to operate. | ||
669 | * @return A handle for the packet's storage. | ||
670 | */ | ||
671 | static __inline netio_pkt_handle_t | ||
672 | NETIO_PKT_HANDLE(netio_pkt_t* pkt) | ||
673 | { | ||
674 | netio_pkt_handle_t h; | ||
675 | h.word = pkt->__packet.word; | ||
676 | return h; | ||
677 | } | ||
678 | |||
679 | |||
680 | /** A special reserved value indicating the absence of a packet handle. | ||
681 | * | ||
682 | * @ingroup pktfuncs | ||
683 | */ | ||
684 | #define NETIO_PKT_HANDLE_NONE ((netio_pkt_handle_t) { 0 }) | ||
685 | |||
686 | |||
687 | /** Test whether a packet handle is valid. | ||
688 | * | ||
689 | * Applications may wish to use the reserved value NETIO_PKT_HANDLE_NONE | ||
690 | * to indicate no packet at all. This function tests to see if a packet | ||
691 | * handle is a real handle, not this special reserved value. | ||
692 | * | ||
693 | * @ingroup pktfuncs | ||
694 | * | ||
695 | * @param[in] handle Handle on which to operate. | ||
696 | * @return One if the packet handle is valid, else zero. | ||
697 | */ | ||
698 | static __inline unsigned int | ||
699 | NETIO_PKT_HANDLE_IS_VALID(netio_pkt_handle_t handle) | ||
700 | { | ||
701 | return handle.word != 0; | ||
702 | } | ||
703 | |||
704 | |||
705 | |||
706 | /** Return a pointer to the start of the packet's custom header. | ||
707 | * A custom header may or may not be present, depending upon the IPP; its | ||
708 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
709 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
710 | * the custom header precedes the L2 header in the packet buffer. | ||
711 | * @ingroup ingress | ||
712 | * | ||
713 | * @param[in] handle Handle on which to operate. | ||
714 | * @return A pointer to start of the packet. | ||
715 | */ | ||
716 | static __inline unsigned char* | ||
717 | NETIO_PKT_CUSTOM_DATA_H(netio_pkt_handle_t handle) | ||
718 | { | ||
719 | return _NETIO_PKT_HANDLE_BASE(handle) + NETIO_PACKET_PADDING; | ||
720 | } | ||
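A sketch of the handle pattern described earlier: keep only the small netio_pkt_handle_t, and come back to the buffer contents later. Both helpers are hypothetical; they use only the accessors defined in this header.

static inline void
example_stash_handle(netio_pkt_t *pkt, netio_pkt_handle_t *slot)
{
    *slot = NETIO_PKT_HANDLE(pkt);        /* descriptor without metadata */
}

static inline unsigned char *
example_handle_data(netio_pkt_handle_t h)
{
    if (!NETIO_PKT_HANDLE_IS_VALID(h))
        return 0;                         /* slot held NETIO_PKT_HANDLE_NONE */
    return NETIO_PKT_CUSTOM_DATA_H(h);    /* start of the stored packet */
}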
721 | |||
722 | |||
723 | /** Return the length of the packet's custom header. | ||
724 | * A custom header may or may not be present, depending upon the IPP; its | ||
725 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
726 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
727 | * the custom header precedes the L2 header in the packet buffer. | ||
728 | * | ||
729 | * @ingroup ingress | ||
730 | * | ||
731 | * @param[in] mda Pointer to packet's standard metadata. | ||
732 | * @param[in] pkt Packet on which to operate. | ||
733 | * @return The length of the packet's custom header, in bytes. | ||
734 | */ | ||
735 | static __inline netio_size_t | ||
736 | NETIO_PKT_CUSTOM_HEADER_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
737 | { | ||
738 | /* | ||
739 | * Note that we effectively need to extract a quantity from the flags word | ||
740 | * which is measured in words, and then turn it into bytes by shifting | ||
741 | * it left by 2. We do this all at once by just shifting right two less | ||
742 | * bits, and shifting the mask up two bits. | ||
743 | */ | ||
744 | return ((mda->__flags >> (_NETIO_PKT_CUSTOM_LEN_SHIFT - 2)) & | ||
745 | (_NETIO_PKT_CUSTOM_LEN_RMASK << 2)); | ||
746 | } | ||
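For clarity, the combined shift/mask above is equivalent to the more direct two-step form in this sketch; the one-instruction saving is why the header uses the folded version on the fast path.

static inline netio_size_t
example_custom_header_bytes(netio_pkt_metadata_t *mda)
{
    unsigned int words = (mda->__flags >> _NETIO_PKT_CUSTOM_LEN_SHIFT) &
                         _NETIO_PKT_CUSTOM_LEN_RMASK;
    return words << 2;    /* same result as NETIO_PKT_CUSTOM_HEADER_LENGTH_M() */
}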
747 | |||
748 | |||
749 | /** Return the length of the packet, starting with the custom header. | ||
750 | * A custom header may or may not be present, depending upon the IPP; its | ||
751 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
752 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
753 | * the custom header precedes the L2 header in the packet buffer. | ||
754 | * @ingroup ingress | ||
755 | * | ||
756 | * @param[in] mda Pointer to packet's standard metadata. | ||
757 | * @param[in] pkt Packet on which to operate. | ||
758 | * @return The length of the packet, in bytes. | ||
759 | */ | ||
760 | static __inline netio_size_t | ||
761 | NETIO_PKT_CUSTOM_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
762 | { | ||
763 | return (__NETIO_PKT_NOTIF_HEADER(pkt).bits.__transfer_size - | ||
764 | NETIO_PACKET_PADDING); | ||
765 | } | ||
766 | |||
767 | |||
768 | /** Return a pointer to the start of the packet's custom header. | ||
769 | * A custom header may or may not be present, depending upon the IPP; its | ||
770 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
771 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
772 | * the custom header precedes the L2 header in the packet buffer. | ||
773 | * @ingroup ingress | ||
774 | * | ||
775 | * @param[in] mda Pointer to packet's standard metadata. | ||
776 | * @param[in] pkt Packet on which to operate. | ||
777 | * @return A pointer to start of the packet. | ||
778 | */ | ||
779 | static __inline unsigned char* | ||
780 | NETIO_PKT_CUSTOM_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
781 | { | ||
782 | return NETIO_PKT_CUSTOM_DATA_H(NETIO_PKT_HANDLE(pkt)); | ||
783 | } | ||
784 | |||
785 | |||
786 | /** Return the length of the packet's L2 (Ethernet plus VLAN or SNAP) header. | ||
787 | * @ingroup ingress | ||
788 | * | ||
789 | * @param[in] mda Pointer to packet's standard metadata. | ||
790 | * @param[in] pkt Packet on which to operate. | ||
791 | * @return The length of the packet's L2 header, in bytes. | ||
792 | */ | ||
793 | static __inline netio_size_t | ||
794 | NETIO_PKT_L2_HEADER_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
795 | { | ||
796 | /* | ||
797 | * Note that we effectively need to extract a quantity from the flags word | ||
798 | * which is measured in words, and then turn it into bytes by shifting | ||
799 | * it left by 2. We do this all at once by just shifting right two less | ||
800 | * bits, and shifting the mask up two bits. We then add two bytes. | ||
801 | */ | ||
802 | return ((mda->__flags >> (_NETIO_PKT_L2_LEN_SHIFT - 2)) & | ||
803 | (_NETIO_PKT_L2_LEN_RMASK << 2)) + 2; | ||
804 | } | ||
805 | |||
806 | |||
807 | /** Return the length of the packet, starting with the L2 (Ethernet) header. | ||
808 | * @ingroup ingress | ||
809 | * | ||
810 | * @param[in] mda Pointer to packet's standard metadata. | ||
811 | * @param[in] pkt Packet on which to operate. | ||
812 | * @return The length of the packet, in bytes. | ||
813 | */ | ||
814 | static __inline netio_size_t | ||
815 | NETIO_PKT_L2_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
816 | { | ||
817 | return (NETIO_PKT_CUSTOM_LENGTH_M(mda, pkt) - | ||
818 | NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda,pkt)); | ||
819 | } | ||
820 | |||
821 | |||
822 | /** Return a pointer to the start of the packet's L2 (Ethernet) header. | ||
823 | * @ingroup ingress | ||
824 | * | ||
825 | * @param[in] mda Pointer to packet's standard metadata. | ||
826 | * @param[in] pkt Packet on which to operate. | ||
827 | * @return A pointer to start of the packet. | ||
828 | */ | ||
829 | static __inline unsigned char* | ||
830 | NETIO_PKT_L2_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
831 | { | ||
832 | return (NETIO_PKT_CUSTOM_DATA_M(mda, pkt) + | ||
833 | NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt)); | ||
834 | } | ||
835 | |||
836 | |||
837 | /** Retrieve the length of the packet, starting with the L3 (generally, | ||
838 | * the IP) header. | ||
839 | * @ingroup ingress | ||
840 | * | ||
841 | * @param[in] mda Pointer to packet's standard metadata. | ||
842 | * @param[in] pkt Packet on which to operate. | ||
843 | * @return Length of the packet's L3 header and data, in bytes. | ||
844 | */ | ||
845 | static __inline netio_size_t | ||
846 | NETIO_PKT_L3_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
847 | { | ||
848 | return (NETIO_PKT_L2_LENGTH_M(mda, pkt) - | ||
849 | NETIO_PKT_L2_HEADER_LENGTH_M(mda,pkt)); | ||
850 | } | ||
851 | |||
852 | |||
853 | /** Return a pointer to the packet's L3 (generally, the IP) header. | ||
854 | * @ingroup ingress | ||
855 | * | ||
856 | * Note that we guarantee word alignment of the L3 header. | ||
857 | * | ||
858 | * @param[in] mda Pointer to packet's standard metadata. | ||
859 | * @param[in] pkt Packet on which to operate. | ||
860 | * @return A pointer to the packet's L3 header. | ||
861 | */ | ||
862 | static __inline unsigned char* | ||
863 | NETIO_PKT_L3_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
864 | { | ||
865 | return (NETIO_PKT_L2_DATA_M(mda, pkt) + | ||
866 | NETIO_PKT_L2_HEADER_LENGTH_M(mda, pkt)); | ||
867 | } | ||
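Putting the "_M" convention together (sketch): fetch the standard metadata pointer once and pass it to each accessor. Everything used here is defined earlier in this header; how 'pkt' was obtained is outside this excerpt, and the function name is hypothetical.

static inline void
example_walk_headers(netio_pkt_t *pkt)
{
    netio_pkt_metadata_t *mda = NETIO_PKT_METADATA(pkt);

    unsigned char *l2 = NETIO_PKT_L2_DATA_M(mda, pkt);
    netio_size_t l2_len = NETIO_PKT_L2_LENGTH_M(mda, pkt);
    unsigned char *l3 = NETIO_PKT_L3_DATA_M(mda, pkt);
    netio_size_t l3_len = NETIO_PKT_L3_LENGTH_M(mda, pkt);

    (void)l2; (void)l2_len; (void)l3; (void)l3_len;   /* illustration only */
}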
868 | |||
869 | |||
870 | /** Return the ordinal of the packet. | ||
871 | * @ingroup ingress | ||
872 | * | ||
873 | * Each packet is given an ordinal number when it is delivered by the IPP. | ||
874 | * In the medium term, the ordinal is unique and monotonically increasing, | ||
875 | * being incremented by 1 for each packet; the ordinal of the first packet | ||
876 | * delivered after the IPP starts is zero. (Since the ordinal is of finite | ||
877 | * size, given enough input packets, it will eventually wrap around to zero; | ||
878 | * in the long term, therefore, ordinals are not unique.) The ordinals | ||
879 | * handed out by different IPPs are not disjoint, so two packets from | ||
880 | * different IPPs may have identical ordinals. Packets dropped by the | ||
881 | * IPP or by the I/O shim are not assigned ordinals. | ||
882 | * | ||
883 | * @param[in] mda Pointer to packet's standard metadata. | ||
884 | * @param[in] pkt Packet on which to operate. | ||
885 | * @return The packet's per-IPP packet ordinal. | ||
886 | */ | ||
887 | static __inline unsigned int | ||
888 | NETIO_PKT_ORDINAL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
889 | { | ||
890 | return mda->__packet_ordinal; | ||
891 | } | ||
892 | |||
893 | |||
894 | /** Return the per-group ordinal of the packet. | ||
895 | * @ingroup ingress | ||
896 | * | ||
897 | * Each packet is given a per-group ordinal number when it is | ||
898 | * delivered by the IPP. By default, the group is the packet's VLAN, | ||
899 | * although the IPP can be recompiled to use different values. In | ||
900 | * the medium term, the ordinal is unique and monotonically | ||
901 | * increasing, being incremented by 1 for each packet; the ordinal of | ||
902 | * the first packet distributed to a particular group is zero. | ||
903 | * (Since the ordinal is of finite size, given enough input packets, | ||
904 | * it will eventually wrap around to zero; in the long term, | ||
905 | * therefore, ordinals are not unique.) The ordinals handed out by | ||
906 | * different IPPs are not disjoint, so two packets from different IPPs | ||
907 | * may have identical ordinals; similarly, packets distributed to | ||
908 | * different groups may have identical ordinals. Packets dropped by | ||
909 | * the IPP or by the I/O shim are not assigned ordinals. | ||
910 | * | ||
911 | * @param[in] mda Pointer to packet's standard metadata. | ||
912 | * @param[in] pkt Packet on which to operate. | ||
913 | * @return The packet's per-IPP, per-group ordinal. | ||
914 | */ | ||
915 | static __inline unsigned int | ||
916 | NETIO_PKT_GROUP_ORDINAL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
917 | { | ||
918 | return mda->__group_ordinal; | ||
919 | } | ||
920 | |||
921 | |||
922 | /** Return the VLAN ID assigned to the packet. | ||
923 | * @ingroup ingress | ||
924 | * | ||
925 | * This value is usually contained within the packet header. | ||
926 | * | ||
927 | * This value will be zero if the packet does not have a VLAN tag, or if | ||
928 | * this value was not extracted from the packet. | ||
929 | * | ||
930 | * @param[in] mda Pointer to packet's standard metadata. | ||
931 | * @param[in] pkt Packet on which to operate. | ||
932 | * @return The packet's VLAN ID. | ||
933 | */ | ||
934 | static __inline unsigned short | ||
935 | NETIO_PKT_VLAN_ID_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
936 | { | ||
937 | int vl = (mda->__flags >> _NETIO_PKT_VLAN_SHIFT) & _NETIO_PKT_VLAN_RMASK; | ||
938 | unsigned short* pkt_p; | ||
939 | int index; | ||
940 | unsigned short val; | ||
941 | |||
942 | if (vl == _NETIO_PKT_VLAN_NONE) | ||
943 | return 0; | ||
944 | |||
945 | pkt_p = (unsigned short*) NETIO_PKT_L2_DATA_M(mda, pkt); | ||
946 | index = (mda->__flags >> _NETIO_PKT_TYPE_SHIFT) & _NETIO_PKT_TYPE_RMASK; | ||
947 | |||
948 | val = pkt_p[(_netio_pkt_info[index] >> _NETIO_PKT_INFO_VLAN_SHIFT) & | ||
949 | _NETIO_PKT_INFO_VLAN_RMASK]; | ||
950 | |||
951 | #ifdef __TILECC__ | ||
952 | return (__insn_bytex(val) >> 16) & 0xFFF; | ||
953 | #else | ||
954 | return (__builtin_bswap32(val) >> 16) & 0xFFF; | ||
955 | #endif | ||
956 | } | ||
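A small usage sketch: since untagged frames report a VLAN ID of zero (per the comment above), an application that keys state on VLAN might fold zero into a default VLAN of its own choosing. The value 1 here is an arbitrary application-level assumption, not a NetIO constant.

    static unsigned short
    effective_vlan(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
    {
      unsigned short vid = NETIO_PKT_VLAN_ID_M(mda, pkt);

      /* 0 means "no VLAN tag seen"; map it to a hypothetical default. */
      return (vid != 0) ? vid : 1;
    }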
957 | |||
958 | |||
959 | /** Return the ethertype of the packet. | ||
960 | * @ingroup ingress | ||
961 | * | ||
962 | * This value is usually contained within the packet header. | ||
963 | * | ||
964 | * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED_M() | ||
965 | * returns true; otherwise, it may not be well defined. | ||
966 | * | ||
967 | * @param[in] mda Pointer to packet's standard metadata. | ||
968 | * @param[in] pkt Packet on which to operate. | ||
969 | * @return The packet's ethertype. | ||
970 | */ | ||
971 | static __inline unsigned short | ||
972 | NETIO_PKT_ETHERTYPE_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
973 | { | ||
974 | unsigned short* pkt_p = (unsigned short*) NETIO_PKT_L2_DATA_M(mda, pkt); | ||
975 | int index = (mda->__flags >> _NETIO_PKT_TYPE_SHIFT) & _NETIO_PKT_TYPE_RMASK; | ||
976 | |||
977 | unsigned short val = | ||
978 | pkt_p[(_netio_pkt_info[index] >> _NETIO_PKT_INFO_ETYPE_SHIFT) & | ||
979 | _NETIO_PKT_INFO_ETYPE_RMASK]; | ||
980 | |||
981 | return __builtin_bswap32(val) >> 16; | ||
982 | } | ||
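The sketch below only trusts the returned ethertype when NETIO_PKT_ETHERTYPE_RECOGNIZED_M() (defined later in this header) reports success, as the comment above advises. The ethertype constants are standard IEEE values, not NetIO definitions.

    static void
    classify_by_ethertype(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
    {
      if (!NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt))
        return;   /* ethertype may not be well defined; skip classification */

      switch (NETIO_PKT_ETHERTYPE_M(mda, pkt))
      {
      case 0x0800:  /* IPv4: hand off to a hypothetical IPv4 path */
        break;
      case 0x86DD:  /* IPv6: hand off to a hypothetical IPv6 path */
        break;
      default:
        break;
      }
    }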
983 | |||
984 | |||
985 | /** Return the flow hash computed on the packet. | ||
986 | * @ingroup ingress | ||
987 | * | ||
988 | * For TCP and UDP packets, this hash is calculated by hashing together | ||
989 | * the "5-tuple" values, specifically the source IP address, destination | ||
990 | * IP address, protocol type, source port and destination port. | ||
991 | * The hash value is intended to be helpful for millions of distinct | ||
992 | * flows. | ||
993 | * | ||
994 | * For IPv4 or IPv6 packets which are neither TCP nor UDP, the flow hash is | ||
995 | * derived by hashing together the source and destination IP addresses. | ||
996 | * | ||
997 | * For MPLS-encapsulated packets, the flow hash is derived by hashing | ||
998 | * the first MPLS label. | ||
999 | * | ||
1000 | * For all other packets the flow hash is computed from the source | ||
1001 | * and destination Ethernet addresses. | ||
1002 | * | ||
1003 | * The hash is symmetric, meaning it produces the same value if the | ||
1004 | * source and destination are swapped. The only exceptions are | ||
1005 | * tunneling protocols 0x04 (IP in IP Encapsulation), 0x29 (Simple | ||
1006 | * Internet Protocol), 0x2F (General Routing Encapsulation) and 0x32 | ||
1007 | * (Encap Security Payload), which use only the destination address | ||
1008 | * since the source address is not meaningful. | ||
1009 | * | ||
1010 | * @param[in] mda Pointer to packet's standard metadata. | ||
1011 | * @param[in] pkt Packet on which to operate. | ||
1012 | * @return The packet's 32-bit flow hash. | ||
1013 | */ | ||
1014 | static __inline unsigned int | ||
1015 | NETIO_PKT_FLOW_HASH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1016 | { | ||
1017 | return mda->__flow_hash; | ||
1018 | } | ||
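A minimal sketch of the usual application of the flow hash: choosing one of N worker queues. Because the hash is symmetric, both directions of a TCP or UDP connection land in the same bucket. NUM_WORKERS is a hypothetical application constant.

    #define NUM_WORKERS 8   /* hypothetical application constant */

    static unsigned int
    pick_worker(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
    {
      return NETIO_PKT_FLOW_HASH_M(mda, pkt) % NUM_WORKERS;
    }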
1019 | |||
1020 | |||
1021 | /** Return the first word of "user data" for the packet. | ||
1022 | * | ||
1023 | * The contents of the user data words depend on the IPP. | ||
1024 | * | ||
1025 | * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the first | ||
1026 | * word of user data contains the least significant bits of the 64-bit | ||
1027 | * arrival cycle count (see @c get_cycle_count_low()). | ||
1028 | * | ||
1029 | * See the <em>System Programmer's Guide</em> for details. | ||
1030 | * | ||
1031 | * @ingroup ingress | ||
1032 | * | ||
1033 | * @param[in] mda Pointer to packet's standard metadata. | ||
1034 | * @param[in] pkt Packet on which to operate. | ||
1035 | * @return The packet's first word of "user data". | ||
1036 | */ | ||
1037 | static __inline unsigned int | ||
1038 | NETIO_PKT_USER_DATA_0_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1039 | { | ||
1040 | return mda->__user_data_0; | ||
1041 | } | ||
1042 | |||
1043 | |||
1044 | /** Return the second word of "user data" for the packet. | ||
1045 | * | ||
1046 | * The contents of the user data words depend on the IPP. | ||
1047 | * | ||
1048 | * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the second | ||
1049 | * word of user data contains the most significant bits of the 64-bit | ||
1050 | * arrival cycle count (see @c get_cycle_count_high()). | ||
1051 | * | ||
1052 | * See the <em>System Programmer's Guide</em> for details. | ||
1053 | * | ||
1054 | * @ingroup ingress | ||
1055 | * | ||
1056 | * @param[in] mda Pointer to packet's standard metadata. | ||
1057 | * @param[in] pkt Packet on which to operate. | ||
1058 | * @return The packet's second word of "user data". | ||
1059 | */ | ||
1060 | static __inline unsigned int | ||
1061 | NETIO_PKT_USER_DATA_1_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1062 | { | ||
1063 | return mda->__user_data_1; | ||
1064 | } | ||
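Assuming one of the standard ipp1/ipp2/ipp4 sub-drivers described above (other IPPs may put anything in these words), the two user data words can be recombined into the 64-bit arrival cycle count:

    static unsigned long long
    arrival_cycle_count(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
    {
      unsigned long long hi = NETIO_PKT_USER_DATA_1_M(mda, pkt);
      unsigned long long lo = NETIO_PKT_USER_DATA_0_M(mda, pkt);

      return (hi << 32) | lo;
    }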
1065 | |||
1066 | |||
1067 | /** Determine whether the L4 (TCP/UDP) checksum was calculated. | ||
1068 | * @ingroup ingress | ||
1069 | * | ||
1070 | * @param[in] mda Pointer to packet's standard metadata. | ||
1071 | * @param[in] pkt Packet on which to operate. | ||
1072 | * @return Nonzero if the L4 checksum was calculated. | ||
1073 | */ | ||
1074 | static __inline unsigned int | ||
1075 | NETIO_PKT_L4_CSUM_CALCULATED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1076 | { | ||
1077 | return !(mda->__flags & _NETIO_PKT_NO_L4_CSUM_MASK); | ||
1078 | } | ||
1079 | |||
1080 | |||
1081 | /** Determine whether the L4 (TCP/UDP) checksum was calculated and found to | ||
1082 | * be correct. | ||
1083 | * @ingroup ingress | ||
1084 | * | ||
1085 | * @param[in] mda Pointer to packet's standard metadata. | ||
1086 | * @param[in] pkt Packet on which to operate. | ||
1087 | * @return Nonzero if the checksum was calculated and is correct. | ||
1088 | */ | ||
1089 | static __inline unsigned int | ||
1090 | NETIO_PKT_L4_CSUM_CORRECT_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1091 | { | ||
1092 | return !(mda->__flags & | ||
1093 | (_NETIO_PKT_BAD_L4_CSUM_MASK | _NETIO_PKT_NO_L4_CSUM_MASK)); | ||
1094 | } | ||
1095 | |||
1096 | |||
1097 | /** Determine whether the L3 (IP) checksum was calculated. | ||
1098 | * @ingroup ingress | ||
1099 | * | ||
1100 | * @param[in] mda Pointer to packet's standard metadata. | ||
1101 | * @param[in] pkt Packet on which to operate. | ||
1102 | * @return Nonzero if the L3 (IP) checksum was calculated. | ||
1103 | */ | ||
1104 | static __inline unsigned int | ||
1105 | NETIO_PKT_L3_CSUM_CALCULATED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1106 | { | ||
1107 | return !(mda->__flags & _NETIO_PKT_NO_L3_CSUM_MASK); | ||
1108 | } | ||
1109 | |||
1110 | |||
1111 | /** Determine whether the L3 (IP) checksum was calculated and found to be | ||
1112 | * correct. | ||
1113 | * @ingroup ingress | ||
1114 | * | ||
1115 | * @param[in] mda Pointer to packet's standard metadata. | ||
1116 | * @param[in] pkt Packet on which to operate. | ||
1117 | * @return Nonzero if the checksum was calculated and is correct. | ||
1118 | */ | ||
1119 | static __inline unsigned int | ||
1120 | NETIO_PKT_L3_CSUM_CORRECT_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1121 | { | ||
1122 | return !(mda->__flags & | ||
1123 | (_NETIO_PKT_BAD_L3_CSUM_MASK | _NETIO_PKT_NO_L3_CSUM_MASK)); | ||
1124 | } | ||
1125 | |||
1126 | |||
1127 | /** Determine whether the ethertype was recognized and L3 packet data was | ||
1128 | * processed. | ||
1129 | * @ingroup ingress | ||
1130 | * | ||
1131 | * @param[in] mda Pointer to packet's standard metadata. | ||
1132 | * @param[in] pkt Packet on which to operate. | ||
1133 | * @return Nonzero if the ethertype was recognized and L3 packet data was | ||
1134 | * processed. | ||
1135 | */ | ||
1136 | static __inline unsigned int | ||
1137 | NETIO_PKT_ETHERTYPE_RECOGNIZED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1138 | { | ||
1139 | return !(mda->__flags & _NETIO_PKT_TYPE_UNRECOGNIZED_MASK); | ||
1140 | } | ||
1141 | |||
1142 | |||
1143 | /** Retrieve the status of a packet and any errors that may have occurred | ||
1144 | * during ingress processing (length mismatches, CRC errors, etc.). | ||
1145 | * @ingroup ingress | ||
1146 | * | ||
1147 | * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED() | ||
1148 | * returns zero are always reported as underlength, as there is no a priori | ||
1149 | * means to determine their length. Normally, applications should use | ||
1150 | * @ref NETIO_PKT_BAD_M() instead of explicitly checking status with this | ||
1151 | * function. | ||
1152 | * | ||
1153 | * @param[in] mda Pointer to packet's standard metadata. | ||
1154 | * @param[in] pkt Packet on which to operate. | ||
1155 | * @return The packet's status. | ||
1156 | */ | ||
1157 | static __inline netio_pkt_status_t | ||
1158 | NETIO_PKT_STATUS_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1159 | { | ||
1160 | return (netio_pkt_status_t) __NETIO_PKT_NOTIF_HEADER(pkt).bits.__status; | ||
1161 | } | ||
1162 | |||
1163 | |||
1164 | /** Report whether a packet is bad (i.e., was shorter than expected based on | ||
1165 | * its headers, or had a bad CRC). | ||
1166 | * @ingroup ingress | ||
1167 | * | ||
1168 | * Note that this function does not verify L3 or L4 checksums. | ||
1169 | * | ||
1170 | * @param[in] mda Pointer to packet's standard metadata. | ||
1171 | * @param[in] pkt Packet on which to operate. | ||
1172 | * @return Nonzero if the packet is bad and should be discarded. | ||
1173 | */ | ||
1174 | static __inline unsigned int | ||
1175 | NETIO_PKT_BAD_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1176 | { | ||
1177 | return ((NETIO_PKT_STATUS_M(mda, pkt) & 1) && | ||
1178 | (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt) || | ||
1179 | NETIO_PKT_STATUS_M(mda, pkt) == NETIO_PKT_STATUS_BAD)); | ||
1180 | } | ||
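Putting the flag accessors together, a typical ingress filter might look like the sketch below: discard frames NetIO marks bad, then reject packets whose L4 checksum was computed and found wrong. What to do with packets whose checksum was never calculated is an application policy decision; this sketch simply lets them through.

    static int
    packet_acceptable(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
    {
      if (NETIO_PKT_BAD_M(mda, pkt))
        return 0;                    /* truncated frame or bad CRC */

      if (NETIO_PKT_L4_CSUM_CALCULATED_M(mda, pkt) &&
          !NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt))
        return 0;                    /* TCP/UDP checksum known to be wrong */

      return 1;
    }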
1181 | |||
1182 | |||
1183 | /** Return the length of the packet, starting with the L2 (Ethernet) header. | ||
1184 | * @ingroup egress | ||
1185 | * | ||
1186 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1187 | * @param[in] pkt Packet on which to operate. | ||
1188 | * @return The length of the packet, in bytes. | ||
1189 | */ | ||
1190 | static __inline netio_size_t | ||
1191 | NETIO_PKT_L2_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt) | ||
1192 | { | ||
1193 | return mmd->l2_length; | ||
1194 | } | ||
1195 | |||
1196 | |||
1197 | /** Return the length of the L2 (Ethernet) header. | ||
1198 | * @ingroup egress | ||
1199 | * | ||
1200 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1201 | * @param[in] pkt Packet on which to operate. | ||
1202 | * @return The length of the packet's L2 header, in bytes. | ||
1203 | */ | ||
1204 | static __inline netio_size_t | ||
1205 | NETIO_PKT_L2_HEADER_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, | ||
1206 | netio_pkt_t* pkt) | ||
1207 | { | ||
1208 | return mmd->l3_offset - mmd->l2_offset; | ||
1209 | } | ||
1210 | |||
1211 | |||
1212 | /** Return the length of the packet, starting with the L3 (IP) header. | ||
1213 | * @ingroup egress | ||
1214 | * | ||
1215 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1216 | * @param[in] pkt Packet on which to operate. | ||
1217 | * @return Length of the packet's L3 header and data, in bytes. | ||
1218 | */ | ||
1219 | static __inline netio_size_t | ||
1220 | NETIO_PKT_L3_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt) | ||
1221 | { | ||
1222 | return (NETIO_PKT_L2_LENGTH_MM(mmd, pkt) - | ||
1223 | NETIO_PKT_L2_HEADER_LENGTH_MM(mmd, pkt)); | ||
1224 | } | ||
1225 | |||
1226 | |||
1227 | /** Return a pointer to the packet's L3 (generally, the IP) header. | ||
1228 | * @ingroup egress | ||
1229 | * | ||
1230 | * Note that we guarantee word alignment of the L3 header. | ||
1231 | * | ||
1232 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1233 | * @param[in] pkt Packet on which to operate. | ||
1234 | * @return A pointer to the packet's L3 header. | ||
1235 | */ | ||
1236 | static __inline unsigned char* | ||
1237 | NETIO_PKT_L3_DATA_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt) | ||
1238 | { | ||
1239 | return _NETIO_PKT_BASE(pkt) + mmd->l3_offset; | ||
1240 | } | ||
1241 | |||
1242 | |||
1243 | /** Return a pointer to the packet's L2 (Ethernet) header. | ||
1244 | * @ingroup egress | ||
1245 | * | ||
1246 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1247 | * @param[in] pkt Packet on which to operate. | ||
1248 | * @return A pointer to start of the packet. | ||
1249 | */ | ||
1250 | static __inline unsigned char* | ||
1251 | NETIO_PKT_L2_DATA_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt) | ||
1252 | { | ||
1253 | return _NETIO_PKT_BASE(pkt) + mmd->l2_offset; | ||
1254 | } | ||
1255 | |||
1256 | |||
1257 | /** Retrieve the status of a packet and any errors that may have occurred | ||
1258 | * during ingress processing (length mismatches, CRC errors, etc.). | ||
1259 | * @ingroup ingress | ||
1260 | * | ||
1261 | * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED() | ||
1262 | * returns zero are always reported as underlength, as there is no a priori | ||
1263 | * means to determine their length. Normally, applications should use | ||
1264 | * @ref NETIO_PKT_BAD() instead of explicitly checking status with this | ||
1265 | * function. | ||
1266 | * | ||
1267 | * @param[in] pkt Packet on which to operate. | ||
1268 | * @return The packet's status. | ||
1269 | */ | ||
1270 | static __inline netio_pkt_status_t | ||
1271 | NETIO_PKT_STATUS(netio_pkt_t* pkt) | ||
1272 | { | ||
1273 | netio_assert(!pkt->__packet.bits.__minimal); | ||
1274 | |||
1275 | return (netio_pkt_status_t) __NETIO_PKT_NOTIF_HEADER(pkt).bits.__status; | ||
1276 | } | ||
1277 | |||
1278 | |||
1279 | /** Report whether a packet is bad (i.e., was shorter than expected based on | ||
1280 | * its headers, or had a bad CRC). | ||
1281 | * @ingroup ingress | ||
1282 | * | ||
1283 | * Note that this function does not verify L3 or L4 checksums. | ||
1284 | * | ||
1285 | * @param[in] pkt Packet on which to operate. | ||
1286 | * @return Nonzero if the packet is bad and should be discarded. | ||
1287 | */ | ||
1288 | static __inline unsigned int | ||
1289 | NETIO_PKT_BAD(netio_pkt_t* pkt) | ||
1290 | { | ||
1291 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1292 | |||
1293 | return NETIO_PKT_BAD_M(mda, pkt); | ||
1294 | } | ||
1295 | |||
1296 | |||
1297 | /** Return the length of the packet's custom header. | ||
1298 | * A custom header may or may not be present, depending upon the IPP; its | ||
1299 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
1300 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
1301 | * the custom header precedes the L2 header in the packet buffer. | ||
1302 | * @ingroup pktfuncs | ||
1303 | * | ||
1304 | * @param[in] pkt Packet on which to operate. | ||
1305 | * @return The length of the packet's custom header, in bytes. | ||
1306 | */ | ||
1307 | static __inline netio_size_t | ||
1308 | NETIO_PKT_CUSTOM_HEADER_LENGTH(netio_pkt_t* pkt) | ||
1309 | { | ||
1310 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1311 | |||
1312 | return NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt); | ||
1313 | } | ||
1314 | |||
1315 | |||
1316 | /** Return the length of the packet, starting with the custom header. | ||
1317 | * A custom header may or may not be present, depending upon the IPP; its | ||
1318 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
1319 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
1320 | * the custom header precedes the L2 header in the packet buffer. | ||
1321 | * @ingroup pktfuncs | ||
1322 | * | ||
1323 | * @param[in] pkt Packet on which to operate. | ||
1324 | * @return The length of the packet, in bytes. | ||
1325 | */ | ||
1326 | static __inline netio_size_t | ||
1327 | NETIO_PKT_CUSTOM_LENGTH(netio_pkt_t* pkt) | ||
1328 | { | ||
1329 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1330 | |||
1331 | return NETIO_PKT_CUSTOM_LENGTH_M(mda, pkt); | ||
1332 | } | ||
1333 | |||
1334 | |||
1335 | /** Return a pointer to the packet's custom header. | ||
1336 | * A custom header may or may not be present, depending upon the IPP; its | ||
1337 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
1338 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
1339 | * the custom header precedes the L2 header in the packet buffer. | ||
1340 | * @ingroup pktfuncs | ||
1341 | * | ||
1342 | * @param[in] pkt Packet on which to operate. | ||
1343 | * @return A pointer to start of the packet. | ||
1344 | */ | ||
1345 | static __inline unsigned char* | ||
1346 | NETIO_PKT_CUSTOM_DATA(netio_pkt_t* pkt) | ||
1347 | { | ||
1348 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1349 | |||
1350 | return NETIO_PKT_CUSTOM_DATA_M(mda, pkt); | ||
1351 | } | ||
1352 | |||
1353 | |||
1354 | /** Return the length of the packet's L2 (Ethernet plus VLAN or SNAP) header. | ||
1355 | * @ingroup pktfuncs | ||
1356 | * | ||
1357 | * @param[in] pkt Packet on which to operate. | ||
1358 | * @return The length of the packet's L2 header, in bytes. | ||
1359 | */ | ||
1360 | static __inline netio_size_t | ||
1361 | NETIO_PKT_L2_HEADER_LENGTH(netio_pkt_t* pkt) | ||
1362 | { | ||
1363 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
1364 | { | ||
1365 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1366 | |||
1367 | return NETIO_PKT_L2_HEADER_LENGTH_MM(mmd, pkt); | ||
1368 | } | ||
1369 | else | ||
1370 | { | ||
1371 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1372 | |||
1373 | return NETIO_PKT_L2_HEADER_LENGTH_M(mda, pkt); | ||
1374 | } | ||
1375 | } | ||
1376 | |||
1377 | |||
1378 | /** Return the length of the packet, starting with the L2 (Ethernet) header. | ||
1379 | * @ingroup pktfuncs | ||
1380 | * | ||
1381 | * @param[in] pkt Packet on which to operate. | ||
1382 | * @return The length of the packet, in bytes. | ||
1383 | */ | ||
1384 | static __inline netio_size_t | ||
1385 | NETIO_PKT_L2_LENGTH(netio_pkt_t* pkt) | ||
1386 | { | ||
1387 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
1388 | { | ||
1389 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1390 | |||
1391 | return NETIO_PKT_L2_LENGTH_MM(mmd, pkt); | ||
1392 | } | ||
1393 | else | ||
1394 | { | ||
1395 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1396 | |||
1397 | return NETIO_PKT_L2_LENGTH_M(mda, pkt); | ||
1398 | } | ||
1399 | } | ||
1400 | |||
1401 | |||
1402 | /** Return a pointer to the packet's L2 (Ethernet) header. | ||
1403 | * @ingroup pktfuncs | ||
1404 | * | ||
1405 | * @param[in] pkt Packet on which to operate. | ||
1406 | * @return A pointer to start of the packet. | ||
1407 | */ | ||
1408 | static __inline unsigned char* | ||
1409 | NETIO_PKT_L2_DATA(netio_pkt_t* pkt) | ||
1410 | { | ||
1411 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
1412 | { | ||
1413 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1414 | |||
1415 | return NETIO_PKT_L2_DATA_MM(mmd, pkt); | ||
1416 | } | ||
1417 | else | ||
1418 | { | ||
1419 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1420 | |||
1421 | return NETIO_PKT_L2_DATA_M(mda, pkt); | ||
1422 | } | ||
1423 | } | ||
1424 | |||
1425 | |||
1426 | /** Retrieve the length of the packet, starting with the L3 (generally, the IP) | ||
1427 | * header. | ||
1428 | * @ingroup pktfuncs | ||
1429 | * | ||
1430 | * @param[in] pkt Packet on which to operate. | ||
1431 | * @return Length of the packet's L3 header and data, in bytes. | ||
1432 | */ | ||
1433 | static __inline netio_size_t | ||
1434 | NETIO_PKT_L3_LENGTH(netio_pkt_t* pkt) | ||
1435 | { | ||
1436 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
1437 | { | ||
1438 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1439 | |||
1440 | return NETIO_PKT_L3_LENGTH_MM(mmd, pkt); | ||
1441 | } | ||
1442 | else | ||
1443 | { | ||
1444 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1445 | |||
1446 | return NETIO_PKT_L3_LENGTH_M(mda, pkt); | ||
1447 | } | ||
1448 | } | ||
1449 | |||
1450 | |||
1451 | /** Return a pointer to the packet's L3 (generally, the IP) header. | ||
1452 | * @ingroup pktfuncs | ||
1453 | * | ||
1454 | * Note that we guarantee word alignment of the L3 header. | ||
1455 | * | ||
1456 | * @param[in] pkt Packet on which to operate. | ||
1457 | * @return A pointer to the packet's L3 header. | ||
1458 | */ | ||
1459 | static __inline unsigned char* | ||
1460 | NETIO_PKT_L3_DATA(netio_pkt_t* pkt) | ||
1461 | { | ||
1462 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
1463 | { | ||
1464 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1465 | |||
1466 | return NETIO_PKT_L3_DATA_MM(mmd, pkt); | ||
1467 | } | ||
1468 | else | ||
1469 | { | ||
1470 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1471 | |||
1472 | return NETIO_PKT_L3_DATA_M(mda, pkt); | ||
1473 | } | ||
1474 | } | ||
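As a usage sketch, the helper below peeks at the IPv4 protocol field of a received packet. The byte offset (9) comes from the standard IPv4 header layout, not from NetIO, and the caller is assumed to have already verified that the packet's ethertype is IPv4.

    static unsigned char
    ipv4_protocol(netio_pkt_t* pkt)
    {
      unsigned char* l3 = NETIO_PKT_L3_DATA(pkt);

      return l3[9];   /* IPv4 "protocol" field, e.g. 6 = TCP, 17 = UDP */
    }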
1475 | |||
1476 | |||
1477 | /** Return the ordinal of the packet. | ||
1478 | * @ingroup ingress | ||
1479 | * | ||
1480 | * Each packet is given an ordinal number when it is delivered by the IPP. | ||
1481 | * In the medium term, the ordinal is unique and monotonically increasing, | ||
1482 | * being incremented by 1 for each packet; the ordinal of the first packet | ||
1483 | * delivered after the IPP starts is zero. (Since the ordinal is of finite | ||
1484 | * size, given enough input packets, it will eventually wrap around to zero; | ||
1485 | * in the long term, therefore, ordinals are not unique.) The ordinals | ||
1486 | * handed out by different IPPs are not disjoint, so two packets from | ||
1487 | * different IPPs may have identical ordinals. Packets dropped by the | ||
1488 | * IPP or by the I/O shim are not assigned ordinals. | ||
1489 | * | ||
1491 | * @param[in] pkt Packet on which to operate. | ||
1492 | * @return The packet's per-IPP packet ordinal. | ||
1493 | */ | ||
1494 | static __inline unsigned int | ||
1495 | NETIO_PKT_ORDINAL(netio_pkt_t* pkt) | ||
1496 | { | ||
1497 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1498 | |||
1499 | return NETIO_PKT_ORDINAL_M(mda, pkt); | ||
1500 | } | ||
1501 | |||
1502 | |||
1503 | /** Return the per-group ordinal of the packet. | ||
1504 | * @ingroup ingress | ||
1505 | * | ||
1506 | * Each packet is given a per-group ordinal number when it is | ||
1507 | * delivered by the IPP. By default, the group is the packet's VLAN, | ||
1508 | * although the IPP can be recompiled to use different values. In | ||
1509 | * the medium term, the ordinal is unique and monotonically | ||
1510 | * increasing, being incremented by 1 for each packet; the ordinal of | ||
1511 | * the first packet distributed to a particular group is zero. | ||
1512 | * (Since the ordinal is of finite size, given enough input packets, | ||
1513 | * it will eventually wrap around to zero; in the long term, | ||
1514 | * therefore, ordinals are not unique.) The ordinals handed out by | ||
1515 | * different IPPs are not disjoint, so two packets from different IPPs | ||
1516 | * may have identical ordinals; similarly, packets distributed to | ||
1517 | * different groups may have identical ordinals. Packets dropped by | ||
1518 | * the IPP or by the I/O shim are not assigned ordinals. | ||
1519 | * | ||
1520 | * @param[in] pkt Packet on which to operate. | ||
1521 | * @return The packet's per-IPP, per-group ordinal. | ||
1522 | */ | ||
1523 | static __inline unsigned int | ||
1524 | NETIO_PKT_GROUP_ORDINAL(netio_pkt_t* pkt) | ||
1525 | { | ||
1526 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1527 | |||
1528 | return NETIO_PKT_GROUP_ORDINAL_M(mda, pkt); | ||
1529 | } | ||
1530 | |||
1531 | |||
1532 | /** Return the VLAN ID assigned to the packet. | ||
1533 | * @ingroup ingress | ||
1534 | * | ||
1535 | * This is usually also contained within the packet header. If the packet | ||
1536 | * does not have a VLAN tag, the VLAN ID returned by this function is zero. | ||
1537 | * | ||
1538 | * @param[in] pkt Packet on which to operate. | ||
1539 | * @return The packet's VLAN ID. | ||
1540 | */ | ||
1541 | static __inline unsigned short | ||
1542 | NETIO_PKT_VLAN_ID(netio_pkt_t* pkt) | ||
1543 | { | ||
1544 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1545 | |||
1546 | return NETIO_PKT_VLAN_ID_M(mda, pkt); | ||
1547 | } | ||
1548 | |||
1549 | |||
1550 | /** Return the ethertype of the packet. | ||
1551 | * @ingroup ingress | ||
1552 | * | ||
1553 | * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED() | ||
1554 | * returns true; otherwise, it may not be well defined. | ||
1555 | * | ||
1556 | * @param[in] pkt Packet on which to operate. | ||
1557 | * @return The packet's ethertype. | ||
1558 | */ | ||
1559 | static __inline unsigned short | ||
1560 | NETIO_PKT_ETHERTYPE(netio_pkt_t* pkt) | ||
1561 | { | ||
1562 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1563 | |||
1564 | return NETIO_PKT_ETHERTYPE_M(mda, pkt); | ||
1565 | } | ||
1566 | |||
1567 | |||
1568 | /** Return the flow hash computed on the packet. | ||
1569 | * @ingroup ingress | ||
1570 | * | ||
1571 | * For TCP and UDP packets, this hash is calculated by hashing together | ||
1572 | * the "5-tuple" values, specifically the source IP address, destination | ||
1573 | * IP address, protocol type, source port and destination port. | ||
1574 | * The hash value is intended to be helpful for millions of distinct | ||
1575 | * flows. | ||
1576 | * | ||
1577 | * For IPv4 or IPv6 packets which are neither TCP nor UDP, the flow hash is | ||
1578 | * derived by hashing together the source and destination IP addresses. | ||
1579 | * | ||
1580 | * For MPLS-encapsulated packets, the flow hash is derived by hashing | ||
1581 | * the first MPLS label. | ||
1582 | * | ||
1583 | * For all other packets the flow hash is computed from the source | ||
1584 | * and destination Ethernet addresses. | ||
1585 | * | ||
1586 | * The hash is symmetric, meaning it produces the same value if the | ||
1587 | * source and destination are swapped. The only exceptions are | ||
1588 | * tunneling protocols 0x04 (IP in IP Encapsulation), 0x29 (Simple | ||
1589 | * Internet Protocol), 0x2F (General Routing Encapsulation) and 0x32 | ||
1590 | * (Encap Security Payload), which use only the destination address | ||
1591 | * since the source address is not meaningful. | ||
1592 | * | ||
1593 | * @param[in] pkt Packet on which to operate. | ||
1594 | * @return The packet's 32-bit flow hash. | ||
1595 | */ | ||
1596 | static __inline unsigned int | ||
1597 | NETIO_PKT_FLOW_HASH(netio_pkt_t* pkt) | ||
1598 | { | ||
1599 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1600 | |||
1601 | return NETIO_PKT_FLOW_HASH_M(mda, pkt); | ||
1602 | } | ||
1603 | |||
1604 | |||
1605 | /** Return the first word of "user data" for the packet. | ||
1606 | * | ||
1607 | * The contents of the user data words depend on the IPP. | ||
1608 | * | ||
1609 | * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the first | ||
1610 | * word of user data contains the least significant bits of the 64-bit | ||
1611 | * arrival cycle count (see @c get_cycle_count_low()). | ||
1612 | * | ||
1613 | * See the <em>System Programmer's Guide</em> for details. | ||
1614 | * | ||
1615 | * @ingroup ingress | ||
1616 | * | ||
1617 | * @param[in] pkt Packet on which to operate. | ||
1618 | * @return The packet's first word of "user data". | ||
1619 | */ | ||
1620 | static __inline unsigned int | ||
1621 | NETIO_PKT_USER_DATA_0(netio_pkt_t* pkt) | ||
1622 | { | ||
1623 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1624 | |||
1625 | return NETIO_PKT_USER_DATA_0_M(mda, pkt); | ||
1626 | } | ||
1627 | |||
1628 | |||
1629 | /** Return the second word of "user data" for the packet. | ||
1630 | * | ||
1631 | * The contents of the user data words depend on the IPP. | ||
1632 | * | ||
1633 | * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the second | ||
1634 | * word of user data contains the most significant bits of the 64-bit | ||
1635 | * arrival cycle count (see @c get_cycle_count_high()). | ||
1636 | * | ||
1637 | * See the <em>System Programmer's Guide</em> for details. | ||
1638 | * | ||
1639 | * @ingroup ingress | ||
1640 | * | ||
1641 | * @param[in] pkt Packet on which to operate. | ||
1642 | * @return The packet's second word of "user data". | ||
1643 | */ | ||
1644 | static __inline unsigned int | ||
1645 | NETIO_PKT_USER_DATA_1(netio_pkt_t* pkt) | ||
1646 | { | ||
1647 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1648 | |||
1649 | return NETIO_PKT_USER_DATA_1_M(mda, pkt); | ||
1650 | } | ||
1651 | |||
1652 | |||
1653 | /** Determine whether the L4 (TCP/UDP) checksum was calculated. | ||
1654 | * @ingroup ingress | ||
1655 | * | ||
1656 | * @param[in] pkt Packet on which to operate. | ||
1657 | * @return Nonzero if the L4 checksum was calculated. | ||
1658 | */ | ||
1659 | static __inline unsigned int | ||
1660 | NETIO_PKT_L4_CSUM_CALCULATED(netio_pkt_t* pkt) | ||
1661 | { | ||
1662 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1663 | |||
1664 | return NETIO_PKT_L4_CSUM_CALCULATED_M(mda, pkt); | ||
1665 | } | ||
1666 | |||
1667 | |||
1668 | /** Determine whether the L4 (TCP/UDP) checksum was calculated and found to | ||
1669 | * be correct. | ||
1670 | * @ingroup ingress | ||
1671 | * | ||
1672 | * @param[in] pkt Packet on which to operate. | ||
1673 | * @return Nonzero if the checksum was calculated and is correct. | ||
1674 | */ | ||
1675 | static __inline unsigned int | ||
1676 | NETIO_PKT_L4_CSUM_CORRECT(netio_pkt_t* pkt) | ||
1677 | { | ||
1678 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1679 | |||
1680 | return NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt); | ||
1681 | } | ||
1682 | |||
1683 | |||
1684 | /** Determine whether the L3 (IP) checksum was calculated. | ||
1685 | * @ingroup ingress | ||
1686 | * | ||
1687 | * @param[in] pkt Packet on which to operate. | ||
1688 | * @return Nonzero if the L3 (IP) checksum was calculated. | ||
1689 | */ | ||
1690 | static __inline unsigned int | ||
1691 | NETIO_PKT_L3_CSUM_CALCULATED(netio_pkt_t* pkt) | ||
1692 | { | ||
1693 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1694 | |||
1695 | return NETIO_PKT_L3_CSUM_CALCULATED_M(mda, pkt); | ||
1696 | } | ||
1697 | |||
1698 | |||
1699 | /** Determine whether the L3 (IP) checksum was calculated and found to be | ||
1700 | * correct. | ||
1701 | * @ingroup ingress | ||
1702 | * | ||
1703 | * @param[in] pkt Packet on which to operate. | ||
1704 | * @return Nonzero if the checksum was calculated and is correct. | ||
1705 | */ | ||
1706 | static __inline unsigned int | ||
1707 | NETIO_PKT_L3_CSUM_CORRECT(netio_pkt_t* pkt) | ||
1708 | { | ||
1709 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1710 | |||
1711 | return NETIO_PKT_L3_CSUM_CORRECT_M(mda, pkt); | ||
1712 | } | ||
1713 | |||
1714 | |||
1715 | /** Determine whether the Ethertype was recognized and L3 packet data was | ||
1716 | * processed. | ||
1717 | * @ingroup ingress | ||
1718 | * | ||
1719 | * @param[in] pkt Packet on which to operate. | ||
1720 | * @return Nonzero if the Ethertype was recognized and L3 packet data was | ||
1721 | * processed. | ||
1722 | */ | ||
1723 | static __inline unsigned int | ||
1724 | NETIO_PKT_ETHERTYPE_RECOGNIZED(netio_pkt_t* pkt) | ||
1725 | { | ||
1726 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1727 | |||
1728 | return NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt); | ||
1729 | } | ||
1730 | |||
1731 | |||
1732 | /** Set an egress packet's L2 length, using a metadata pointer to speed the | ||
1733 | * computation. | ||
1734 | * @ingroup egress | ||
1735 | * | ||
1736 | * @param[in,out] mmd Pointer to packet's minimal metadata. | ||
1737 | * @param[in] pkt Packet on which to operate. | ||
1738 | * @param[in] len Packet L2 length, in bytes. | ||
1739 | */ | ||
1740 | static __inline void | ||
1741 | NETIO_PKT_SET_L2_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt, | ||
1742 | int len) | ||
1743 | { | ||
1744 | mmd->l2_length = len; | ||
1745 | } | ||
1746 | |||
1747 | |||
1748 | /** Set an egress packet's L2 length. | ||
1749 | * @ingroup egress | ||
1750 | * | ||
1751 | * @param[in,out] pkt Packet on which to operate. | ||
1752 | * @param[in] len Packet L2 length, in bytes. | ||
1753 | */ | ||
1754 | static __inline void | ||
1755 | NETIO_PKT_SET_L2_LENGTH(netio_pkt_t* pkt, int len) | ||
1756 | { | ||
1757 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1758 | |||
1759 | NETIO_PKT_SET_L2_LENGTH_MM(mmd, pkt, len); | ||
1760 | } | ||
1761 | |||
1762 | |||
1763 | /** Set an egress packet's L2 header length, using a metadata pointer to | ||
1764 | * speed the computation. | ||
1765 | * @ingroup egress | ||
1766 | * | ||
1767 | * It is not normally necessary to call this routine; only the L2 length, | ||
1768 | * not the header length, is needed to transmit a packet. It may be useful if | ||
1769 | * the egress packet will later be processed by code which expects to use | ||
1770 | * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload. | ||
1771 | * | ||
1772 | * @param[in,out] mmd Pointer to packet's minimal metadata. | ||
1773 | * @param[in] pkt Packet on which to operate. | ||
1774 | * @param[in] len Packet L2 header length, in bytes. | ||
1775 | */ | ||
1776 | static __inline void | ||
1777 | NETIO_PKT_SET_L2_HEADER_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, | ||
1778 | netio_pkt_t* pkt, int len) | ||
1779 | { | ||
1780 | mmd->l3_offset = mmd->l2_offset + len; | ||
1781 | } | ||
1782 | |||
1783 | |||
1784 | /** Set an egress packet's L2 header length. | ||
1785 | * @ingroup egress | ||
1786 | * | ||
1787 | * It is not normally necessary to call this routine; only the L2 length, | ||
1788 | * not the header length, is needed to transmit a packet. It may be useful if | ||
1789 | * the egress packet will later be processed by code which expects to use | ||
1790 | * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload. | ||
1791 | * | ||
1792 | * @param[in,out] pkt Packet on which to operate. | ||
1793 | * @param[in] len Packet L2 header length, in bytes. | ||
1794 | */ | ||
1795 | static __inline void | ||
1796 | NETIO_PKT_SET_L2_HEADER_LENGTH(netio_pkt_t* pkt, int len) | ||
1797 | { | ||
1798 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1799 | |||
1800 | NETIO_PKT_SET_L2_HEADER_LENGTH_MM(mmd, pkt, len); | ||
1801 | } | ||
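A minimal egress sketch tying the two setters together: after writing an (untagged, hence 14-byte) Ethernet header at the front of the buffer, record both the total L2 length and the header length so that later code can still use NETIO_PKT_L3_DATA(). payload_len is a hypothetical caller-supplied value.

    static void
    finish_l2_frame(netio_pkt_t* pkt, int payload_len)
    {
      unsigned char* l2 = NETIO_PKT_L2_DATA(pkt);

      /* ... fill l2[0..13] with destination MAC, source MAC, ethertype ... */
      (void)l2;

      NETIO_PKT_SET_L2_HEADER_LENGTH(pkt, 14);
      NETIO_PKT_SET_L2_LENGTH(pkt, 14 + payload_len);
    }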
1802 | |||
1803 | |||
1804 | /** Set up an egress packet for hardware checksum computation, using a | ||
1805 | * metadata pointer to speed the operation. | ||
1806 | * @ingroup egress | ||
1807 | * | ||
1808 | * NetIO provides the ability to automatically calculate a standard | ||
1809 | * 16-bit Internet checksum on transmitted packets. The application | ||
1810 | * may specify the point in the packet where the checksum starts, the | ||
1811 | * number of bytes to be checksummed, and the two bytes in the packet | ||
1812 | * which will be replaced with the completed checksum. (If the range | ||
1813 | * of bytes to be checksummed includes the bytes to be replaced, the | ||
1814 | * initial values of those bytes will be included in the checksum.) | ||
1815 | * | ||
1816 | * For some protocols, the packet checksum covers data which is not present | ||
1817 | * in the packet, or is at least not contiguous to the main data payload. | ||
1818 | * For instance, the TCP checksum includes a "pseudo-header" which includes | ||
1819 | * the source and destination IP addresses of the packet. To accommodate | ||
1820 | * this, the checksum engine may be "seeded" with an initial value, which | ||
1821 | * the application would need to compute based on the specific protocol's | ||
1822 | * requirements. Note that the seed is given in host byte order (little- | ||
1823 | * endian), not network byte order (big-endian); code written to compute a | ||
1824 | * pseudo-header checksum in network byte order will need to byte-swap it | ||
1825 | * before use as the seed. | ||
1826 | * | ||
1827 | * Note that the checksum is computed as part of the transmission process, | ||
1828 | * so it will not be present in the packet upon completion of this routine. | ||
1829 | * | ||
1830 | * @param[in,out] mmd Pointer to packet's minimal metadata. | ||
1831 | * @param[in] pkt Packet on which to operate. | ||
1832 | * @param[in] start Offset within L2 packet of the first byte to include in | ||
1833 | * the checksum. | ||
1834 | * @param[in] length Number of bytes to include in the checksum. | ||
1836 | * @param[in] location Offset within L2 packet of the first of the two bytes | ||
1837 | * to be replaced with the calculated checksum. | ||
1838 | * @param[in] seed Initial value of the running checksum before any of the | ||
1839 | * packet data is added. | ||
1840 | */ | ||
1841 | static __inline void | ||
1842 | NETIO_PKT_DO_EGRESS_CSUM_MM(netio_pkt_minimal_metadata_t* mmd, | ||
1843 | netio_pkt_t* pkt, int start, int length, | ||
1844 | int location, uint16_t seed) | ||
1845 | { | ||
1846 | mmd->csum_start = start; | ||
1847 | mmd->csum_length = length; | ||
1848 | mmd->csum_location = location; | ||
1849 | mmd->csum_seed = seed; | ||
1850 | mmd->flags |= _NETIO_PKT_NEED_EDMA_CSUM_MASK; | ||
1851 | } | ||
1852 | |||
1853 | |||
1854 | /** Set up an egress packet for hardware checksum computation. | ||
1855 | * @ingroup egress | ||
1856 | * | ||
1857 | * NetIO provides the ability to automatically calculate a standard | ||
1858 | * 16-bit Internet checksum on transmitted packets. The application | ||
1859 | * may specify the point in the packet where the checksum starts, the | ||
1860 | * number of bytes to be checksummed, and the two bytes in the packet | ||
1861 | * which will be replaced with the completed checksum. (If the range | ||
1862 | * of bytes to be checksummed includes the bytes to be replaced, the | ||
1863 | * initial values of those bytes will be included in the checksum.) | ||
1864 | * | ||
1865 | * For some protocols, the packet checksum covers data which is not present | ||
1866 | * in the packet, or is at least not contiguous to the main data payload. | ||
1867 | * For instance, the TCP checksum includes a "pseudo-header" which includes | ||
1868 | * the source and destination IP addresses of the packet. To accommodate | ||
1869 | * this, the checksum engine may be "seeded" with an initial value, which | ||
1870 | * the application would need to compute based on the specific protocol's | ||
1871 | * requirements. Note that the seed is given in host byte order (little- | ||
1872 | * endian), not network byte order (big-endian); code written to compute a | ||
1873 | * pseudo-header checksum in network byte order will need to byte-swap it | ||
1874 | * before use as the seed. | ||
1875 | * | ||
1876 | * Note that the checksum is computed as part of the transmission process, | ||
1877 | * so it will not be present in the packet upon completion of this routine. | ||
1878 | * | ||
1879 | * @param[in,out] pkt Packet on which to operate. | ||
1880 | * @param[in] start Offset within L2 packet of the first byte to include in | ||
1881 | * the checksum. | ||
1882 | * @param[in] length Number of bytes to include in the checksum. | ||
1884 | * @param[in] location Offset within L2 packet of the first of the two bytes | ||
1885 | * to be replaced with the calculated checksum. | ||
1886 | * @param[in] seed Initial value of the running checksum before any of the | ||
1887 | * packet data is added. | ||
1888 | */ | ||
1889 | static __inline void | ||
1890 | NETIO_PKT_DO_EGRESS_CSUM(netio_pkt_t* pkt, int start, int length, | ||
1891 | int location, uint16_t seed) | ||
1892 | { | ||
1893 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1894 | |||
1895 | NETIO_PKT_DO_EGRESS_CSUM_MM(mmd, pkt, start, length, location, seed); | ||
1896 | } | ||
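A hedged example of wiring this up for UDP over untagged IPv4: the offsets below assume a 14-byte Ethernet header and a 20-byte IP header, so the UDP header starts at offset 34 and its checksum field sits 6 bytes into it, at offset 40. udp_len and pseudo_seed are hypothetical caller-supplied values; the seed must already be the folded pseudo-header sum in host byte order, as described above.

    static void
    request_udp_checksum(netio_pkt_t* pkt, int udp_len, uint16_t pseudo_seed)
    {
      NETIO_PKT_DO_EGRESS_CSUM(pkt, 34 /* start */, udp_len,
                               40 /* checksum field */, pseudo_seed);
    }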
1897 | |||
1898 | |||
1899 | /** Return the number of bytes which could be prepended to a packet, using a | ||
1900 | * metadata pointer to speed the operation. | ||
1901 | * See @ref netio_populate_prepend_buffer() to get a full description of | ||
1902 | * prepending. | ||
1903 | * | ||
1904 | * @param[in,out] mda Pointer to packet's standard metadata. | ||
1905 | * @param[in] pkt Packet on which to operate. | ||
1906 | */ | ||
1907 | static __inline int | ||
1908 | NETIO_PKT_PREPEND_AVAIL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1909 | { | ||
1910 | return (pkt->__packet.bits.__offset << 6) + | ||
1911 | NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt); | ||
1912 | } | ||
1913 | |||
1914 | |||
1915 | /** Return the number of bytes which could be prepended to a packet, using a | ||
1916 | * metadata pointer to speed the operation. | ||
1917 | * See @ref netio_populate_prepend_buffer() to get a full description of | ||
1918 | * prepending. | ||
1919 | * @ingroup egress | ||
1920 | * | ||
1921 | * @param[in,out] mmd Pointer to packet's minimal metadata. | ||
1922 | * @param[in] pkt Packet on which to operate. | ||
1923 | */ | ||
1924 | static __inline int | ||
1925 | NETIO_PKT_PREPEND_AVAIL_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt) | ||
1926 | { | ||
1927 | return (pkt->__packet.bits.__offset << 6) + mmd->l2_offset; | ||
1928 | } | ||
1929 | |||
1930 | |||
1931 | /** Return the number of bytes which could be prepended to a packet. | ||
1932 | * See @ref netio_populate_prepend_buffer() to get a full description of | ||
1933 | * prepending. | ||
1934 | * @ingroup egress | ||
1935 | * | ||
1936 | * @param[in] pkt Packet on which to operate. | ||
1937 | */ | ||
1938 | static __inline int | ||
1939 | NETIO_PKT_PREPEND_AVAIL(netio_pkt_t* pkt) | ||
1940 | { | ||
1941 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
1942 | { | ||
1943 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1944 | |||
1945 | return NETIO_PKT_PREPEND_AVAIL_MM(mmd, pkt); | ||
1946 | } | ||
1947 | else | ||
1948 | { | ||
1949 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1950 | |||
1951 | return NETIO_PKT_PREPEND_AVAIL_M(mda, pkt); | ||
1952 | } | ||
1953 | } | ||
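A trivial sketch of the intended use: check that enough headroom exists before trying to prepend a header. The prepend operation itself is performed via netio_populate_prepend_buffer(), described elsewhere in this header; header_bytes is a hypothetical caller-supplied size.

    static int
    can_prepend(netio_pkt_t* pkt, int header_bytes)
    {
      return NETIO_PKT_PREPEND_AVAIL(pkt) >= header_bytes;
    }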
1954 | |||
1955 | |||
1956 | /** Flush a packet's minimal metadata from the cache, using a metadata pointer | ||
1957 | * to speed the operation. | ||
1958 | * @ingroup egress | ||
1959 | * | ||
1960 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1961 | * @param[in] pkt Packet on which to operate. | ||
1962 | */ | ||
1963 | static __inline void | ||
1964 | NETIO_PKT_FLUSH_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd, | ||
1965 | netio_pkt_t* pkt) | ||
1966 | { | ||
1967 | } | ||
1968 | |||
1969 | |||
1970 | /** Invalidate a packet's minimal metadata from the cache, using a metadata | ||
1971 | * pointer to speed the operation. | ||
1972 | * @ingroup egress | ||
1973 | * | ||
1974 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1975 | * @param[in] pkt Packet on which to operate. | ||
1976 | */ | ||
1977 | static __inline void | ||
1978 | NETIO_PKT_INV_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd, | ||
1979 | netio_pkt_t* pkt) | ||
1980 | { | ||
1981 | } | ||
1982 | |||
1983 | |||
1984 | /** Flush and then invalidate a packet's minimal metadata from the cache, | ||
1985 | * using a metadata pointer to speed the operation. | ||
1986 | * @ingroup egress | ||
1987 | * | ||
1988 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1989 | * @param[in] pkt Packet on which to operate. | ||
1990 | */ | ||
1991 | static __inline void | ||
1992 | NETIO_PKT_FLUSH_INV_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd, | ||
1993 | netio_pkt_t* pkt) | ||
1994 | { | ||
1995 | } | ||
1996 | |||
1997 | |||
1998 | /** Flush a packet's metadata from the cache, using a metadata pointer | ||
1999 | * to speed the operation. | ||
2000 | * @ingroup ingress | ||
2001 | * | ||
2002 | * @param[in] mda Pointer to packet's standard metadata. | ||
2003 | * @param[in] pkt Packet on which to operate. | ||
2004 | */ | ||
2005 | static __inline void | ||
2006 | NETIO_PKT_FLUSH_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
2007 | { | ||
2008 | } | ||
2009 | |||
2010 | |||
2011 | /** Invalidate a packet's metadata from the cache, using a metadata | ||
2012 | * pointer to speed the operation. | ||
2013 | * @ingroup ingress | ||
2014 | * | ||
2015 | * @param[in] mda Pointer to packet's metadata. | ||
2016 | * @param[in] pkt Packet on which to operate. | ||
2017 | */ | ||
2018 | static __inline void | ||
2019 | NETIO_PKT_INV_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
2020 | { | ||
2021 | } | ||
2022 | |||
2023 | |||
2024 | /** Flush and then invalidate a packet's metadata from the cache, | ||
2025 | * using a metadata pointer to speed the operation. | ||
2026 | * @ingroup ingress | ||
2027 | * | ||
2028 | * @param[in] mda Pointer to packet's metadata. | ||
2029 | * @param[in] pkt Packet on which to operate. | ||
2030 | */ | ||
2031 | static __inline void | ||
2032 | NETIO_PKT_FLUSH_INV_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
2033 | { | ||
2034 | } | ||
2035 | |||
2036 | |||
2037 | /** Flush a packet's minimal metadata from the cache. | ||
2038 | * @ingroup egress | ||
2039 | * | ||
2040 | * @param[in] pkt Packet on which to operate. | ||
2041 | */ | ||
2042 | static __inline void | ||
2043 | NETIO_PKT_FLUSH_MINIMAL_METADATA(netio_pkt_t* pkt) | ||
2044 | { | ||
2045 | } | ||
2046 | |||
2047 | |||
2048 | /** Invalidate a packet's minimal metadata from the cache. | ||
2049 | * @ingroup egress | ||
2050 | * | ||
2051 | * @param[in] pkt Packet on which to operate. | ||
2052 | */ | ||
2053 | static __inline void | ||
2054 | NETIO_PKT_INV_MINIMAL_METADATA(netio_pkt_t* pkt) | ||
2055 | { | ||
2056 | } | ||
2057 | |||
2058 | |||
2059 | /** Flush and then invalidate a packet's minimal metadata from the cache. | ||
2060 | * @ingroup egress | ||
2061 | * | ||
2062 | * @param[in] pkt Packet on which to operate. | ||
2063 | */ | ||
2064 | static __inline void | ||
2065 | NETIO_PKT_FLUSH_INV_MINIMAL_METADATA(netio_pkt_t* pkt) | ||
2066 | { | ||
2067 | } | ||
2068 | |||
2069 | |||
2070 | /** Flush a packet's metadata from the cache. | ||
2071 | * @ingroup ingress | ||
2072 | * | ||
2073 | * @param[in] pkt Packet on which to operate. | ||
2074 | */ | ||
2075 | static __inline void | ||
2076 | NETIO_PKT_FLUSH_METADATA(netio_pkt_t* pkt) | ||
2077 | { | ||
2078 | } | ||
2079 | |||
2080 | |||
2081 | /** Invalidate a packet's metadata from the cache. | ||
2082 | * @ingroup ingress | ||
2083 | * | ||
2084 | * @param[in] pkt Packet on which to operate. | ||
2085 | */ | ||
2086 | static __inline void | ||
2087 | NETIO_PKT_INV_METADATA(netio_pkt_t* pkt) | ||
2088 | { | ||
2089 | } | ||
2090 | |||
2091 | |||
2092 | /** Flush and then invalidate a packet's metadata from the cache. | ||
2093 | * @ingroup ingress | ||
2094 | * | ||
2095 | * @param[in] pkt Packet on which to operate. | ||
2096 | */ | ||
2097 | static __inline void | ||
2098 | NETIO_PKT_FLUSH_INV_METADATA(netio_pkt_t* pkt) | ||
2099 | { | ||
2100 | } | ||
2101 | |||
2102 | /** Number of NUMA nodes we can distribute buffers to. | ||
2103 | * @ingroup setup */ | ||
2104 | #define NETIO_NUM_NODE_WEIGHTS 16 | ||
2105 | |||
2106 | /** | ||
2107 | * @brief An object for specifying the characteristics of a NetIO communication | ||
2108 | * endpoint. | ||
2109 | * | ||
2110 | * @ingroup setup | ||
2111 | * | ||
2112 | * The @ref netio_input_register() function uses this structure to define | ||
2113 | * how an application tile will communicate with an IPP. | ||
2114 | * | ||
2116 | * Future updates to NetIO may add new members to this structure, | ||
2117 | * which can affect the success of the registration operation. Thus, | ||
2118 | * if dynamically initializing the structure, applications are urged to | ||
2119 | * zero it out first, for example: | ||
2120 | * | ||
2121 | * @code | ||
2122 | * netio_input_config_t config; | ||
2123 | * memset(&config, 0, sizeof (config)); | ||
2124 | * config.flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE; | ||
2125 | * config.num_receive_packets = NETIO_MAX_RECEIVE_PKTS; | ||
2126 | * config.queue_id = 0; | ||
2127 | * . | ||
2128 | * . | ||
2129 | * . | ||
2130 | * @endcode | ||
2131 | * | ||
2132 | * since that guarantees that any unused structure members, including | ||
2133 | * members which did not exist when the application was first developed, | ||
2134 | * will not have unexpected values. | ||
2135 | * | ||
2136 | * If statically initializing the structure, we strongly recommend use of | ||
2137 | * C99-style named initializers, for example: | ||
2138 | * | ||
2139 | * @code | ||
2140 | * netio_input_config_t config = { | ||
2141 | * .flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE, | ||
2142 | * .num_receive_packets = NETIO_MAX_RECEIVE_PKTS, | ||
2143 | * .queue_id = 0, | ||
2144 | * }; | ||
2145 | * @endcode | ||
2146 | * | ||
2147 | * instead of the old-style structure initialization: | ||
2148 | * | ||
2149 | * @code | ||
2150 | * // Bad example! Currently equivalent to the above, but don't do this. | ||
2151 | * netio_input_config_t config = { | ||
2152 | * NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE, NETIO_MAX_RECEIVE_PKTS, 0 | ||
2153 | * }; | ||
2154 | * @endcode | ||
2155 | * | ||
2156 | * since the C99 style requires no changes to the code if elements of the | ||
2157 | * config structure are rearranged. (It also makes the initialization much | ||
2158 | * easier to understand.) | ||
2159 | * | ||
2160 | * Except for items which address a particular tile's transmit or receive | ||
2161 | * characteristics, such as the ::NETIO_RECV flag, applications are advised | ||
2162 | * to specify the same set of configuration data on all registrations. | ||
2163 | * This prevents differing results if multiple tiles happen to do their | ||
2164 | * registration operations in a different order on different invocations of | ||
2165 | * the application. This is particularly important for things like link | ||
2166 | * management flags, and buffer size and homing specifications. | ||
2167 | * | ||
2168 | * Unless the ::NETIO_FIXED_BUFFER_VA flag is specified in flags, the NetIO | ||
2169 | * buffer pool is automatically created and mapped into the application's | ||
2170 | * virtual address space at an address chosen by the operating system, | ||
2171 | * using the common memory (cmem) facility in the Tilera Multicore | ||
2172 | * Components library. The cmem facility allows multiple processes to gain | ||
2173 | * access to shared memory which is mapped into each process at an | ||
2174 | * identical virtual address. In order for this to work, the processes | ||
2175 | * must have a common ancestor, which must create the common memory using | ||
2176 | * tmc_cmem_init(). | ||
2177 | * | ||
2178 | * In programs using the iLib process creation API, or in programs which use | ||
2179 | * only one process (which include programs using the pthreads library), | ||
2180 | * tmc_cmem_init() is called automatically. All other applications | ||
2181 | * must call it explicitly, before any child processes which might call | ||
2182 | * netio_input_register() are created. | ||
2183 | */ | ||
2184 | typedef struct | ||
2185 | { | ||
2186 | /** Registration characteristics. | ||
2187 | |||
2188 | This value determines several characteristics of the registration; | ||
2189 | flags for different types of behavior are ORed together to make the | ||
2190 | final flag value. Generally applications should specify exactly | ||
2191 | one flag from each of the following categories: | ||
2192 | |||
2193 | - Whether the application will be receiving packets on this queue | ||
2194 | (::NETIO_RECV or ::NETIO_NO_RECV). | ||
2195 | |||
2196 | - Whether the application will be transmitting packets on this queue, | ||
2197 | and if so, whether it will request egress checksum calculation | ||
2198 | (::NETIO_XMIT, ::NETIO_XMIT_CSUM, or ::NETIO_NO_XMIT). It is | ||
2199 | legal to call netio_get_buffer() without one of the XMIT flags, | ||
2200 | as long as ::NETIO_RECV is specified; in this case, the retrieved | ||
2201 | buffers must be passed to another tile for transmission. | ||
2202 | |||
2203 | - Whether the application expects any vendor-specific tags in | ||
2204 | its packets' L2 headers (::NETIO_TAG_NONE, ::NETIO_TAG_BRCM, | ||
2205 | or ::NETIO_TAG_MRVL). This must match the configuration of the | ||
2206 | target IPP. | ||
2207 | |||
2208 | To accommodate applications written to previous versions of the NetIO | ||
2209 | interface, none of the flags above are currently required; if omitted, | ||
2210 | NetIO behaves more or less as if ::NETIO_RECV | ::NETIO_XMIT_CSUM | | ||
2211 | ::NETIO_TAG_NONE were used. However, explicit specification of | ||
2212 | the relevant flags allows NetIO to do a better job of resource | ||
2213 | allocation, allows earlier detection of certain configuration errors, | ||
2214 | and may enable advanced features or higher performance in the future, | ||
2215 | so their use is strongly recommended. | ||
2216 | |||
2217 | Note that specifying ::NETIO_NO_RECV along with ::NETIO_NO_XMIT | ||
2218 | is a special case, intended primarily for use by programs which | ||
2219 | retrieve network statistics or do link management operations. | ||
2220 | When these flags are both specified, the resulting queue may not | ||
2221 | be used with NetIO routines other than netio_get(), netio_set(), | ||
2222 | and netio_input_unregister(). See @ref link for more information | ||
2223 | on link management. | ||
2224 | |||
2225 | Other flags are optional; their use is described below. | ||
2226 | */ | ||
2227 | int flags; | ||
2228 | |||
2229 | /** Interface name. This is a string which identifies the specific | ||
2230 | Ethernet controller hardware to be used. The format of the string | ||
2231 | is a device type and a device index, separated by a slash; so, | ||
2232 | the first 10 Gigabit Ethernet controller is named "xgbe/0", while | ||
2233 | the second 10/100/1000 Megabit Ethernet controller is named "gbe/1". | ||
2234 | */ | ||
2235 | const char* interface; | ||
2236 | |||
2237 | /** Receive packet queue size. This specifies the maximum number | ||
2238 | of ingress packets that can be received on this queue without | ||
2239 | being retrieved by @ref netio_get_packet(). If the IPP's distribution | ||
2240 | algorithm calls for a packet to be sent to this queue, and this | ||
2241 | number of packets are already pending there, the new packet | ||
2242 | will either be discarded, or sent to another tile registered | ||
2243 | for the same queue_id (see @ref drops). This value must | ||
2244 | be at least ::NETIO_MIN_RECEIVE_PKTS, can always be at least | ||
2245 | ::NETIO_MAX_RECEIVE_PKTS, and may be larger than that on certain | ||
2246 | interfaces. | ||
2247 | */ | ||
2248 | int num_receive_packets; | ||
2249 | |||
2250 | /** The queue ID being requested. Legal values for this range from 0 | ||
2251 | to ::NETIO_MAX_QUEUE_ID, inclusive. ::NETIO_MAX_QUEUE_ID is always | ||
2252 | greater than or equal to the number of tiles; this allows one queue | ||
2253 | for each tile, plus at least one additional queue. Some applications | ||
2254 | may wish to use the additional queue as a destination for unwanted | ||
2255 | packets, since packets delivered to queues for which no tiles have | ||
2256 | registered are discarded. | ||
2257 | */ | ||
2258 | unsigned int queue_id; | ||
2259 | |||
2260 | /** Maximum number of small send buffers to be held in the local empty | ||
2261 | buffer cache. This specifies the size of the area which holds | ||
2262 | empty small egress buffers requested from the IPP but not yet | ||
2263 | retrieved via @ref netio_get_buffer(). This value must be greater | ||
2264 | than zero if the application will ever use @ref netio_get_buffer() | ||
2265 | to allocate empty small egress buffers; it may be no larger than | ||
2266 | ::NETIO_MAX_SEND_BUFFERS. See @ref epp for more details on empty | ||
2267 | buffer caching. | ||
2268 | */ | ||
2269 | int num_send_buffers_small_total; | ||
2270 | |||
2271 | /** Number of small send buffers to be preallocated at registration. | ||
2272 | If this value is nonzero, the specified number of empty small egress | ||
2273 | buffers will be requested from the IPP during the netio_input_register | ||
2274 | operation; this may speed the execution of @ref netio_get_buffer(). | ||
2275 | This may be no larger than @ref num_send_buffers_small_total. See @ref | ||
2276 | epp for more details on empty buffer caching. | ||
2277 | */ | ||
2278 | int num_send_buffers_small_prealloc; | ||
2279 | |||
2280 | /** Maximum number of large send buffers to be held in the local empty | ||
2281 | buffer cache. This specifies the size of the area which holds empty | ||
2282 | large egress buffers requested from the IPP but not yet retrieved via | ||
2283 | @ref netio_get_buffer(). This value must be greater than zero if the | ||
2284 | application will ever use @ref netio_get_buffer() to allocate empty | ||
2285 | large egress buffers; it may be no larger than ::NETIO_MAX_SEND_BUFFERS. | ||
2286 | See @ref epp for more details on empty buffer caching. | ||
2287 | */ | ||
2288 | int num_send_buffers_large_total; | ||
2289 | |||
2290 | /** Number of large send buffers to be preallocated at registration. | ||
2291 | If this value is nonzero, the specified number of empty large egress | ||
2292 | buffers will be requested from the IPP during the netio_input_register | ||
2293 | operation; this may speed the execution of @ref netio_get_buffer(). | ||
2294 | This may be no larger than @ref num_send_buffers_large_total. See @ref | ||
2295 | epp for more details on empty buffer caching. | ||
2296 | */ | ||
2297 | int num_send_buffers_large_prealloc; | ||
2298 | |||
2299 | /** Maximum number of jumbo send buffers to be held in the local empty | ||
2300 | buffer cache. This specifies the size of the area which holds empty | ||
2301 | jumbo egress buffers requested from the IPP but not yet retrieved via | ||
2302 | @ref netio_get_buffer(). This value must be greater than zero if the | ||
2303 | application will ever use @ref netio_get_buffer() to allocate empty | ||
2304 | jumbo egress buffers; it may be no larger than ::NETIO_MAX_SEND_BUFFERS. | ||
2305 | See @ref epp for more details on empty buffer caching. | ||
2306 | */ | ||
2307 | int num_send_buffers_jumbo_total; | ||
2308 | |||
2309 | /** Number of jumbo send buffers to be preallocated at registration. | ||
2310 | If this value is nonzero, the specified number of empty jumbo egress | ||
2311 | buffers will be requested from the IPP during the netio_input_register | ||
2312 | operation; this may speed the execution of @ref netio_get_buffer(). | ||
2313 | This may be no larger than @ref num_send_buffers_jumbo_total. See @ref | ||
2314 | epp for more details on empty buffer caching. | ||
2315 | */ | ||
2316 | int num_send_buffers_jumbo_prealloc; | ||
2317 | |||
2318 | /** Total packet buffer size. This determines the total size, in bytes, | ||
2319 | of the NetIO buffer pool. Note that the maximum number of available | ||
2320 | buffers of each size is determined during hypervisor configuration | ||
2321 | (see the <em>System Programmer's Guide</em> for details); this just | ||
2322 | influences how much host memory is allocated for those buffers. | ||
2323 | |||
2324 | The buffer pool is allocated from common memory, which will be | ||
2325 | automatically initialized if needed. If your buffer pool is larger | ||
2326 | than 240 MB, you might need to explicitly call @c tmc_cmem_init(), | ||
2327 | as described in the Application Libraries Reference Manual (UG227). | ||
2328 | |||
2329 | Packet buffers are currently allocated in chunks of 16 MB; this | ||
2330 | value will be rounded up to the next larger multiple of 16 MB. | ||
2331 | If this value is zero, a default of 32 MB will be used; this was | ||
2332 | the value used by previous versions of NetIO. Note that taking this | ||
2333 | default also affects the placement of buffers on Linux NUMA nodes. | ||
2334 | See @ref buffer_node_weights for an explanation of buffer placement. | ||
2335 | |||
2336 | In order to successfully allocate packet buffers, Linux must have | ||
2337 | available huge pages on the relevant Linux NUMA nodes. See the | ||
2338 | <em>System Programmer's Guide</em> for information on configuring | ||
2339 | huge page support in Linux. | ||
2340 | */ | ||
2341 | uint64_t total_buffer_size; | ||
2342 | |||
2343 | /** Buffer placement weighting factors. | ||
2344 | |||
2345 | This array specifies the relative amount of buffering to place | ||
2346 | on each of the available Linux NUMA nodes. This array is | ||
2347 | indexed by the NUMA node, and the values in the array are | ||
2348 | proportional to the amount of buffer space to allocate on that | ||
2349 | node. | ||
2350 | |||
2351 | If memory striping is enabled in the Hypervisor, then there is | ||
2352 | only one logical NUMA node (node 0). In that case, NetIO will by | ||
2353 | default ignore the suggested buffer node weights, and buffers | ||
2354 | will be striped across the physical memory controllers. See | ||
2355 | UG209 System Programmer's Guide for a description of the | ||
2356 | hypervisor option that controls memory striping. | ||
2357 | |||
2358 | If memory striping is disabled, then there are up to four NUMA | ||
2359 | nodes, corresponding to the four DDRAM controllers in the TILE | ||
2360 | processor architecture. See UG100 Tile Processor Architecture | ||
2361 | Overview for a diagram showing the location of each of the DDRAM | ||
2362 | controllers relative to the tile array. | ||
2363 | |||
2364 | For instance, if memory striping is disabled, the following | ||
2365 | configuration structure: | ||
2366 | |||
2367 | @code | ||
2368 | netio_input_config_t config = { | ||
2369 | . | ||
2370 | . | ||
2371 | . | ||
2372 | .total_buffer_size = 4 * 16 * 1024 * 1024, | ||
2373 | .buffer_node_weights = { 1, 0, 1, 0 }, | ||
2374 | }; | ||
2375 | @endcode | ||
2376 | |||
2377 | would result in 32 MB of buffers being placed on controller 0, and | ||
2378 | 32 MB on controller 2. (Since buffers are allocated in units of | ||
2379 | 16 MB, some sets of weights will not be able to be matched exactly.) | ||
2380 | |||
2381 | For the weights to be effective, @ref total_buffer_size must be | ||
2382 | nonzero. If @ref total_buffer_size is zero, causing the default | ||
2383 | 32 MB of buffer space to be used, then any specified weights will | ||
2384 | be ignored, and buffers will be positioned as they were in previous | ||
2385 | versions of NetIO: | ||
2386 | |||
2387 | - For xgbe/0 and gbe/0, 16 MB of buffers will be placed on controller 1, | ||
2388 | and the other 16 MB will be placed on controller 2. | ||
2389 | |||
2390 | - For xgbe/1 and gbe/1, 16 MB of buffers will be placed on controller 2, | ||
2391 | and the other 16 MB will be placed on controller 3. | ||
2392 | |||
2393 | If @ref total_buffer_size is nonzero, but all weights are zero, | ||
2394 | then all buffer space will be allocated on Linux NUMA node zero. | ||
2395 | |||
2396 | By default, the specified buffer placement is treated as a hint; | ||
2397 | if sufficient free memory is not available on the specified | ||
2398 | controllers, the buffers will be allocated elsewhere. However, | ||
2399 | if the ::NETIO_STRICT_HOMING flag is specified in @ref flags, then a | ||
2400 | failure to allocate buffer space exactly as requested will cause the | ||
2401 | registration operation to fail with an error of ::NETIO_CANNOT_HOME. | ||
2402 | |||
2403 | Note that maximal network performance cannot be achieved with | ||
2404 | only one memory controller. | ||
2405 | */ | ||
2406 | uint8_t buffer_node_weights[NETIO_NUM_NODE_WEIGHTS]; | ||
2407 | |||
2408 | /** Fixed virtual address for packet buffers. Only valid when | ||
2409 | ::NETIO_FIXED_BUFFER_VA is specified in @ref flags; see the | ||
2410 | description of that flag for details. | ||
2411 | */ | ||
2412 | void* fixed_buffer_va; | ||
2413 | |||
2414 | /** | ||
2415 | Maximum number of outstanding send packet requests. This value is | ||
2416 | only relevant when an EPP is in use; it determines the number of | ||
2417 | slots in the EPP's outgoing packet queue which this tile is allowed | ||
2418 | to consume, and thus the number of packets which may be sent before | ||
2419 | the sending tile must wait for an acknowledgment from the EPP. | ||
2420 | Modifying this value is generally only helpful when using @ref | ||
2421 | netio_send_packet_vector(), where it can help improve performance by | ||
2422 | allowing a single vector send operation to process more packets. | ||
2423 | Typically it is not specified, and the default, which divides the | ||
2424 | outgoing packet slots evenly between all tiles on the chip, is used. | ||
2425 | |||
2426 | If a registration asks for more outgoing packet queue slots than are | ||
2427 | available, ::NETIO_TOOMANY_XMIT will be returned. The total number | ||
2428 | of packet queue slots which are available for all tiles for each EPP | ||
2429 | is subject to change, but is currently ::NETIO_TOTAL_SENDS_OUTSTANDING. | ||
2430 | |||
2431 | |||
2432 | This value is ignored if ::NETIO_XMIT is not specified in flags. | ||
2433 | If you want to specify a large value here for a specific tile, you are | ||
2434 | advised to specify NETIO_NO_XMIT on other, non-transmitting tiles so | ||
2435 | that they do not consume a default number of packet slots. Any tile | ||
2436 | transmitting is required to have at least ::NETIO_MIN_SENDS_OUTSTANDING | ||
2437 | slots allocated to it; values less than that will be silently | ||
2438 | increased by the NetIO library. | ||
2439 | */ | ||
2440 | int num_sends_outstanding; | ||
2441 | } | ||
2442 | netio_input_config_t; | ||
2443 | |||
2444 | |||
2445 | /** Registration flags; used in the @ref netio_input_config_t structure. | ||
2446 | * @addtogroup setup | ||
2447 | */ | ||
2448 | /** @{ */ | ||
2449 | |||
2450 | /** Fail a registration request if we can't put packet buffers | ||
2451 | on the specified memory controllers. */ | ||
2452 | #define NETIO_STRICT_HOMING 0x00000002 | ||
2453 | |||
2454 | /** This application expects no tags on its L2 headers. */ | ||
2455 | #define NETIO_TAG_NONE 0x00000004 | ||
2456 | |||
2457 | /** This application expects Marvell extended tags on its L2 headers. */ | ||
2458 | #define NETIO_TAG_MRVL 0x00000008 | ||
2459 | |||
2460 | /** This application expects Broadcom tags on its L2 headers. */ | ||
2461 | #define NETIO_TAG_BRCM 0x00000010 | ||
2462 | |||
2463 | /** This registration may call routines which receive packets. */ | ||
2464 | #define NETIO_RECV 0x00000020 | ||
2465 | |||
2466 | /** This registration may not call routines which receive packets. */ | ||
2467 | #define NETIO_NO_RECV 0x00000040 | ||
2468 | |||
2469 | /** This registration may call routines which transmit packets. */ | ||
2470 | #define NETIO_XMIT 0x00000080 | ||
2471 | |||
2472 | /** This registration may call routines which transmit packets with | ||
2473 | checksum acceleration. */ | ||
2474 | #define NETIO_XMIT_CSUM 0x00000100 | ||
2475 | |||
2476 | /** This registration may not call routines which transmit packets. */ | ||
2477 | #define NETIO_NO_XMIT 0x00000200 | ||
2478 | |||
2479 | /** This registration wants NetIO buffers mapped at an application-specified | ||
2480 | virtual address. | ||
2481 | |||
2482 | NetIO buffers are by default created by the TMC common memory facility, | ||
2483 | which must be configured by a common ancestor of all processes sharing | ||
2484 | a network interface. When this flag is specified, NetIO buffers are | ||
2485 | instead mapped at an address chosen by the application (and specified | ||
2486 | in @ref netio_input_config_t::fixed_buffer_va). This allows multiple | ||
2487 | unrelated but cooperating processes to share a NetIO interface. | ||
2488 | All processes sharing the same interface must specify this flag, | ||
2489 | and all must specify the same fixed virtual address. | ||
2490 | |||
2491 | @ref netio_input_config_t::fixed_buffer_va must be a | ||
2492 | multiple of 16 MB, and the packet buffers will occupy @ref | ||
2493 | netio_input_config_t::total_buffer_size bytes of virtual address | ||
2494 | space, beginning at that address. If any of those virtual addresses | ||
2495 | are currently occupied by other memory objects, like application or | ||
2496 | shared library code or data, @ref netio_input_register() will return | ||
2497 | ::NETIO_FAULT. While it is impossible to provide a fixed_buffer_va | ||
2498 | which will work for all applications, a good first guess might be to | ||
2499 | use 0xb0000000 minus @ref netio_input_config_t::total_buffer_size. | ||
2500 | If that fails, it might be helpful to consult the running application's | ||
2501 | virtual address description file (/proc/<em>pid</em>/maps) to see | ||
2502 | which regions of virtual address space are available. | ||
2503 | */ | ||
2504 | #define NETIO_FIXED_BUFFER_VA 0x00000400 | ||
2505 | |||
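As a concrete instance of the "first guess" suggested above, the default 32 MB buffer pool gives the following address. The value is purely illustrative; note that it satisfies the multiple-of-16 MB requirement.

@code
/* Illustrative only: 0xb0000000 minus a 32 MB (0x02000000) pool. */
void *fixed_va = (void *)(0xb0000000UL - (32UL * 1024 * 1024));  /* 0xae000000 */
/* This value would go in netio_input_config_t::fixed_buffer_va, with
 * NETIO_FIXED_BUFFER_VA set in the flags field. */
@endcode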
2506 | /** This registration call will not complete unless the network link | ||
2507 | is up. The process will wait several seconds for this to happen (the | ||
2508 | precise interval is link-dependent), but if the link does not come up, | ||
2509 | ::NETIO_LINK_DOWN will be returned. This flag is the default if | ||
2510 | ::NETIO_NOREQUIRE_LINK_UP is not specified. Note that this flag by | ||
2511 | itself does not request that the link be brought up; that can be done | ||
2512 | with the ::NETIO_AUTO_LINK_UPDN or ::NETIO_AUTO_LINK_UP flags (the | ||
2513 | latter is the default if no NETIO_AUTO_LINK_xxx flags are specified), | ||
2514 | or by explicitly setting the link's desired state via netio_set(). | ||
2515 | If the link is not brought up by one of those methods, and this flag | ||
2516 | is specified, the registration operation will return ::NETIO_LINK_DOWN. | ||
2517 | This flag is ignored if it is specified along with ::NETIO_NO_XMIT and | ||
2518 | ::NETIO_NO_RECV. See @ref link for more information on link | ||
2519 | management. | ||
2520 | */ | ||
2521 | #define NETIO_REQUIRE_LINK_UP 0x00000800 | ||
2522 | |||
2523 | /** This registration call will complete even if the network link is not up. | ||
2524 | Whenever the link is not up, packets will not be sent or received: | ||
2525 | netio_get_packet() will return ::NETIO_NOPKT once all queued packets | ||
2526 | have been drained, and netio_send_packet() and similar routines will | ||
2527 | return NETIO_QUEUE_FULL once the outgoing packet queue in the EPP | ||
2528 | or the I/O shim is full. See @ref link for more information on link | ||
2529 | management. | ||
2530 | */ | ||
2531 | #define NETIO_NOREQUIRE_LINK_UP 0x00001000 | ||
2532 | |||
2533 | #ifndef __DOXYGEN__ | ||
2534 | /* | ||
2535 | * These are part of the implementation of the NETIO_AUTO_LINK_xxx flags, | ||
2536 | * but should not be used directly by applications, and are thus not | ||
2537 | * documented. | ||
2538 | */ | ||
2539 | #define _NETIO_AUTO_UP 0x00002000 | ||
2540 | #define _NETIO_AUTO_DN 0x00004000 | ||
2541 | #define _NETIO_AUTO_PRESENT 0x00008000 | ||
2542 | #endif | ||
2543 | |||
2544 | /** Set the desired state of the link to up, allowing any speeds which are | ||
2545 | supported by the link hardware, as part of this registration operation. | ||
2546 | Do not take down the link automatically. This is the default if | ||
2547 | no other NETIO_AUTO_LINK_xxx flags are specified. This flag is ignored | ||
2548 | if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV. | ||
2549 | See @ref link for more information on link management. | ||
2550 | */ | ||
2551 | #define NETIO_AUTO_LINK_UP (_NETIO_AUTO_PRESENT | _NETIO_AUTO_UP) | ||
2552 | |||
2553 | /** Set the desired state of the link to up, allowing any speeds which are | ||
2554 | supported by the link hardware, as part of this registration operation. | ||
2555 | Set the desired state of the link to down the next time no tiles are | ||
2556 | registered for packet reception or transmission. This flag is ignored | ||
2557 | if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV. | ||
2558 | See @ref link for more information on link management. | ||
2559 | */ | ||
2560 | #define NETIO_AUTO_LINK_UPDN (_NETIO_AUTO_PRESENT | _NETIO_AUTO_UP | \ | ||
2561 | _NETIO_AUTO_DN) | ||
2562 | |||
2563 | /** Set the desired state of the link to down the next time no tiles are | ||
2564 | registered for packet reception or transmission. This flag is ignored | ||
2565 | if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV. | ||
2566 | See @ref link for more information on link management. | ||
2567 | */ | ||
2568 | #define NETIO_AUTO_LINK_DN (_NETIO_AUTO_PRESENT | _NETIO_AUTO_DN) | ||
2569 | |||
2570 | /** Do not bring up the link automatically as part of this registration | ||
2571 | operation. Do not take down the link automatically. This flag | ||
2572 | is ignored if it is specified along with ::NETIO_NO_XMIT and | ||
2573 | ::NETIO_NO_RECV. See @ref link for more information on link management. | ||
2574 | */ | ||
2575 | #define NETIO_AUTO_LINK_NONE _NETIO_AUTO_PRESENT | ||
2576 | |||
2577 | |||
2578 | /** Minimum number of receive packets. */ | ||
2579 | #define NETIO_MIN_RECEIVE_PKTS 16 | ||
2580 | |||
2581 | /** Lower bound on the maximum number of receive packets; may be higher | ||
2582 | than this on some interfaces. */ | ||
2583 | #define NETIO_MAX_RECEIVE_PKTS 128 | ||
2584 | |||
2585 | /** Maximum number of send buffers, per packet size. */ | ||
2586 | #define NETIO_MAX_SEND_BUFFERS 16 | ||
2587 | |||
2588 | /** Number of EPP queue slots, and thus outstanding sends, per EPP. */ | ||
2589 | #define NETIO_TOTAL_SENDS_OUTSTANDING 2015 | ||
2590 | |||
2591 | /** Minimum number of EPP queue slots, and thus outstanding sends, per | ||
2592 | * transmitting tile. */ | ||
2593 | #define NETIO_MIN_SENDS_OUTSTANDING 16 | ||
2594 | |||
2595 | |||
2596 | /**@}*/ | ||
2597 | |||
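Pulling the pieces above together, the sketch below shows how the flag categories and the netio_input_config_t fields might be combined for a simple receive-and-transmit registration. The field values are illustrative only, and the netio_input_register() call that would consume the structure is mentioned but not reproduced, since its exact signature is not part of this excerpt.

@code
/* Minimal sketch: one flag from each category, first 10 GbE controller. */
netio_input_config_t config = {
        .flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE,
        .interface = "xgbe/0",
        .num_receive_packets = NETIO_MIN_RECEIVE_PKTS,
        .queue_id = 0,
        /* All other fields left zero: default 32 MB buffer pool, default
         * buffer placement, and the default share of EPP send slots. */
};
/* 'config' would then be handed to netio_input_register() together with
 * a netio_queue_t to initialize; that call is not shown here. */
@endcode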
2598 | #ifndef __DOXYGEN__ | ||
2599 | |||
2600 | /** | ||
2601 | * An object for providing Ethernet packets to a process. | ||
2602 | */ | ||
2603 | struct __netio_queue_impl_t; | ||
2604 | |||
2605 | /** | ||
2606 | * An object for managing the user end of a NetIO queue. | ||
2607 | */ | ||
2608 | struct __netio_queue_user_impl_t; | ||
2609 | |||
2610 | #endif /* !__DOXYGEN__ */ | ||
2611 | |||
2612 | |||
2613 | /** A netio_queue_t describes a NetIO communications endpoint. | ||
2614 | * @ingroup setup | ||
2615 | */ | ||
2616 | typedef struct | ||
2617 | { | ||
2618 | #ifdef __DOXYGEN__ | ||
2619 | uint8_t opaque[8]; /**< This is an opaque structure. */ | ||
2620 | #else | ||
2621 | struct __netio_queue_impl_t* __system_part; /**< The system part. */ | ||
2622 | struct __netio_queue_user_impl_t* __user_part; /**< The user part. */ | ||
2623 | #ifdef _NETIO_PTHREAD | ||
2624 | _netio_percpu_mutex_t lock; /**< Queue lock. */ | ||
2625 | #endif | ||
2626 | #endif | ||
2627 | } | ||
2628 | netio_queue_t; | ||
2629 | |||
2630 | |||
2631 | /** | ||
2632 | * @brief Packet send context. | ||
2633 | * | ||
2634 | * @ingroup egress | ||
2635 | * | ||
2636 | * Packet send context for use with netio_send_packet_prepare and _commit. | ||
2637 | */ | ||
2638 | typedef struct | ||
2639 | { | ||
2640 | #ifdef __DOXYGEN__ | ||
2641 | uint8_t opaque[44]; /**< This is an opaque structure. */ | ||
2642 | #else | ||
2643 | uint8_t flags; /**< Defined below */ | ||
2644 | uint8_t datalen; /**< Number of valid words pointed to by data. */ | ||
2645 | uint32_t request[9]; /**< Request to be sent to the EPP or shim. Note | ||
2646 | that this is smaller than the 11-word maximum | ||
2647 | request size, since some constant values are | ||
2648 | not saved in the context. */ | ||
2649 | uint32_t *data; /**< Data to be sent to the EPP or shim via IDN. */ | ||
2650 | #endif | ||
2651 | } | ||
2652 | netio_send_pkt_context_t; | ||
2653 | |||
2654 | |||
2655 | #ifndef __DOXYGEN__ | ||
2656 | #define SEND_PKT_CTX_USE_EPP 1 /**< We're sending to an EPP. */ | ||
2657 | #define SEND_PKT_CTX_SEND_CSUM 2 /**< Request includes a checksum. */ | ||
2658 | #endif | ||
2659 | |||
2660 | /** | ||
2661 | * @brief Packet vector entry. | ||
2662 | * | ||
2663 | * @ingroup egress | ||
2664 | * | ||
2665 | * This data structure is used with netio_send_packet_vector() to send multiple | ||
2666 | * packets with one NetIO call. The structure should be initialized by | ||
2667 | * calling netio_pkt_vector_set(), rather than by setting the fields | ||
2668 | * directly. | ||
2669 | * | ||
2670 | * This structure is guaranteed to be a power of two in size, no | ||
2671 | * bigger than one L2 cache line, and to be aligned modulo its size. | ||
2672 | */ | ||
2673 | typedef struct | ||
2674 | #ifndef __DOXYGEN__ | ||
2675 | __attribute__((aligned(8))) | ||
2676 | #endif | ||
2677 | { | ||
2678 | /** Reserved for use by the user application. When initialized with | ||
2679 | * the netio_set_pkt_vector_entry() function, this field is guaranteed | ||
2680 | * to be visible to readers only after all other fields are already | ||
2681 | * visible. This way it can be used as a valid flag or generation | ||
2682 | * counter. */ | ||
2683 | uint8_t user_data; | ||
2684 | |||
2685 | /* Structure members below this point should not be accessed directly by | ||
2686 | * applications, as they may change in the future. */ | ||
2687 | |||
2688 | /** Low 8 bits of the packet address to send. The high bits are | ||
2689 | * acquired from the 'handle' field. */ | ||
2690 | uint8_t buffer_address_low; | ||
2691 | |||
2692 | /** Number of bytes to transmit. */ | ||
2693 | uint16_t size; | ||
2694 | |||
2695 | /** The raw handle from a netio_pkt_t. If this is NETIO_PKT_HANDLE_NONE, | ||
2696 | * this vector entry will be skipped and no packet will be transmitted. */ | ||
2697 | netio_pkt_handle_t handle; | ||
2698 | } | ||
2699 | netio_pkt_vector_entry_t; | ||
2700 | |||
2701 | |||
2702 | /** | ||
2703 | * @brief Initialize fields in a packet vector entry. | ||
2704 | * | ||
2705 | * @ingroup egress | ||
2706 | * | ||
2707 | * @param[out] v Pointer to the vector entry to be initialized. | ||
2708 | * @param[in] pkt Packet to be transmitted when the vector entry is passed to | ||
2709 | * netio_send_packet_vector(). Note that the packet's attributes | ||
2710 | * (e.g., its L2 offset and length) are captured at the time this | ||
2711 | * routine is called; subsequent changes in those attributes will not | ||
2712 | * be reflected in the packet which is actually transmitted. | ||
2713 | * Changes in the packet's contents, however, will be so reflected. | ||
2714 | * If this is NULL, no packet will be transmitted. | ||
2715 | * @param[in] user_data User data to be set in the vector entry. | ||
2716 | * This function guarantees that the "user_data" field will become | ||
2717 | * visible to a reader only after all other fields have become visible. | ||
2718 | * This allows a structure in a ring buffer to be written and read | ||
2719 | * by a polling reader without any locks or other synchronization. | ||
2720 | */ | ||
2721 | static __inline void | ||
2722 | netio_pkt_vector_set(volatile netio_pkt_vector_entry_t* v, netio_pkt_t* pkt, | ||
2723 | uint8_t user_data) | ||
2724 | { | ||
2725 | if (pkt) | ||
2726 | { | ||
2727 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
2728 | { | ||
2729 | netio_pkt_minimal_metadata_t* mmd = | ||
2730 | (netio_pkt_minimal_metadata_t*) &pkt->__metadata; | ||
2731 | v->buffer_address_low = (uintptr_t) NETIO_PKT_L2_DATA_MM(mmd, pkt) & 0xFF; | ||
2732 | v->size = NETIO_PKT_L2_LENGTH_MM(mmd, pkt); | ||
2733 | } | ||
2734 | else | ||
2735 | { | ||
2736 | netio_pkt_metadata_t* mda = &pkt->__metadata; | ||
2737 | v->buffer_address_low = (uintptr_t) NETIO_PKT_L2_DATA_M(mda, pkt) & 0xFF; | ||
2738 | v->size = NETIO_PKT_L2_LENGTH_M(mda, pkt); | ||
2739 | } | ||
2740 | v->handle.word = pkt->__packet.word; | ||
2741 | } | ||
2742 | else | ||
2743 | { | ||
2744 | v->handle.word = 0; /* Set handle to NETIO_PKT_HANDLE_NONE. */ | ||
2745 | } | ||
2746 | |||
2747 | __asm__("" : : : "memory"); | ||
2748 | |||
2749 | v->user_data = user_data; | ||
2750 | } | ||
2751 | |||
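As a usage sketch, a sender might fill an array of vector entries from packets it already holds and then pass the array to netio_send_packet_vector(). Only netio_pkt_vector_set() itself is exercised below; the names vec, pkts, and count are hypothetical, and the send call's arguments are not shown.

@code
/* Sketch: fill up to 8 vector entries; NULL packets yield entries that
 * netio_send_packet_vector() will skip. */
netio_pkt_vector_entry_t vec[8];
int i;
for (i = 0; i < count && i < 8; i++)
        netio_pkt_vector_set(&vec[i], pkts[i], /* user_data */ 1);
/* 'vec' would then be passed to netio_send_packet_vector(). */
@endcode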
2752 | |||
2753 | /** | ||
2754 | * Flags and structures for @ref netio_get() and @ref netio_set(). | ||
2755 | * @ingroup config | ||
2756 | */ | ||
2757 | |||
2758 | /** @{ */ | ||
2759 | /** Parameter class; addr is a NETIO_PARAM_xxx value. */ | ||
2760 | #define NETIO_PARAM 0 | ||
2761 | /** Interface MAC address. This address is only valid with @ref netio_get(). | ||
2762 | * The value is a 6-byte MAC address. Depending upon the overall system | ||
2763 | * design, a MAC address may or may not be available for each interface. */ | ||
2764 | #define NETIO_PARAM_MAC 0 | ||
2765 | |||
2766 | /** Determine whether to suspend output on the receipt of pause frames. | ||
2767 | * If the value is nonzero, the I/O shim will suspend output when a pause | ||
2768 | * frame is received. If the value is zero, pause frames will be ignored. */ | ||
2769 | #define NETIO_PARAM_PAUSE_IN 1 | ||
2770 | |||
2771 | /** Determine whether to send pause frames if the I/O shim packet FIFOs are | ||
2772 | * nearly full. If the value is zero, pause frames are not sent. If | ||
2773 | * the value is nonzero, it is the delay value which will be sent in any | ||
2774 | * pause frames which are output, in units of 512 bit times. */ | ||
2775 | #define NETIO_PARAM_PAUSE_OUT 2 | ||
2776 | |||
2777 | /** Jumbo frame support. The value is a 4-byte integer. If the value is | ||
2778 | * nonzero, the MAC will accept frames of up to 10240 bytes. If the value | ||
2779 | * is zero, the MAC will only accept frames of up to 1544 bytes. */ | ||
2780 | #define NETIO_PARAM_JUMBO 3 | ||
2781 | |||
2782 | /** I/O shim's overflow statistics register. The value is two 16-bit integers. | ||
2783 | * The first 16-bit value (or the low 16 bits, if the value is treated as a | ||
2784 | * 32-bit number) is the count of packets which were completely dropped and | ||
2785 | * not delivered by the shim. The second 16-bit value (or the high 16 bits, | ||
2786 | * if the value is treated as a 32-bit number) is the count of packets | ||
2787 | * which were truncated and thus only partially delivered by the shim. This | ||
2788 | * register is automatically reset to zero after it has been read. | ||
2789 | */ | ||
2790 | #define NETIO_PARAM_OVERFLOW 4 | ||
2791 | |||
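For illustration, a 32-bit value retrieved for ::NETIO_PARAM_OVERFLOW can be split into its two counters as follows; the helper name is hypothetical, and the netio_get() call that produces the value is not shown.

@code
static void decode_overflow(uint32_t overflow,
                            uint16_t *dropped, uint16_t *truncated)
{
        *dropped   = overflow & 0xffff;         /* completely dropped packets */
        *truncated = (overflow >> 16) & 0xffff; /* truncated (partial) packets */
}
@endcode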
2792 | /** IPP statistics. This address is only valid with @ref netio_get(). The | ||
2793 | * value is a netio_stat_t structure. Unlike the I/O shim statistics, the | ||
2794 | * IPP statistics are not all reset to zero on read; see the description | ||
2795 | * of the netio_stat_t for details. */ | ||
2796 | #define NETIO_PARAM_STAT 5 | ||
2797 | |||
2798 | /** Possible link state. The value is a combination of "NETIO_LINK_xxx" | ||
2799 | * flags. With @ref netio_get(), this will indicate which flags are | ||
2800 | * actually supported by the hardware. | ||
2801 | * | ||
2802 | * For historical reasons, specifying this value to netio_set() will have | ||
2803 | * the same behavior as using ::NETIO_PARAM_LINK_CONFIG, but this usage is | ||
2804 | * discouraged. | ||
2805 | */ | ||
2806 | #define NETIO_PARAM_LINK_POSSIBLE_STATE 6 | ||
2807 | |||
2808 | /** Link configuration. The value is a combination of "NETIO_LINK_xxx" flags. | ||
2809 | * With @ref netio_set(), this will attempt to immediately bring up the | ||
2810 | * link using whichever of the requested flags are supported by the | ||
2811 | * hardware, or take down the link if the flags are zero; if this is | ||
2812 | * not possible, an error will be returned. Many programs will want | ||
2813 | * to use ::NETIO_PARAM_LINK_DESIRED_STATE instead. | ||
2814 | * | ||
2815 | * For historical reasons, specifying this value to netio_get() will | ||
2816 | * have the same behavior as using ::NETIO_PARAM_LINK_POSSIBLE_STATE, | ||
2817 | * but this usage is discouraged. | ||
2818 | */ | ||
2819 | #define NETIO_PARAM_LINK_CONFIG NETIO_PARAM_LINK_POSSIBLE_STATE | ||
2820 | |||
2821 | /** Current link state. This address is only valid with @ref netio_get(). | ||
2822 | * The value is zero or more of the "NETIO_LINK_xxx" flags, ORed together. | ||
2823 | * If the link is down, the value ANDed with NETIO_LINK_SPEED will be | ||
2824 | * zero; if the link is up, the value ANDed with NETIO_LINK_SPEED will | ||
2825 | * result in exactly one of the NETIO_LINK_xxx values, indicating the | ||
2826 | * current speed. */ | ||
2827 | #define NETIO_PARAM_LINK_CURRENT_STATE 7 | ||
2828 | |||
2829 | /** Variant symbol for current state, retained for compatibility with | ||
2830 | * pre-MDE-2.1 programs. */ | ||
2831 | #define NETIO_PARAM_LINK_STATUS NETIO_PARAM_LINK_CURRENT_STATE | ||
2832 | |||
2833 | /** Packet Coherence protocol. This address is only valid with @ref netio_get(). | ||
2834 | * The value is nonzero if the interface is configured for cache-coherent DMA. | ||
2835 | */ | ||
2836 | #define NETIO_PARAM_COHERENT 8 | ||
2837 | |||
2838 | /** Desired link state. The value is a combination of "NETIO_LINK_xxx" | ||
2839 | * flags, which specify the desired state for the link. With @ref | ||
2840 | * netio_set(), this will, in the background, attempt to bring up the link | ||
2841 | * using whichever of the requested flags are reasonable, or take down the | ||
2842 | * link if the flags are zero. The actual link up or down operation may | ||
2843 | * happen after this call completes. If the link state changes in the | ||
2844 | * future, the system will continue to try to get back to the desired link | ||
2845 | * state; for instance, if the link is brought up successfully, and then | ||
2846 | * the network cable is disconnected, the link will go down. However, the | ||
2847 | * desired state of the link is still up, so if the cable is reconnected, | ||
2848 | * the link will be brought up again. | ||
2849 | * | ||
2850 | * With @ref netio_get(), this will indicate the desired state for the | ||
2851 | * link, as set with a previous netio_set() call, or implicitly by a | ||
2852 | * netio_input_register() or netio_input_unregister() operation. This may | ||
2853 | * not reflect the current state of the link; to get that, use | ||
2854 | * ::NETIO_PARAM_LINK_CURRENT_STATE. */ | ||
2855 | #define NETIO_PARAM_LINK_DESIRED_STATE 9 | ||
2856 | |||
2857 | /** NetIO statistics structure. Retrieved using the ::NETIO_PARAM_STAT | ||
2858 | * address passed to @ref netio_get(). */ | ||
2859 | typedef struct | ||
2860 | { | ||
2861 | /** Number of packets which have been received by the IPP and forwarded | ||
2862 | * to a tile's receive queue for processing. This value wraps at its | ||
2863 | * maximum, and is not cleared upon read. */ | ||
2864 | uint32_t packets_received; | ||
2865 | |||
2866 | /** Number of packets which have been dropped by the IPP, because they could | ||
2867 | * not be received, or could not be forwarded to a tile. The former happens | ||
2868 | * when the IPP does not have a free packet buffer of suitable size for an | ||
2869 | * incoming frame. The latter happens when all potential destination tiles | ||
2870 | * for a packet, as defined by the group, bucket, and queue configuration, | ||
2871 | * have full receive queues. This value wraps at its maximum, and is not | ||
2872 | * cleared upon read. */ | ||
2873 | uint32_t packets_dropped; | ||
2874 | |||
2875 | /* | ||
2876 | * Note: the #defines after each of the following four one-byte values | ||
2877 | * denote their location within the third word of the netio_stat_t. They | ||
2878 | * are intended for use only by the IPP implementation and are thus omitted | ||
2879 | * from the Doxygen output. | ||
2880 | */ | ||
2881 | |||
2882 | /** Number of packets dropped because no worker was able to accept a new | ||
2883 | * packet. This value saturates at its maximum, and is cleared upon | ||
2884 | * read. */ | ||
2885 | uint8_t drops_no_worker; | ||
2886 | #ifndef __DOXYGEN__ | ||
2887 | #define NETIO_STAT_DROPS_NO_WORKER 0 | ||
2888 | #endif | ||
2889 | |||
2890 | /** Number of packets dropped because no small buffers were available. | ||
2891 | * This value saturates at its maximum, and is cleared upon read. */ | ||
2892 | uint8_t drops_no_smallbuf; | ||
2893 | #ifndef __DOXYGEN__ | ||
2894 | #define NETIO_STAT_DROPS_NO_SMALLBUF 1 | ||
2895 | #endif | ||
2896 | |||
2897 | /** Number of packets dropped because no large buffers were available. | ||
2898 | * This value saturates at its maximum, and is cleared upon read. */ | ||
2899 | uint8_t drops_no_largebuf; | ||
2900 | #ifndef __DOXYGEN__ | ||
2901 | #define NETIO_STAT_DROPS_NO_LARGEBUF 2 | ||
2902 | #endif | ||
2903 | |||
2904 | /** Number of packets dropped because no jumbo buffers were available. | ||
2905 | * This value saturates at its maximum, and is cleared upon read. */ | ||
2906 | uint8_t drops_no_jumbobuf; | ||
2907 | #ifndef __DOXYGEN__ | ||
2908 | #define NETIO_STAT_DROPS_NO_JUMBOBUF 3 | ||
2909 | #endif | ||
2910 | } | ||
2911 | netio_stat_t; | ||
2912 | |||
2913 | |||
2914 | /** Link can run, should run, or is running at 10 Mbps. */ | ||
2915 | #define NETIO_LINK_10M 0x01 | ||
2916 | |||
2917 | /** Link can run, should run, or is running at 100 Mbps. */ | ||
2918 | #define NETIO_LINK_100M 0x02 | ||
2919 | |||
2920 | /** Link can run, should run, or is running at 1 Gbps. */ | ||
2921 | #define NETIO_LINK_1G 0x04 | ||
2922 | |||
2923 | /** Link can run, should run, or is running at 10 Gbps. */ | ||
2924 | #define NETIO_LINK_10G 0x08 | ||
2925 | |||
2926 | /** Link should run at the highest speed supported by the link and by | ||
2927 | * the device connected to the link. Only usable as a value for | ||
2928 | * the link's desired state; never returned as a value for the current | ||
2929 | * or possible states. */ | ||
2930 | #define NETIO_LINK_ANYSPEED 0x10 | ||
2931 | |||
2932 | /** All legal link speeds. */ | ||
2933 | #define NETIO_LINK_SPEED (NETIO_LINK_10M | \ | ||
2934 | NETIO_LINK_100M | \ | ||
2935 | NETIO_LINK_1G | \ | ||
2936 | NETIO_LINK_10G | \ | ||
2937 | NETIO_LINK_ANYSPEED) | ||
2938 | |||
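Combining ::NETIO_PARAM_LINK_CURRENT_STATE with the speed flags above, a retrieved state value could be interpreted as sketched below; the helper name is illustrative, and obtaining the value via netio_get() is assumed rather than shown.

@code
/* Sketch: the current-state value ANDed with NETIO_LINK_SPEED is zero when
 * the link is down, and exactly one speed flag when it is up. */
static const char *link_speed_str(uint32_t state)
{
        switch (state & NETIO_LINK_SPEED) {
        case 0:               return "down";
        case NETIO_LINK_10M:  return "10 Mbps";
        case NETIO_LINK_100M: return "100 Mbps";
        case NETIO_LINK_1G:   return "1 Gbps";
        case NETIO_LINK_10G:  return "10 Gbps";
        default:              return "unexpected";
        }
}
@endcode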
2939 | |||
2940 | /** MAC register class. Addr is a register offset within the MAC. | ||
2941 | * Registers within the XGbE and GbE MACs are documented in the Tile | ||
2942 | * Processor I/O Device Guide (UG104). MAC registers start at address | ||
2943 | * 0x4000, and do not include the MAC_INTERFACE registers. */ | ||
2944 | #define NETIO_MAC 1 | ||
2945 | |||
2946 | /** MDIO register class (IEEE 802.3 clause 22 format). Addr is the "addr" | ||
2947 | * member of a netio_mdio_addr_t structure. */ | ||
2948 | #define NETIO_MDIO 2 | ||
2949 | |||
2950 | /** MDIO register class (IEEE 802.3 clause 45 format). Addr is the "addr" | ||
2951 | * member of a netio_mdio_addr_t structure. */ | ||
2952 | #define NETIO_MDIO_CLAUSE45 3 | ||
2953 | |||
2954 | /** NetIO MDIO address type. Retrieved or provided using the ::NETIO_MDIO | ||
2955 | * address passed to @ref netio_get() or @ref netio_set(). */ | ||
2956 | typedef union | ||
2957 | { | ||
2958 | struct | ||
2959 | { | ||
2960 | unsigned int reg:16; /**< MDIO register offset. For clause 22 access, | ||
2961 | must be less than 32. */ | ||
2962 | unsigned int phy:5; /**< Which MDIO PHY to access. */ | ||
2963 | unsigned int dev:5; /**< Which MDIO device to access within that PHY. | ||
2964 | Applicable for clause 45 access only; ignored | ||
2965 | for clause 22 access. */ | ||
2966 | } | ||
2967 | bits; /**< Container for bitfields. */ | ||
2968 | uint64_t addr; /**< Value to pass to @ref netio_get() or | ||
2969 | * @ref netio_set(). */ | ||
2970 | } | ||
2971 | netio_mdio_addr_t; | ||
2972 | |||
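For example, a clause 22 access to MDIO register 1 of PHY 0 could compose its address as shown below; the resulting mdio_addr.addr would then be used with the ::NETIO_MDIO class in a netio_get() or netio_set() call, which is not reproduced here.

@code
netio_mdio_addr_t mdio_addr = {
        .bits = {
                .reg = 1,   /* register offset; must be < 32 for clause 22 */
                .phy = 0,   /* which PHY to access */
                .dev = 0,   /* ignored for clause 22 access */
        }
};
/* mdio_addr.addr is the value to pass as the address argument. */
@endcode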
2973 | /** @} */ | ||
2974 | |||
2975 | #endif /* __NETIO_INTF_H__ */ | ||
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile index 112b1e248f05..b4c8e8ec45dc 100644 --- a/arch/tile/kernel/Makefile +++ b/arch/tile/kernel/Makefile | |||
@@ -15,3 +15,4 @@ obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o | |||
15 | obj-$(CONFIG_MODULES) += module.o | 15 | obj-$(CONFIG_MODULES) += module.o |
16 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 16 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
17 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o | 17 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o |
18 | obj-$(CONFIG_PCI) += pci.o | ||
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c index 67617a05e602..dbc213adf5e1 100644 --- a/arch/tile/kernel/compat.c +++ b/arch/tile/kernel/compat.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/kdev_t.h> | 21 | #include <linux/kdev_t.h> |
22 | #include <linux/fs.h> | 22 | #include <linux/fs.h> |
23 | #include <linux/fcntl.h> | 23 | #include <linux/fcntl.h> |
24 | #include <linux/smp_lock.h> | ||
25 | #include <linux/uaccess.h> | 24 | #include <linux/uaccess.h> |
26 | #include <linux/signal.h> | 25 | #include <linux/signal.h> |
27 | #include <asm/syscalls.h> | 26 | #include <asm/syscalls.h> |
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index fb64b99959d4..543d6a33aa26 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/smp_lock.h> | ||
19 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
20 | #include <linux/signal.h> | 19 | #include <linux/signal.h> |
21 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c new file mode 100644 index 000000000000..a1ee25be9ad9 --- /dev/null +++ b/arch/tile/kernel/pci.c | |||
@@ -0,0 +1,621 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/pci.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/capability.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/bootmem.h> | ||
24 | #include <linux/irq.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/uaccess.h> | ||
27 | |||
28 | #include <asm/processor.h> | ||
29 | #include <asm/sections.h> | ||
30 | #include <asm/byteorder.h> | ||
31 | #include <asm/hv_driver.h> | ||
32 | #include <hv/drv_pcie_rc_intf.h> | ||
33 | |||
34 | |||
35 | /* | ||
36 | * Initialization flow and process | ||
37 | * ------------------------------- | ||
38 | * | ||
39 | * This file contains the routines to search for PCI buses, | ||
40 | * enumerate the buses, and configure any attached devices. | ||
41 | * | ||
42 | * There are two entry points here: | ||
43 | * 1) tile_pci_init | ||
44 | * This sets up the pci_controller structs, and opens the | ||
45 | * FDs to the hypervisor. This is called from setup_arch() early | ||
46 | * in the boot process. | ||
47 | * 2) pcibios_init | ||
48 | * This probes the PCI bus(es) for any attached hardware. It's | ||
49 | * called by subsys_initcall. All of the real work is done by the | ||
50 | * generic Linux PCI layer. | ||
51 | * | ||
52 | */ | ||
53 | |||
54 | /* | ||
55 | * This flag indicates whether the platform is TILEmpower, which needs | ||
56 | * special configuration for the PLX switch chip. | ||
57 | */ | ||
58 | int __write_once tile_plx_gen1; | ||
59 | |||
60 | static struct pci_controller controllers[TILE_NUM_PCIE]; | ||
61 | static int num_controllers; | ||
62 | |||
63 | static struct pci_ops tile_cfg_ops; | ||
64 | |||
65 | |||
66 | /* | ||
67 | * We don't need to worry about the alignment of resources. | ||
68 | */ | ||
69 | resource_size_t pcibios_align_resource(void *data, const struct resource *res, | ||
70 | resource_size_t size, resource_size_t align) | ||
71 | { | ||
72 | return res->start; | ||
73 | } | ||
74 | EXPORT_SYMBOL(pcibios_align_resource); | ||
75 | |||
76 | /* | ||
77 | * Open a FD to the hypervisor PCI device. | ||
78 | * | ||
79 | * controller_id is the controller number, config type is 0 or 1 for | ||
80 | * config0 or config1 operations. | ||
81 | */ | ||
82 | static int __init tile_pcie_open(int controller_id, int config_type) | ||
83 | { | ||
84 | char filename[32]; | ||
85 | int fd; | ||
86 | |||
87 | sprintf(filename, "pcie/%d/config%d", controller_id, config_type); | ||
88 | |||
89 | fd = hv_dev_open((HV_VirtAddr)filename, 0); | ||
90 | |||
91 | return fd; | ||
92 | } | ||
93 | |||
94 | |||
95 | /* | ||
96 | * Get the IRQ numbers from the HV and set up the handlers for them. | ||
97 | */ | ||
98 | static int __init tile_init_irqs(int controller_id, | ||
99 | struct pci_controller *controller) | ||
100 | { | ||
101 | char filename[32]; | ||
102 | int fd; | ||
103 | int ret; | ||
104 | int x; | ||
105 | struct pcie_rc_config rc_config; | ||
106 | |||
107 | sprintf(filename, "pcie/%d/ctl", controller_id); | ||
108 | fd = hv_dev_open((HV_VirtAddr)filename, 0); | ||
109 | if (fd < 0) { | ||
110 | pr_err("PCI: hv_dev_open(%s) failed\n", filename); | ||
111 | return -1; | ||
112 | } | ||
113 | ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config), | ||
114 | sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF); | ||
115 | hv_dev_close(fd); | ||
116 | if (ret != sizeof(rc_config)) { | ||
117 | pr_err("PCI: wanted %zd bytes, got %d\n", | ||
118 | sizeof(rc_config), ret); | ||
119 | return -1; | ||
120 | } | ||
121 | /* Record irq_base so that we can map INTx to IRQ # later. */ | ||
122 | controller->irq_base = rc_config.intr; | ||
123 | |||
124 | for (x = 0; x < 4; x++) | ||
125 | tile_irq_activate(rc_config.intr + x, | ||
126 | TILE_IRQ_HW_CLEAR); | ||
127 | |||
128 | if (rc_config.plx_gen1) | ||
129 | controller->plx_gen1 = 1; | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | /* | ||
135 | * First initialization entry point, called from setup_arch(). | ||
136 | * | ||
137 | * Find valid controllers and fill in pci_controller structs for each | ||
138 | * of them. | ||
139 | * | ||
140 | * Returns the number of controllers discovered. | ||
141 | */ | ||
142 | int __init tile_pci_init(void) | ||
143 | { | ||
144 | int i; | ||
145 | |||
146 | pr_info("PCI: Searching for controllers...\n"); | ||
147 | |||
148 | /* Do any configuration we need before using the PCIe */ | ||
149 | |||
150 | for (i = 0; i < TILE_NUM_PCIE; i++) { | ||
151 | int hv_cfg_fd0 = -1; | ||
152 | int hv_cfg_fd1 = -1; | ||
153 | int hv_mem_fd = -1; | ||
154 | char name[32]; | ||
155 | struct pci_controller *controller; | ||
156 | |||
157 | /* | ||
158 | * Open the fd to the HV. If it fails then this | ||
159 | * device doesn't exist. | ||
160 | */ | ||
161 | hv_cfg_fd0 = tile_pcie_open(i, 0); | ||
162 | if (hv_cfg_fd0 < 0) | ||
163 | continue; | ||
164 | hv_cfg_fd1 = tile_pcie_open(i, 1); | ||
165 | if (hv_cfg_fd1 < 0) { | ||
166 | pr_err("PCI: Couldn't open config fd to HV " | ||
167 | "for controller %d\n", i); | ||
168 | goto err_cont; | ||
169 | } | ||
170 | |||
171 | sprintf(name, "pcie/%d/mem", i); | ||
172 | hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0); | ||
173 | if (hv_mem_fd < 0) { | ||
174 | pr_err("PCI: Could not open mem fd to HV!\n"); | ||
175 | goto err_cont; | ||
176 | } | ||
177 | |||
178 | pr_info("PCI: Found PCI controller #%d\n", i); | ||
179 | |||
180 | controller = &controllers[num_controllers]; | ||
181 | |||
182 | if (tile_init_irqs(i, controller)) { | ||
183 | pr_err("PCI: Could not initialize " | ||
184 | "IRQs, aborting.\n"); | ||
185 | goto err_cont; | ||
186 | } | ||
187 | |||
188 | controller->index = num_controllers; | ||
189 | controller->hv_cfg_fd[0] = hv_cfg_fd0; | ||
190 | controller->hv_cfg_fd[1] = hv_cfg_fd1; | ||
191 | controller->hv_mem_fd = hv_mem_fd; | ||
192 | controller->first_busno = 0; | ||
193 | controller->last_busno = 0xff; | ||
194 | controller->ops = &tile_cfg_ops; | ||
195 | |||
196 | num_controllers++; | ||
197 | continue; | ||
198 | |||
199 | err_cont: | ||
200 | if (hv_cfg_fd0 >= 0) | ||
201 | hv_dev_close(hv_cfg_fd0); | ||
202 | if (hv_cfg_fd1 >= 0) | ||
203 | hv_dev_close(hv_cfg_fd1); | ||
204 | if (hv_mem_fd >= 0) | ||
205 | hv_dev_close(hv_mem_fd); | ||
206 | continue; | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * Before using the PCIe, see if we need to do any platform-specific | ||
211 | * configuration, such as the PLX switch Gen 1 issue on TILEmpower. | ||
212 | */ | ||
213 | for (i = 0; i < num_controllers; i++) { | ||
214 | struct pci_controller *controller = &controllers[i]; | ||
215 | |||
216 | if (controller->plx_gen1) | ||
217 | tile_plx_gen1 = 1; | ||
218 | } | ||
219 | |||
220 | return num_controllers; | ||
221 | } | ||
222 | |||
223 | /* | ||
224 | * (pin - 1) converts from the PCI standard's [1:4] convention to | ||
225 | * a normal [0:3] range. | ||
226 | */ | ||
227 | static int tile_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | ||
228 | { | ||
229 | struct pci_controller *controller = | ||
230 | (struct pci_controller *)dev->sysdata; | ||
231 | return (pin - 1) + controller->irq_base; | ||
232 | } | ||
233 | |||
234 | |||
235 | static void __init fixup_read_and_payload_sizes(void) | ||
236 | { | ||
237 | struct pci_dev *dev = NULL; | ||
238 | int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */ | ||
239 | int max_read_size = 0x2; /* Limit to 512 byte reads. */ | ||
240 | u16 new_values; | ||
241 | |||
242 | /* Scan for the smallest maximum payload size. */ | ||
243 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
244 | int pcie_caps_offset; | ||
245 | u32 devcap; | ||
246 | int max_payload; | ||
247 | |||
248 | pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
249 | if (pcie_caps_offset == 0) | ||
250 | continue; | ||
251 | |||
252 | pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP, | ||
253 | &devcap); | ||
254 | max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD; | ||
255 | if (max_payload < smallest_max_payload) | ||
256 | smallest_max_payload = max_payload; | ||
257 | } | ||
258 | |||
259 | /* Now, set the max_payload_size for all devices to that value. */ | ||
260 | new_values = (max_read_size << 12) | (smallest_max_payload << 5); | ||
261 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
262 | int pcie_caps_offset; | ||
263 | u16 devctl; | ||
264 | |||
265 | pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
266 | if (pcie_caps_offset == 0) | ||
267 | continue; | ||
268 | |||
269 | pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, | ||
270 | &devctl); | ||
271 | devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); | ||
272 | devctl |= new_values; | ||
273 | pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, | ||
274 | devctl); | ||
275 | } | ||
276 | } | ||
277 | |||
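For reference, with the limits chosen above and assuming every scanned device can handle at least a 256-byte payload, the value programmed into each device's control register works out as follows (a worked example, not additional driver code):

@code
/* max_read_size = 0x2 (512-byte reads), smallest_max_payload = 0x1 (256 bytes) */
u16 new_values = (0x2 << 12) | (0x1 << 5);   /* == 0x2020 */
/* 0x2 lands in PCI_EXP_DEVCTL_READRQ (bits 14:12), 0x1 in
 * PCI_EXP_DEVCTL_PAYLOAD (bits 7:5). */
@endcode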
278 | |||
279 | /* | ||
280 | * Second PCI initialization entry point, called by subsys_initcall. | ||
281 | * | ||
282 | * The controllers have been set up by the time we get here, by a call to | ||
283 | * tile_pci_init. | ||
284 | */ | ||
285 | static int __init pcibios_init(void) | ||
286 | { | ||
287 | int i; | ||
288 | |||
289 | pr_info("PCI: Probing PCI hardware\n"); | ||
290 | |||
291 | /* | ||
292 | * Delay a bit in case devices aren't ready. Some devices are | ||
293 | * known to require at least 20ms here, but we use a more | ||
294 | * conservative value. | ||
295 | */ | ||
296 | mdelay(250); | ||
297 | |||
298 | /* Scan all of the recorded PCI controllers. */ | ||
299 | for (i = 0; i < num_controllers; i++) { | ||
300 | struct pci_controller *controller = &controllers[i]; | ||
301 | struct pci_bus *bus; | ||
302 | |||
303 | pr_info("PCI: initializing controller #%d\n", i); | ||
304 | |||
305 | /* | ||
306 | * This comes from the generic Linux PCI driver. | ||
307 | * | ||
308 | * It reads the PCI tree for this bus into the Linux | ||
309 | * data structures. | ||
310 | * | ||
311 | * This is inlined in linux/pci.h and calls into | ||
312 | * pci_scan_bus_parented() in probe.c. | ||
313 | */ | ||
314 | bus = pci_scan_bus(0, controller->ops, controller); | ||
315 | controller->root_bus = bus; | ||
316 | controller->last_busno = bus->subordinate; | ||
317 | |||
318 | } | ||
319 | |||
320 | /* Do machine dependent PCI interrupt routing */ | ||
321 | pci_fixup_irqs(pci_common_swizzle, tile_map_irq); | ||
322 | |||
323 | /* | ||
324 | * This comes from the generic Linux PCI driver. | ||
325 | * | ||
326 | * It allocates all of the resources (I/O memory, etc) | ||
327 | * associated with the devices read in above. | ||
328 | */ | ||
329 | |||
330 | pci_assign_unassigned_resources(); | ||
331 | |||
332 | /* Configure the max_read_size and max_payload_size values. */ | ||
333 | fixup_read_and_payload_sizes(); | ||
334 | |||
335 | /* Record the I/O resources in the PCI controller structure. */ | ||
336 | for (i = 0; i < num_controllers; i++) { | ||
337 | struct pci_bus *root_bus = controllers[i].root_bus; | ||
338 | struct pci_bus *next_bus; | ||
339 | struct pci_dev *dev; | ||
340 | |||
341 | list_for_each_entry(dev, &root_bus->devices, bus_list) { | ||
342 | /* Find the PCI host controller, i.e. the 1st bridge. */ | ||
343 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && | ||
344 | (PCI_SLOT(dev->devfn) == 0)) { | ||
345 | next_bus = dev->subordinate; | ||
346 | controllers[i].mem_resources[0] = | ||
347 | *next_bus->resource[0]; | ||
348 | controllers[i].mem_resources[1] = | ||
349 | *next_bus->resource[1]; | ||
350 | controllers[i].mem_resources[2] = | ||
351 | *next_bus->resource[2]; | ||
352 | |||
353 | break; | ||
354 | } | ||
355 | } | ||
356 | |||
357 | } | ||
358 | |||
359 | return 0; | ||
360 | } | ||
361 | subsys_initcall(pcibios_init); | ||
362 | |||
363 | /* | ||
364 | * No bus fixups needed. | ||
365 | */ | ||
366 | void __devinit pcibios_fixup_bus(struct pci_bus *bus) | ||
367 | { | ||
368 | /* Nothing needs to be done. */ | ||
369 | } | ||
370 | |||
371 | /* | ||
372 | * This can be called from the generic PCI layer, but doesn't need to | ||
373 | * do anything. | ||
374 | */ | ||
375 | char __devinit *pcibios_setup(char *str) | ||
376 | { | ||
377 | /* Nothing needs to be done. */ | ||
378 | return str; | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * This is called from the generic Linux layer. | ||
383 | */ | ||
384 | void __init pcibios_update_irq(struct pci_dev *dev, int irq) | ||
385 | { | ||
386 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); | ||
387 | } | ||
388 | |||
389 | /* | ||
390 | * Enable memory and/or address decoding, as appropriate, for the | ||
391 | * device described by the 'dev' struct. | ||
392 | * | ||
393 | * This is called from the generic PCI layer, and can be called | ||
394 | * for bridges or endpoints. | ||
395 | */ | ||
396 | int pcibios_enable_device(struct pci_dev *dev, int mask) | ||
397 | { | ||
398 | u16 cmd, old_cmd; | ||
399 | u8 header_type; | ||
400 | int i; | ||
401 | struct resource *r; | ||
402 | |||
403 | pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); | ||
404 | |||
405 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
406 | old_cmd = cmd; | ||
407 | if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { | ||
408 | /* | ||
409 | * For bridges, we enable both memory and I/O decoding | ||
410 | * in all cases. | ||
411 | */ | ||
412 | cmd |= PCI_COMMAND_IO; | ||
413 | cmd |= PCI_COMMAND_MEMORY; | ||
414 | } else { | ||
415 | /* | ||
416 | * For endpoints, we enable memory and/or I/O decoding | ||
417 | * only if they have a memory resource of that type. | ||
418 | */ | ||
419 | for (i = 0; i < 6; i++) { | ||
420 | r = &dev->resource[i]; | ||
421 | if (r->flags & IORESOURCE_UNSET) { | ||
422 | pr_err("PCI: Device %s not available " | ||
423 | "because of resource collisions\n", | ||
424 | pci_name(dev)); | ||
425 | return -EINVAL; | ||
426 | } | ||
427 | if (r->flags & IORESOURCE_IO) | ||
428 | cmd |= PCI_COMMAND_IO; | ||
429 | if (r->flags & IORESOURCE_MEM) | ||
430 | cmd |= PCI_COMMAND_MEMORY; | ||
431 | } | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * We only write the command if it changed. | ||
436 | */ | ||
437 | if (cmd != old_cmd) | ||
438 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) | ||
443 | { | ||
444 | unsigned long start = pci_resource_start(dev, bar); | ||
445 | unsigned long len = pci_resource_len(dev, bar); | ||
446 | unsigned long flags = pci_resource_flags(dev, bar); | ||
447 | |||
448 | if (!len) | ||
449 | return NULL; | ||
450 | if (max && len > max) | ||
451 | len = max; | ||
452 | |||
453 | if (!(flags & IORESOURCE_MEM)) { | ||
454 | pr_info("PCI: Trying to map invalid resource %#lx\n", flags); | ||
455 | start = 0; | ||
456 | } | ||
457 | |||
458 | return (void __iomem *)start; | ||
459 | } | ||
460 | EXPORT_SYMBOL(pci_iomap); | ||
461 | |||
462 | |||
463 | /**************************************************************** | ||
464 | * | ||
465 | * Tile PCI config space read/write routines | ||
466 | * | ||
467 | ****************************************************************/ | ||
468 | |||
469 | /* | ||
470 | * These are the normal read and write ops | ||
471 | * These are expanded with macros from pci_bus_read_config_byte() etc. | ||
472 | * | ||
473 | * devfn is the combined PCI slot & function. | ||
474 | * | ||
475 | * offset is in bytes, from the start of config space for the | ||
476 | * specified bus & slot. | ||
477 | */ | ||
478 | |||
479 | static int __devinit tile_cfg_read(struct pci_bus *bus, | ||
480 | unsigned int devfn, | ||
481 | int offset, | ||
482 | int size, | ||
483 | u32 *val) | ||
484 | { | ||
485 | struct pci_controller *controller = bus->sysdata; | ||
486 | int busnum = bus->number & 0xff; | ||
487 | int slot = (devfn >> 3) & 0x1f; | ||
488 | int function = devfn & 0x7; | ||
489 | u32 addr; | ||
490 | int config_mode = 1; | ||
491 | |||
492 | /* | ||
493 | * There is no bridge between the Tile and bus 0, so we | ||
494 | * use config0 to talk to bus 0. | ||
495 | * | ||
496 | * If we're talking to a bus other than zero then we | ||
497 | * must have found a bridge. | ||
498 | */ | ||
499 | if (busnum == 0) { | ||
500 | /* | ||
501 | * We fake an empty slot for (busnum == 0) && (slot > 0), | ||
502 | * since there is only one slot on bus 0. | ||
503 | */ | ||
504 | if (slot) { | ||
505 | *val = 0xFFFFFFFF; | ||
506 | return 0; | ||
507 | } | ||
508 | config_mode = 0; | ||
509 | } | ||
510 | |||
511 | addr = busnum << 20; /* Bus in 27:20 */ | ||
512 | addr |= slot << 15; /* Slot (device) in 19:15 */ | ||
513 | addr |= function << 12; /* Function is in 14:12 */ | ||
514 | addr |= (offset & 0xFFF); /* byte address in 0:11 */ | ||
515 | |||
516 | return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0, | ||
517 | (HV_VirtAddr)(val), size, addr); | ||
518 | } | ||
519 | |||
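To make the encoding concrete, a config1 access to bus 1, device (slot) 2, function 0 at byte offset 0x10 would build the following address; the values are chosen purely for illustration.

@code
u32 addr = (1 << 20)    /* bus in bits 27:20           */
         | (2 << 15)    /* slot (device) in bits 19:15 */
         | (0 << 12)    /* function in bits 14:12      */
         | 0x10;        /* byte offset in bits 11:0    */
/* addr == 0x00110010 */
@endcode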
520 | |||
521 | /* | ||
522 | * See tile_cfg_read() for relevant comments. | ||
523 | * Note that "val" is the value to write, not a pointer to that value. | ||
524 | */ | ||
525 | static int __devinit tile_cfg_write(struct pci_bus *bus, | ||
526 | unsigned int devfn, | ||
527 | int offset, | ||
528 | int size, | ||
529 | u32 val) | ||
530 | { | ||
531 | struct pci_controller *controller = bus->sysdata; | ||
532 | int busnum = bus->number & 0xff; | ||
533 | int slot = (devfn >> 3) & 0x1f; | ||
534 | int function = devfn & 0x7; | ||
535 | u32 addr; | ||
536 | int config_mode = 1; | ||
537 | HV_VirtAddr valp = (HV_VirtAddr)&val; | ||
538 | |||
539 | /* | ||
540 | * For bus 0 slot 0 we use config 0 accesses. | ||
541 | */ | ||
542 | if (busnum == 0) { | ||
543 | /* | ||
544 | * We fake an empty slot for (busnum == 0) && (slot > 0), | ||
545 | * since there is only one slot on bus 0. | ||
546 | */ | ||
547 | if (slot) | ||
548 | return 0; | ||
549 | config_mode = 0; | ||
550 | } | ||
551 | |||
552 | addr = busnum << 20; /* Bus in 27:20 */ | ||
553 | addr |= slot << 15; /* Slot (device) in 19:15 */ | ||
554 | addr |= function << 12; /* Function is in 14:12 */ | ||
555 | addr |= (offset & 0xFFF); /* byte address in 0:11 */ | ||
556 | |||
557 | #ifdef __BIG_ENDIAN | ||
558 | /* Point to the correct part of the 32-bit "val". */ | ||
559 | valp += 4 - size; | ||
560 | #endif | ||
561 | |||
562 | return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0, | ||
563 | valp, size, addr); | ||
564 | } | ||
565 | |||
566 | |||
567 | static struct pci_ops tile_cfg_ops = { | ||
568 | .read = tile_cfg_read, | ||
569 | .write = tile_cfg_write, | ||
570 | }; | ||
571 | |||
572 | |||
573 | /* | ||
574 | * In the following, each PCI controller's mem_resources[1] | ||
575 | * represents its (non-prefetchable) PCI memory resource. | ||
576 | * mem_resources[0] and mem_resources[2] refer to its PCI I/O and | ||
577 | * prefetchable PCI memory resources, respectively. | ||
578 | * For more details, see pci_setup_bridge() in setup-bus.c. | ||
579 | * By comparing the target PCI memory address against the | ||
580 | * end address of controller 0, we can determine the controller | ||
581 | * that should accept the PCI memory access. | ||
582 | */ | ||
583 | #define TILE_READ(size, type) \ | ||
584 | type _tile_read##size(unsigned long addr) \ | ||
585 | { \ | ||
586 | type val; \ | ||
587 | int idx = 0; \ | ||
588 | if (addr > controllers[0].mem_resources[1].end && \ | ||
589 | addr > controllers[0].mem_resources[2].end) \ | ||
590 | idx = 1; \ | ||
591 | if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \ | ||
592 | (HV_VirtAddr)(&val), sizeof(type), addr)) \ | ||
593 | pr_err("PCI: read %zd bytes at 0x%lX failed\n", \ | ||
594 | sizeof(type), addr); \ | ||
595 | return val; \ | ||
596 | } \ | ||
597 | EXPORT_SYMBOL(_tile_read##size) | ||
598 | |||
599 | TILE_READ(b, u8); | ||
600 | TILE_READ(w, u16); | ||
601 | TILE_READ(l, u32); | ||
602 | TILE_READ(q, u64); | ||
603 | |||
604 | #define TILE_WRITE(size, type) \ | ||
605 | void _tile_write##size(type val, unsigned long addr) \ | ||
606 | { \ | ||
607 | int idx = 0; \ | ||
608 | if (addr > controllers[0].mem_resources[1].end && \ | ||
609 | addr > controllers[0].mem_resources[2].end) \ | ||
610 | idx = 1; \ | ||
611 | if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \ | ||
612 | (HV_VirtAddr)(&val), sizeof(type), addr)) \ | ||
613 | pr_err("PCI: write %zd bytes at 0x%lX failed\n", \ | ||
614 | sizeof(type), addr); \ | ||
615 | } \ | ||
616 | EXPORT_SYMBOL(_tile_write##size) | ||
617 | |||
618 | TILE_WRITE(b, u8); | ||
619 | TILE_WRITE(w, u16); | ||
620 | TILE_WRITE(l, u32); | ||
621 | TILE_WRITE(q, u64); | ||
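For readers new to the token pasting, TILE_READ(l, u32) expands to roughly the function below (reconstructed by hand from the macro, not copied from the tree); the controller index falls back to 1 only when the address lies beyond both memory windows of controller 0:

	u32 _tile_readl(unsigned long addr)
	{
		u32 val;
		int idx = 0;

		if (addr > controllers[0].mem_resources[1].end &&
		    addr > controllers[0].mem_resources[2].end)
			idx = 1;
		if (hv_dev_pread(controllers[idx].hv_mem_fd, 0,
				 (HV_VirtAddr)(&val), sizeof(u32), addr))
			pr_err("PCI: read %zd bytes at 0x%lX failed\n",
			       sizeof(u32), addr);
		return val;
	}
	EXPORT_SYMBOL(_tile_readl);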
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index fb0b3cbeae14..f18573643ed1 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -840,7 +840,7 @@ static int __init topology_init(void) | |||
840 | for_each_online_node(i) | 840 | for_each_online_node(i) |
841 | register_one_node(i); | 841 | register_one_node(i); |
842 | 842 | ||
843 | for_each_present_cpu(i) | 843 | for (i = 0; i < smp_height * smp_width; ++i) |
844 | register_cpu(&cpu_devices[i], i); | 844 | register_cpu(&cpu_devices[i], i); |
845 | 845 | ||
846 | return 0; | 846 | return 0; |
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index 687719d4abd1..757407e36696 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
21 | #include <linux/signal.h> | 20 | #include <linux/signal.h> |
22 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c index 74d62d098edf..b949edcec200 100644 --- a/arch/tile/kernel/smpboot.c +++ b/arch/tile/kernel/smpboot.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/kernel_stat.h> | 20 | #include <linux/kernel_stat.h> |
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/bootmem.h> | 21 | #include <linux/bootmem.h> |
23 | #include <linux/notifier.h> | 22 | #include <linux/notifier.h> |
24 | #include <linux/cpu.h> | 23 | #include <linux/cpu.h> |
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c index 7e764669a022..e2187d24a9b4 100644 --- a/arch/tile/kernel/sys.c +++ b/arch/tile/kernel/sys.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
23 | #include <linux/smp_lock.h> | ||
24 | #include <linux/syscalls.h> | 23 | #include <linux/syscalls.h> |
25 | #include <linux/mman.h> | 24 | #include <linux/mman.h> |
26 | #include <linux/file.h> | 25 | #include <linux/file.h> |
diff --git a/arch/tile/lib/memchr_32.c b/arch/tile/lib/memchr_32.c index 6235283b4859..cc3d9badf030 100644 --- a/arch/tile/lib/memchr_32.c +++ b/arch/tile/lib/memchr_32.c | |||
@@ -18,12 +18,24 @@ | |||
18 | 18 | ||
19 | void *memchr(const void *s, int c, size_t n) | 19 | void *memchr(const void *s, int c, size_t n) |
20 | { | 20 | { |
21 | const uint32_t *last_word_ptr; | ||
22 | const uint32_t *p; | ||
23 | const char *last_byte_ptr; | ||
24 | uintptr_t s_int; | ||
25 | uint32_t goal, before_mask, v, bits; | ||
26 | char *ret; | ||
27 | |||
28 | if (__builtin_expect(n == 0, 0)) { | ||
29 | /* Don't dereference any memory if the array is empty. */ | ||
30 | return NULL; | ||
31 | } | ||
32 | |||
21 | /* Get an aligned pointer. */ | 33 | /* Get an aligned pointer. */ |
22 | const uintptr_t s_int = (uintptr_t) s; | 34 | s_int = (uintptr_t) s; |
23 | const uint32_t *p = (const uint32_t *)(s_int & -4); | 35 | p = (const uint32_t *)(s_int & -4); |
24 | 36 | ||
25 | /* Create four copies of the byte for which we are looking. */ | 37 | /* Create four copies of the byte for which we are looking. */ |
26 | const uint32_t goal = 0x01010101 * (uint8_t) c; | 38 | goal = 0x01010101 * (uint8_t) c; |
27 | 39 | ||
28 | /* Read the first word, but munge it so that bytes before the array | 40 | /* Read the first word, but munge it so that bytes before the array |
29 | * will not match goal. | 41 | * will not match goal. |
@@ -31,23 +43,14 @@ void *memchr(const void *s, int c, size_t n) | |||
31 | * Note that this shift count expression works because we know | 43 | * Note that this shift count expression works because we know |
32 | * shift counts are taken mod 32. | 44 | * shift counts are taken mod 32. |
33 | */ | 45 | */ |
34 | const uint32_t before_mask = (1 << (s_int << 3)) - 1; | 46 | before_mask = (1 << (s_int << 3)) - 1; |
35 | uint32_t v = (*p | before_mask) ^ (goal & before_mask); | 47 | v = (*p | before_mask) ^ (goal & before_mask); |
36 | 48 | ||
37 | /* Compute the address of the last byte. */ | 49 | /* Compute the address of the last byte. */ |
38 | const char *const last_byte_ptr = (const char *)s + n - 1; | 50 | last_byte_ptr = (const char *)s + n - 1; |
39 | 51 | ||
40 | /* Compute the address of the word containing the last byte. */ | 52 | /* Compute the address of the word containing the last byte. */ |
41 | const uint32_t *const last_word_ptr = | 53 | last_word_ptr = (const uint32_t *)((uintptr_t) last_byte_ptr & -4); |
42 | (const uint32_t *)((uintptr_t) last_byte_ptr & -4); | ||
43 | |||
44 | uint32_t bits; | ||
45 | char *ret; | ||
46 | |||
47 | if (__builtin_expect(n == 0, 0)) { | ||
48 | /* Don't dereference any memory if the array is empty. */ | ||
49 | return NULL; | ||
50 | } | ||
51 | 54 | ||
52 | while ((bits = __insn_seqb(v, goal)) == 0) { | 55 | while ((bits = __insn_seqb(v, goal)) == 0) { |
53 | if (__builtin_expect(p == last_word_ptr, 0)) { | 56 | if (__builtin_expect(p == last_word_ptr, 0)) { |
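A quick worked example of the masking trick, with invented inputs: searching for c = 'a' (0x61) in a buffer whose start address satisfies s_int & 3 == 2. Because Tile shift counts are taken mod 32:

	goal        = 0x01010101 * 0x61         = 0x61616161
	before_mask = (1 << ((2 * 8) & 31)) - 1 = 0x0000ffff
	v           = (*p | 0x0000ffff) ^ (0x61616161 & 0x0000ffff)
	              /* the two pre-buffer bytes become 0x9e, never 0x61 */

so the bytes that sit before the start of the array in the first aligned word can never compare equal to goal.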
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c index 485e24d62c6b..5cd1c4004eca 100644 --- a/arch/tile/lib/spinlock_32.c +++ b/arch/tile/lib/spinlock_32.c | |||
@@ -167,23 +167,30 @@ void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val) | |||
167 | * when we compare them. | 167 | * when we compare them. |
168 | */ | 168 | */ |
169 | u32 my_ticket_; | 169 | u32 my_ticket_; |
170 | u32 iterations = 0; | ||
170 | 171 | ||
171 | /* Take out the next ticket; this will also stop would-be readers. */ | 172 | /* |
172 | if (val & 1) | 173 | * Wait until there are no readers, then bump up the next |
173 | val = get_rwlock(rwlock); | 174 | * field and capture the ticket value. |
174 | rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT); | 175 | */ |
176 | for (;;) { | ||
177 | if (!(val & 1)) { | ||
178 | if ((val >> RD_COUNT_SHIFT) == 0) | ||
179 | break; | ||
180 | rwlock->lock = val; | ||
181 | } | ||
182 | delay_backoff(iterations++); | ||
183 | val = __insn_tns((int *)&rwlock->lock); | ||
184 | } | ||
175 | 185 | ||
176 | /* Extract my ticket value from the original word. */ | 186 | /* Take out the next ticket and extract my ticket value. */ |
187 | rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT); | ||
177 | my_ticket_ = val >> WR_NEXT_SHIFT; | 188 | my_ticket_ = val >> WR_NEXT_SHIFT; |
178 | 189 | ||
179 | /* | 190 | /* Wait until the "current" field matches our ticket. */ |
180 | * Wait until the "current" field matches our ticket, and | ||
181 | * there are no remaining readers. | ||
182 | */ | ||
183 | for (;;) { | 191 | for (;;) { |
184 | u32 curr_ = val >> WR_CURR_SHIFT; | 192 | u32 curr_ = val >> WR_CURR_SHIFT; |
185 | u32 readers = val >> RD_COUNT_SHIFT; | 193 | u32 delta = ((my_ticket_ - curr_) & WR_MASK); |
186 | u32 delta = ((my_ticket_ - curr_) & WR_MASK) + !!readers; | ||
187 | if (likely(delta == 0)) | 194 | if (likely(delta == 0)) |
188 | break; | 195 | break; |
189 | 196 | ||
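Ticket arithmetic, with invented numbers: if my_ticket_ is 5 while the current field reads 3, the writer keeps spinning; once the two earlier writers release the lock and curr_ reaches 5, delta is 0 and the loop exits. The !!readers term can be dropped from delta because the new leading loop waits for the reader count to drain before the ticket is taken.

	my_ticket_ = 5, curr_ = 3  ->  delta = (5 - 3) & WR_MASK = 2   /* spin */
	my_ticket_ = 5, curr_ = 5  ->  delta = 0                       /* acquired */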
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index f295b4ac941d..dcebfc831cd6 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/mman.h> | 24 | #include <linux/mman.h> |
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/smp.h> | 26 | #include <linux/smp.h> |
27 | #include <linux/smp_lock.h> | ||
28 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
29 | #include <linux/init.h> | 28 | #include <linux/init.h> |
30 | #include <linux/tty.h> | 29 | #include <linux/tty.h> |
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index 24688b697a8d..201a582c4137 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/hugetlb.h> | 22 | #include <linux/hugetlb.h> |
23 | #include <linux/pagemap.h> | 23 | #include <linux/pagemap.h> |
24 | #include <linux/smp_lock.h> | ||
25 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
26 | #include <linux/err.h> | 25 | #include <linux/err.h> |
27 | #include <linux/sysctl.h> | 26 | #include <linux/sysctl.h> |
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 7f7338c90784..1664cce7b0ac 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c | |||
@@ -727,6 +727,9 @@ struct winch { | |||
727 | 727 | ||
728 | static void free_winch(struct winch *winch, int free_irq_ok) | 728 | static void free_winch(struct winch *winch, int free_irq_ok) |
729 | { | 729 | { |
730 | if (free_irq_ok) | ||
731 | free_irq(WINCH_IRQ, winch); | ||
732 | |||
730 | list_del(&winch->list); | 733 | list_del(&winch->list); |
731 | 734 | ||
732 | if (winch->pid != -1) | 735 | if (winch->pid != -1) |
@@ -735,8 +738,6 @@ static void free_winch(struct winch *winch, int free_irq_ok) | |||
735 | os_close_file(winch->fd); | 738 | os_close_file(winch->fd); |
736 | if (winch->stack != 0) | 739 | if (winch->stack != 0) |
737 | free_stack(winch->stack, 0); | 740 | free_stack(winch->stack, 0); |
738 | if (free_irq_ok) | ||
739 | free_irq(WINCH_IRQ, winch); | ||
740 | kfree(winch); | 741 | kfree(winch); |
741 | } | 742 | } |
742 | 743 | ||
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c index 340268be00b5..09bd7b585726 100644 --- a/arch/um/kernel/exec.c +++ b/arch/um/kernel/exec.c | |||
@@ -5,7 +5,6 @@ | |||
5 | 5 | ||
6 | #include "linux/stddef.h" | 6 | #include "linux/stddef.h" |
7 | #include "linux/fs.h" | 7 | #include "linux/fs.h" |
8 | #include "linux/smp_lock.h" | ||
9 | #include "linux/ptrace.h" | 8 | #include "linux/ptrace.h" |
10 | #include "linux/sched.h" | 9 | #include "linux/sched.h" |
11 | #include "linux/slab.h" | 10 | #include "linux/slab.h" |
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 849813f398e7..5852519b2d0f 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/syscalls.h> | 28 | #include <linux/syscalls.h> |
29 | #include <linux/times.h> | 29 | #include <linux/times.h> |
30 | #include <linux/utsname.h> | 30 | #include <linux/utsname.h> |
31 | #include <linux/smp_lock.h> | ||
32 | #include <linux/mm.h> | 31 | #include <linux/mm.h> |
33 | #include <linux/uio.h> | 32 | #include <linux/uio.h> |
34 | #include <linux/poll.h> | 33 | #include <linux/poll.h> |
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 4d293dced62f..9479a037419f 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h | |||
@@ -216,8 +216,8 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) | |||
216 | } | 216 | } |
217 | 217 | ||
218 | /* Return a pointer with the offset calculated */ | 218 | /* Return a pointer with the offset calculated */
219 | static inline unsigned long __set_fixmap_offset(enum fixed_addresses idx, | 219 | static __always_inline unsigned long |
220 | phys_addr_t phys, pgprot_t flags) | 220 | __set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) |
221 | { | 221 | { |
222 | __set_fixmap(idx, phys, flags); | 222 | __set_fixmap(idx, phys, flags); |
223 | return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1)); | 223 | return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1)); |
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h index e8506c1f0c55..1c10c88ee4e1 100644 --- a/arch/x86/include/asm/xen/interface.h +++ b/arch/x86/include/asm/xen/interface.h | |||
@@ -61,9 +61,9 @@ DEFINE_GUEST_HANDLE(void); | |||
61 | #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) | 61 | #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | #ifndef machine_to_phys_mapping | 64 | #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) |
65 | #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) | 65 | #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) |
66 | #endif | 66 | #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>__MACH2PHYS_SHIFT) |
67 | 67 | ||
68 | /* Maximum number of virtual CPUs in multi-processor guests. */ | 68 | /* Maximum number of virtual CPUs in multi-processor guests. */ |
69 | #define MAX_VIRT_CPUS 32 | 69 | #define MAX_VIRT_CPUS 32 |
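With the per-architecture constants added in the interface_32.h and interface_64.h hunks below, the new MACH2PHYS_NR_ENTRIES works out to the following (plain arithmetic, assuming 4 KB machine frames):

	32-bit: (0xF6800000 - 0xF5800000) >> 2                 = 0x400000 entries
	        /* 4 Mi four-byte entries: a 16 MB table covering 16 GB of MFNs */
	64-bit: (0xFFFF804000000000 - 0xFFFF800000000000) >> 3 = 2^35 entries
	        /* eight-byte entries covering 2^47 bytes (128 TB) of machine memory */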
diff --git a/arch/x86/include/asm/xen/interface_32.h b/arch/x86/include/asm/xen/interface_32.h index 42a7e004ae5c..8413688b2571 100644 --- a/arch/x86/include/asm/xen/interface_32.h +++ b/arch/x86/include/asm/xen/interface_32.h | |||
@@ -32,6 +32,11 @@ | |||
32 | /* And the trap vector is... */ | 32 | /* And the trap vector is... */ |
33 | #define TRAP_INSTR "int $0x82" | 33 | #define TRAP_INSTR "int $0x82" |
34 | 34 | ||
35 | #define __MACH2PHYS_VIRT_START 0xF5800000 | ||
36 | #define __MACH2PHYS_VIRT_END 0xF6800000 | ||
37 | |||
38 | #define __MACH2PHYS_SHIFT 2 | ||
39 | |||
35 | /* | 40 | /* |
36 | * Virtual addresses beyond this are not modifiable by guest OSes. The | 41 | * Virtual addresses beyond this are not modifiable by guest OSes. The |
37 | * machine->physical mapping table starts at this address, read-only. | 42 | * machine->physical mapping table starts at this address, read-only. |
diff --git a/arch/x86/include/asm/xen/interface_64.h b/arch/x86/include/asm/xen/interface_64.h index 100d2662b97c..839a4811cf98 100644 --- a/arch/x86/include/asm/xen/interface_64.h +++ b/arch/x86/include/asm/xen/interface_64.h | |||
@@ -39,18 +39,7 @@ | |||
39 | #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 | 39 | #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 |
40 | #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 | 40 | #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 |
41 | #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 | 41 | #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 |
42 | 42 | #define __MACH2PHYS_SHIFT 3 | |
43 | #ifndef HYPERVISOR_VIRT_START | ||
44 | #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) | ||
45 | #define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) | ||
46 | #endif | ||
47 | |||
48 | #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) | ||
49 | #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) | ||
50 | #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) | ||
51 | #ifndef machine_to_phys_mapping | ||
52 | #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) | ||
53 | #endif | ||
54 | 43 | ||
55 | /* | 44 | /* |
56 | * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) | 45 | * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) |
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index dd8c1414b3d5..8760cc60a21c 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | #include <linux/spinlock.h> | 6 | #include <linux/spinlock.h> |
7 | #include <linux/pfn.h> | 7 | #include <linux/pfn.h> |
8 | #include <linux/mm.h> | ||
8 | 9 | ||
9 | #include <asm/uaccess.h> | 10 | #include <asm/uaccess.h> |
10 | #include <asm/page.h> | 11 | #include <asm/page.h> |
@@ -35,6 +36,8 @@ typedef struct xpaddr { | |||
35 | #define MAX_DOMAIN_PAGES \ | 36 | #define MAX_DOMAIN_PAGES \ |
36 | ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE)) | 37 | ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE)) |
37 | 38 | ||
39 | extern unsigned long *machine_to_phys_mapping; | ||
40 | extern unsigned int machine_to_phys_order; | ||
38 | 41 | ||
39 | extern unsigned long get_phys_to_machine(unsigned long pfn); | 42 | extern unsigned long get_phys_to_machine(unsigned long pfn); |
40 | extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); | 43 | extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); |
@@ -69,10 +72,8 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) | |||
69 | if (xen_feature(XENFEAT_auto_translated_physmap)) | 72 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
70 | return mfn; | 73 | return mfn; |
71 | 74 | ||
72 | #if 0 | ||
73 | if (unlikely((mfn >> machine_to_phys_order) != 0)) | 75 | if (unlikely((mfn >> machine_to_phys_order) != 0)) |
74 | return max_mapnr; | 76 | return ~0; |
75 | #endif | ||
76 | 77 | ||
77 | pfn = 0; | 78 | pfn = 0; |
78 | /* | 79 | /* |
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 1b7b31ab7d86..212a6a42527c 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/init.h> | 33 | #include <linux/init.h> |
34 | #include <linux/poll.h> | 34 | #include <linux/poll.h> |
35 | #include <linux/smp.h> | 35 | #include <linux/smp.h> |
36 | #include <linux/smp_lock.h> | ||
37 | #include <linux/major.h> | 36 | #include <linux/major.h> |
38 | #include <linux/fs.h> | 37 | #include <linux/fs.h> |
39 | #include <linux/device.h> | 38 | #include <linux/device.h> |
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index ec592caac4b4..cd21b654dec6 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -315,14 +315,18 @@ static void kgdb_remove_all_hw_break(void) | |||
315 | if (!breakinfo[i].enabled) | 315 | if (!breakinfo[i].enabled) |
316 | continue; | 316 | continue; |
317 | bp = *per_cpu_ptr(breakinfo[i].pev, cpu); | 317 | bp = *per_cpu_ptr(breakinfo[i].pev, cpu); |
318 | if (bp->attr.disabled == 1) | 318 | if (!bp->attr.disabled) { |
319 | arch_uninstall_hw_breakpoint(bp); | ||
320 | bp->attr.disabled = 1; | ||
319 | continue; | 321 | continue; |
322 | } | ||
320 | if (dbg_is_early) | 323 | if (dbg_is_early) |
321 | early_dr7 &= ~encode_dr7(i, breakinfo[i].len, | 324 | early_dr7 &= ~encode_dr7(i, breakinfo[i].len, |
322 | breakinfo[i].type); | 325 | breakinfo[i].type); |
323 | else | 326 | else if (hw_break_release_slot(i)) |
324 | arch_uninstall_hw_breakpoint(bp); | 327 | printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n", |
325 | bp->attr.disabled = 1; | 328 | breakinfo[i].addr); |
329 | breakinfo[i].enabled = 0; | ||
326 | } | 330 | } |
327 | } | 331 | } |
328 | 332 | ||
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 7bf2dc4c8f70..12fcbe2c143e 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <linux/poll.h> | 31 | #include <linux/poll.h> |
32 | #include <linux/smp.h> | 32 | #include <linux/smp.h> |
33 | #include <linux/smp_lock.h> | ||
34 | #include <linux/major.h> | 33 | #include <linux/major.h> |
35 | #include <linux/fs.h> | 34 | #include <linux/fs.h> |
36 | #include <linux/device.h> | 35 | #include <linux/device.h> |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 82e144a4e514..1ca12298ffc7 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -3395,6 +3395,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
3395 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; | 3395 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; |
3396 | 3396 | ||
3397 | load_host_msrs(vcpu); | 3397 | load_host_msrs(vcpu); |
3398 | kvm_load_ldt(ldt_selector); | ||
3398 | loadsegment(fs, fs_selector); | 3399 | loadsegment(fs, fs_selector); |
3399 | #ifdef CONFIG_X86_64 | 3400 | #ifdef CONFIG_X86_64 |
3400 | load_gs_index(gs_selector); | 3401 | load_gs_index(gs_selector); |
@@ -3402,7 +3403,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
3402 | #else | 3403 | #else |
3403 | loadsegment(gs, gs_selector); | 3404 | loadsegment(gs, gs_selector); |
3404 | #endif | 3405 | #endif |
3405 | kvm_load_ldt(ldt_selector); | ||
3406 | 3406 | ||
3407 | reload_tss(vcpu); | 3407 | reload_tss(vcpu); |
3408 | 3408 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 8da0e45ff7c9..ff21fdda0c53 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -821,10 +821,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
821 | #endif | 821 | #endif |
822 | 822 | ||
823 | #ifdef CONFIG_X86_64 | 823 | #ifdef CONFIG_X86_64 |
824 | if (is_long_mode(&vmx->vcpu)) { | 824 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); |
825 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); | 825 | if (is_long_mode(&vmx->vcpu)) |
826 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); | 826 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); |
827 | } | ||
828 | #endif | 827 | #endif |
829 | for (i = 0; i < vmx->save_nmsrs; ++i) | 828 | for (i = 0; i < vmx->save_nmsrs; ++i) |
830 | kvm_set_shared_msr(vmx->guest_msrs[i].index, | 829 | kvm_set_shared_msr(vmx->guest_msrs[i].index, |
@@ -839,23 +838,23 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) | |||
839 | 838 | ||
840 | ++vmx->vcpu.stat.host_state_reload; | 839 | ++vmx->vcpu.stat.host_state_reload; |
841 | vmx->host_state.loaded = 0; | 840 | vmx->host_state.loaded = 0; |
842 | if (vmx->host_state.fs_reload_needed) | 841 | #ifdef CONFIG_X86_64 |
843 | loadsegment(fs, vmx->host_state.fs_sel); | 842 | if (is_long_mode(&vmx->vcpu)) |
843 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); | ||
844 | #endif | ||
844 | if (vmx->host_state.gs_ldt_reload_needed) { | 845 | if (vmx->host_state.gs_ldt_reload_needed) { |
845 | kvm_load_ldt(vmx->host_state.ldt_sel); | 846 | kvm_load_ldt(vmx->host_state.ldt_sel); |
846 | #ifdef CONFIG_X86_64 | 847 | #ifdef CONFIG_X86_64 |
847 | load_gs_index(vmx->host_state.gs_sel); | 848 | load_gs_index(vmx->host_state.gs_sel); |
848 | wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); | ||
849 | #else | 849 | #else |
850 | loadsegment(gs, vmx->host_state.gs_sel); | 850 | loadsegment(gs, vmx->host_state.gs_sel); |
851 | #endif | 851 | #endif |
852 | } | 852 | } |
853 | if (vmx->host_state.fs_reload_needed) | ||
854 | loadsegment(fs, vmx->host_state.fs_sel); | ||
853 | reload_tss(); | 855 | reload_tss(); |
854 | #ifdef CONFIG_X86_64 | 856 | #ifdef CONFIG_X86_64 |
855 | if (is_long_mode(&vmx->vcpu)) { | 857 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); |
856 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); | ||
857 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); | ||
858 | } | ||
859 | #endif | 858 | #endif |
860 | if (current_thread_info()->status & TS_USEDFPU) | 859 | if (current_thread_info()->status & TS_USEDFPU) |
861 | clts(); | 860 | clts(); |
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 15466c096ba5..0972315c3860 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -138,7 +138,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
138 | struct acpi_resource_address64 addr; | 138 | struct acpi_resource_address64 addr; |
139 | acpi_status status; | 139 | acpi_status status; |
140 | unsigned long flags; | 140 | unsigned long flags; |
141 | struct resource *root, *conflict; | ||
142 | u64 start, end; | 141 | u64 start, end; |
143 | 142 | ||
144 | status = resource_to_addr(acpi_res, &addr); | 143 | status = resource_to_addr(acpi_res, &addr); |
@@ -146,12 +145,10 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
146 | return AE_OK; | 145 | return AE_OK; |
147 | 146 | ||
148 | if (addr.resource_type == ACPI_MEMORY_RANGE) { | 147 | if (addr.resource_type == ACPI_MEMORY_RANGE) { |
149 | root = &iomem_resource; | ||
150 | flags = IORESOURCE_MEM; | 148 | flags = IORESOURCE_MEM; |
151 | if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY) | 149 | if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY) |
152 | flags |= IORESOURCE_PREFETCH; | 150 | flags |= IORESOURCE_PREFETCH; |
153 | } else if (addr.resource_type == ACPI_IO_RANGE) { | 151 | } else if (addr.resource_type == ACPI_IO_RANGE) { |
154 | root = &ioport_resource; | ||
155 | flags = IORESOURCE_IO; | 152 | flags = IORESOURCE_IO; |
156 | } else | 153 | } else |
157 | return AE_OK; | 154 | return AE_OK; |
@@ -172,25 +169,90 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
172 | return AE_OK; | 169 | return AE_OK; |
173 | } | 170 | } |
174 | 171 | ||
175 | conflict = insert_resource_conflict(root, res); | 172 | info->res_num++; |
176 | if (conflict) { | 173 | if (addr.translation_offset) |
177 | dev_err(&info->bridge->dev, | 174 | dev_info(&info->bridge->dev, "host bridge window %pR " |
178 | "address space collision: host bridge window %pR " | 175 | "(PCI address [%#llx-%#llx])\n", |
179 | "conflicts with %s %pR\n", | 176 | res, res->start - addr.translation_offset, |
180 | res, conflict->name, conflict); | 177 | res->end - addr.translation_offset); |
181 | } else { | 178 | else |
182 | pci_bus_add_resource(info->bus, res, 0); | 179 | dev_info(&info->bridge->dev, "host bridge window %pR\n", res); |
183 | info->res_num++; | 180 | |
184 | if (addr.translation_offset) | 181 | return AE_OK; |
185 | dev_info(&info->bridge->dev, "host bridge window %pR " | 182 | } |
186 | "(PCI address [%#llx-%#llx])\n", | 183 | |
187 | res, res->start - addr.translation_offset, | 184 | static bool resource_contains(struct resource *res, resource_size_t point) |
188 | res->end - addr.translation_offset); | 185 | { |
186 | if (res->start <= point && point <= res->end) | ||
187 | return true; | ||
188 | return false; | ||
189 | } | ||
190 | |||
191 | static void coalesce_windows(struct pci_root_info *info, int type) | ||
192 | { | ||
193 | int i, j; | ||
194 | struct resource *res1, *res2; | ||
195 | |||
196 | for (i = 0; i < info->res_num; i++) { | ||
197 | res1 = &info->res[i]; | ||
198 | if (!(res1->flags & type)) | ||
199 | continue; | ||
200 | |||
201 | for (j = i + 1; j < info->res_num; j++) { | ||
202 | res2 = &info->res[j]; | ||
203 | if (!(res2->flags & type)) | ||
204 | continue; | ||
205 | |||
206 | /* | ||
207 | * I don't like throwing away windows because then | ||
208 | * our resources no longer match the ACPI _CRS, but | ||
209 | * the kernel resource tree doesn't allow overlaps. | ||
210 | */ | ||
211 | if (resource_contains(res1, res2->start) || | ||
212 | resource_contains(res1, res2->end) || | ||
213 | resource_contains(res2, res1->start) || | ||
214 | resource_contains(res2, res1->end)) { | ||
215 | res1->start = min(res1->start, res2->start); | ||
216 | res1->end = max(res1->end, res2->end); | ||
217 | dev_info(&info->bridge->dev, | ||
218 | "host bridge window expanded to %pR; %pR ignored\n", | ||
219 | res1, res2); | ||
220 | res2->flags = 0; | ||
221 | } | ||
222 | } | ||
223 | } | ||
224 | } | ||
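To make the coalescing concrete, suppose _CRS reported two memory windows [0xa0000000-0xafffffff] and [0xa8000000-0xbfffffff] (made-up values). resource_contains() sees that res1 covers res2->start, so:

	res1: [0xa0000000-0xafffffff]   res2: [0xa8000000-0xbfffffff]
	  -> res1 grows to [0xa0000000-0xbfffffff], res2->flags = 0

and add_resources() below skips the zero-flag window when inserting into iomem_resource.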
225 | |||
226 | static void add_resources(struct pci_root_info *info) | ||
227 | { | ||
228 | int i; | ||
229 | struct resource *res, *root, *conflict; | ||
230 | |||
231 | if (!pci_use_crs) | ||
232 | return; | ||
233 | |||
234 | coalesce_windows(info, IORESOURCE_MEM); | ||
235 | coalesce_windows(info, IORESOURCE_IO); | ||
236 | |||
237 | for (i = 0; i < info->res_num; i++) { | ||
238 | res = &info->res[i]; | ||
239 | |||
240 | if (res->flags & IORESOURCE_MEM) | ||
241 | root = &iomem_resource; | ||
242 | else if (res->flags & IORESOURCE_IO) | ||
243 | root = &ioport_resource; | ||
189 | else | 244 | else |
190 | dev_info(&info->bridge->dev, | 245 | continue; |
191 | "host bridge window %pR\n", res); | 246 | |
247 | conflict = insert_resource_conflict(root, res); | ||
248 | if (conflict) | ||
249 | dev_err(&info->bridge->dev, | ||
250 | "address space collision: host bridge window %pR " | ||
251 | "conflicts with %s %pR\n", | ||
252 | res, conflict->name, conflict); | ||
253 | else | ||
254 | pci_bus_add_resource(info->bus, res, 0); | ||
192 | } | 255 | } |
193 | return AE_OK; | ||
194 | } | 256 | } |
195 | 257 | ||
196 | static void | 258 | static void |
@@ -224,6 +286,7 @@ get_current_resources(struct acpi_device *device, int busnum, | |||
224 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, | 286 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, |
225 | &info); | 287 | &info); |
226 | 288 | ||
289 | add_resources(&info); | ||
227 | return; | 290 | return; |
228 | 291 | ||
229 | name_alloc_fail: | 292 | name_alloc_fail: |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 235c0f4d3861..02c710bebf7a 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -75,6 +75,11 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); | |||
75 | enum xen_domain_type xen_domain_type = XEN_NATIVE; | 75 | enum xen_domain_type xen_domain_type = XEN_NATIVE; |
76 | EXPORT_SYMBOL_GPL(xen_domain_type); | 76 | EXPORT_SYMBOL_GPL(xen_domain_type); |
77 | 77 | ||
78 | unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START; | ||
79 | EXPORT_SYMBOL(machine_to_phys_mapping); | ||
80 | unsigned int machine_to_phys_order; | ||
81 | EXPORT_SYMBOL(machine_to_phys_order); | ||
82 | |||
78 | struct start_info *xen_start_info; | 83 | struct start_info *xen_start_info; |
79 | EXPORT_SYMBOL_GPL(xen_start_info); | 84 | EXPORT_SYMBOL_GPL(xen_start_info); |
80 | 85 | ||
@@ -1090,6 +1095,8 @@ static void __init xen_setup_stackprotector(void) | |||
1090 | /* First C function to be called on Xen boot */ | 1095 | /* First C function to be called on Xen boot */ |
1091 | asmlinkage void __init xen_start_kernel(void) | 1096 | asmlinkage void __init xen_start_kernel(void) |
1092 | { | 1097 | { |
1098 | struct physdev_set_iopl set_iopl; | ||
1099 | int rc; | ||
1093 | pgd_t *pgd; | 1100 | pgd_t *pgd; |
1094 | 1101 | ||
1095 | if (!xen_start_info) | 1102 | if (!xen_start_info) |
@@ -1097,6 +1104,8 @@ asmlinkage void __init xen_start_kernel(void) | |||
1097 | 1104 | ||
1098 | xen_domain_type = XEN_PV_DOMAIN; | 1105 | xen_domain_type = XEN_PV_DOMAIN; |
1099 | 1106 | ||
1107 | xen_setup_machphys_mapping(); | ||
1108 | |||
1100 | /* Install Xen paravirt ops */ | 1109 | /* Install Xen paravirt ops */ |
1101 | pv_info = xen_info; | 1110 | pv_info = xen_info; |
1102 | pv_init_ops = xen_init_ops; | 1111 | pv_init_ops = xen_init_ops; |
@@ -1191,8 +1200,6 @@ asmlinkage void __init xen_start_kernel(void) | |||
1191 | /* Allocate and initialize top and mid mfn levels for p2m structure */ | 1200 | /* Allocate and initialize top and mid mfn levels for p2m structure */ |
1192 | xen_build_mfn_list_list(); | 1201 | xen_build_mfn_list_list(); |
1193 | 1202 | ||
1194 | init_mm.pgd = pgd; | ||
1195 | |||
1196 | /* keep using Xen gdt for now; no urgent need to change it */ | 1203 | /* keep using Xen gdt for now; no urgent need to change it */ |
1197 | 1204 | ||
1198 | #ifdef CONFIG_X86_32 | 1205 | #ifdef CONFIG_X86_32 |
@@ -1202,10 +1209,18 @@ asmlinkage void __init xen_start_kernel(void) | |||
1202 | #else | 1209 | #else |
1203 | pv_info.kernel_rpl = 0; | 1210 | pv_info.kernel_rpl = 0; |
1204 | #endif | 1211 | #endif |
1205 | |||
1206 | /* set the limit of our address space */ | 1212 | /* set the limit of our address space */ |
1207 | xen_reserve_top(); | 1213 | xen_reserve_top(); |
1208 | 1214 | ||
1215 | /* We used to do this in xen_arch_setup, but that is too late on AMD, | ||
1216 | * where early_cpu_init (run before ->arch_setup()) calls early_amd_init, | ||
1217 | * which pokes the 0xcf8 port. | ||
1218 | */ | ||
1219 | set_iopl.iopl = 1; | ||
1220 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); | ||
1221 | if (rc != 0) | ||
1222 | xen_raw_printk("physdev_op failed %d\n", rc); | ||
1223 | |||
1209 | #ifdef CONFIG_X86_32 | 1224 | #ifdef CONFIG_X86_32 |
1210 | /* set up basic CPUID stuff */ | 1225 | /* set up basic CPUID stuff */ |
1211 | cpu_detect(&new_cpu_data); | 1226 | cpu_detect(&new_cpu_data); |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 21ed8d7f75a5..a1feff9e59b6 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -2034,6 +2034,20 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) | |||
2034 | set_page_prot(pmd, PAGE_KERNEL_RO); | 2034 | set_page_prot(pmd, PAGE_KERNEL_RO); |
2035 | } | 2035 | } |
2036 | 2036 | ||
2037 | void __init xen_setup_machphys_mapping(void) | ||
2038 | { | ||
2039 | struct xen_machphys_mapping mapping; | ||
2040 | unsigned long machine_to_phys_nr_ents; | ||
2041 | |||
2042 | if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { | ||
2043 | machine_to_phys_mapping = (unsigned long *)mapping.v_start; | ||
2044 | machine_to_phys_nr_ents = mapping.max_mfn + 1; | ||
2045 | } else { | ||
2046 | machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES; | ||
2047 | } | ||
2048 | machine_to_phys_order = fls(machine_to_phys_nr_ents - 1); | ||
2049 | } | ||
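machine_to_phys_order is the ceiling log2 of the table size, which lets mfn_to_pfn() in page.h (above) reject out-of-range MFNs with a single shift. Invented numbers for illustration:

	machine_to_phys_nr_ents = 0x400000
	machine_to_phys_order   = fls(0x3fffff) = 22
	mfn = 0x500000:  mfn >> 22 = 1   /* out of range, mfn_to_pfn() returns ~0 */
	mfn = 0x012345:  mfn >> 22 = 0   /* in range, the table lookup proceeds   */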
2050 | |||
2037 | #ifdef CONFIG_X86_64 | 2051 | #ifdef CONFIG_X86_64 |
2038 | static void convert_pfn_mfn(void *v) | 2052 | static void convert_pfn_mfn(void *v) |
2039 | { | 2053 | { |
@@ -2119,44 +2133,83 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | |||
2119 | return pgd; | 2133 | return pgd; |
2120 | } | 2134 | } |
2121 | #else /* !CONFIG_X86_64 */ | 2135 | #else /* !CONFIG_X86_64 */ |
2122 | static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD); | 2136 | static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); |
2137 | static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); | ||
2138 | |||
2139 | static __init void xen_write_cr3_init(unsigned long cr3) | ||
2140 | { | ||
2141 | unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); | ||
2142 | |||
2143 | BUG_ON(read_cr3() != __pa(initial_page_table)); | ||
2144 | BUG_ON(cr3 != __pa(swapper_pg_dir)); | ||
2145 | |||
2146 | /* | ||
2147 | * We are switching to swapper_pg_dir for the first time (from | ||
2148 | * initial_page_table) and therefore need to mark that page | ||
2149 | * read-only and then pin it. | ||
2150 | * | ||
2151 | * Xen disallows sharing of kernel PMDs for PAE | ||
2152 | * guests. Therefore we must copy the kernel PMD from | ||
2153 | * initial_page_table into a new kernel PMD to be used in | ||
2154 | * swapper_pg_dir. | ||
2155 | */ | ||
2156 | swapper_kernel_pmd = | ||
2157 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | ||
2158 | memcpy(swapper_kernel_pmd, initial_kernel_pmd, | ||
2159 | sizeof(pmd_t) * PTRS_PER_PMD); | ||
2160 | swapper_pg_dir[KERNEL_PGD_BOUNDARY] = | ||
2161 | __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT); | ||
2162 | set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO); | ||
2163 | |||
2164 | set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); | ||
2165 | xen_write_cr3(cr3); | ||
2166 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); | ||
2167 | |||
2168 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, | ||
2169 | PFN_DOWN(__pa(initial_page_table))); | ||
2170 | set_page_prot(initial_page_table, PAGE_KERNEL); | ||
2171 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL); | ||
2172 | |||
2173 | pv_mmu_ops.write_cr3 = &xen_write_cr3; | ||
2174 | } | ||
2123 | 2175 | ||
2124 | __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | 2176 | __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, |
2125 | unsigned long max_pfn) | 2177 | unsigned long max_pfn) |
2126 | { | 2178 | { |
2127 | pmd_t *kernel_pmd; | 2179 | pmd_t *kernel_pmd; |
2128 | 2180 | ||
2129 | level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | 2181 | initial_kernel_pmd = |
2182 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | ||
2130 | 2183 | ||
2131 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + | 2184 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + |
2132 | xen_start_info->nr_pt_frames * PAGE_SIZE + | 2185 | xen_start_info->nr_pt_frames * PAGE_SIZE + |
2133 | 512*1024); | 2186 | 512*1024); |
2134 | 2187 | ||
2135 | kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); | 2188 | kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); |
2136 | memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); | 2189 | memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); |
2137 | 2190 | ||
2138 | xen_map_identity_early(level2_kernel_pgt, max_pfn); | 2191 | xen_map_identity_early(initial_kernel_pmd, max_pfn); |
2139 | 2192 | ||
2140 | memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD); | 2193 | memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD); |
2141 | set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY], | 2194 | initial_page_table[KERNEL_PGD_BOUNDARY] = |
2142 | __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT)); | 2195 | __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); |
2143 | 2196 | ||
2144 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); | 2197 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO); |
2145 | set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); | 2198 | set_page_prot(initial_page_table, PAGE_KERNEL_RO); |
2146 | set_page_prot(empty_zero_page, PAGE_KERNEL_RO); | 2199 | set_page_prot(empty_zero_page, PAGE_KERNEL_RO); |
2147 | 2200 | ||
2148 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); | 2201 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); |
2149 | 2202 | ||
2150 | xen_write_cr3(__pa(swapper_pg_dir)); | 2203 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, |
2151 | 2204 | PFN_DOWN(__pa(initial_page_table))); | |
2152 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); | 2205 | xen_write_cr3(__pa(initial_page_table)); |
2153 | 2206 | ||
2154 | memblock_x86_reserve_range(__pa(xen_start_info->pt_base), | 2207 | memblock_x86_reserve_range(__pa(xen_start_info->pt_base), |
2155 | __pa(xen_start_info->pt_base + | 2208 | __pa(xen_start_info->pt_base + |
2156 | xen_start_info->nr_pt_frames * PAGE_SIZE), | 2209 | xen_start_info->nr_pt_frames * PAGE_SIZE), |
2157 | "XEN PAGETABLES"); | 2210 | "XEN PAGETABLES"); |
2158 | 2211 | ||
2159 | return swapper_pg_dir; | 2212 | return initial_page_table; |
2160 | } | 2213 | } |
2161 | #endif /* CONFIG_X86_64 */ | 2214 | #endif /* CONFIG_X86_64 */ |
2162 | 2215 | ||
@@ -2290,7 +2343,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { | |||
2290 | .write_cr2 = xen_write_cr2, | 2343 | .write_cr2 = xen_write_cr2, |
2291 | 2344 | ||
2292 | .read_cr3 = xen_read_cr3, | 2345 | .read_cr3 = xen_read_cr3, |
2346 | #ifdef CONFIG_X86_32 | ||
2347 | .write_cr3 = xen_write_cr3_init, | ||
2348 | #else | ||
2293 | .write_cr3 = xen_write_cr3, | 2349 | .write_cr3 = xen_write_cr3, |
2350 | #endif | ||
2294 | 2351 | ||
2295 | .flush_tlb_user = xen_flush_tlb, | 2352 | .flush_tlb_user = xen_flush_tlb, |
2296 | .flush_tlb_kernel = xen_flush_tlb, | 2353 | .flush_tlb_kernel = xen_flush_tlb, |
@@ -2627,7 +2684,8 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma, | |||
2627 | 2684 | ||
2628 | prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); | 2685 | prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); |
2629 | 2686 | ||
2630 | vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; | 2687 | BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) == |
2688 | (VM_PFNMAP | VM_RESERVED | VM_IO))); | ||
2631 | 2689 | ||
2632 | rmd.mfn = mfn; | 2690 | rmd.mfn = mfn; |
2633 | rmd.prot = prot; | 2691 | rmd.prot = prot; |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 769c4b01fa32..01afd8a94607 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <xen/interface/callback.h> | 23 | #include <xen/interface/callback.h> |
24 | #include <xen/interface/memory.h> | 24 | #include <xen/interface/memory.h> |
25 | #include <xen/interface/physdev.h> | 25 | #include <xen/interface/physdev.h> |
26 | #include <xen/interface/memory.h> | ||
27 | #include <xen/features.h> | 26 | #include <xen/features.h> |
28 | 27 | ||
29 | #include "xen-ops.h" | 28 | #include "xen-ops.h" |
@@ -248,8 +247,7 @@ char * __init xen_memory_setup(void) | |||
248 | else | 247 | else |
249 | extra_pages = 0; | 248 | extra_pages = 0; |
250 | 249 | ||
251 | if (!xen_initial_domain()) | 250 | xen_add_extra_mem(extra_pages); |
252 | xen_add_extra_mem(extra_pages); | ||
253 | 251 | ||
254 | return "Xen"; | 252 | return "Xen"; |
255 | } | 253 | } |
@@ -337,9 +335,6 @@ void __cpuinit xen_enable_syscall(void) | |||
337 | 335 | ||
338 | void __init xen_arch_setup(void) | 336 | void __init xen_arch_setup(void) |
339 | { | 337 | { |
340 | struct physdev_set_iopl set_iopl; | ||
341 | int rc; | ||
342 | |||
343 | xen_panic_handler_init(); | 338 | xen_panic_handler_init(); |
344 | 339 | ||
345 | HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments); | 340 | HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments); |
@@ -356,11 +351,6 @@ void __init xen_arch_setup(void) | |||
356 | xen_enable_sysenter(); | 351 | xen_enable_sysenter(); |
357 | xen_enable_syscall(); | 352 | xen_enable_syscall(); |
358 | 353 | ||
359 | set_iopl.iopl = 1; | ||
360 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); | ||
361 | if (rc != 0) | ||
362 | printk(KERN_INFO "physdev_op failed %d\n", rc); | ||
363 | |||
364 | #ifdef CONFIG_ACPI | 354 | #ifdef CONFIG_ACPI |
365 | if (!(xen_start_info->flags & SIF_INITDOMAIN)) { | 355 | if (!(xen_start_info->flags & SIF_INITDOMAIN)) { |
366 | printk(KERN_INFO "ACPI in unprivileged domain disabled\n"); | 356 | printk(KERN_INFO "ACPI in unprivileged domain disabled\n"); |
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index 58c6ee5b010c..cc3eb78e333a 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <linux/hdreg.h> | 8 | #include <linux/hdreg.h> |
9 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
10 | #include <linux/syscalls.h> | 10 | #include <linux/syscalls.h> |
11 | #include <linux/smp_lock.h> | ||
12 | #include <linux/types.h> | 11 | #include <linux/types.h> |
13 | #include <linux/uaccess.h> | 12 | #include <linux/uaccess.h> |
14 | 13 | ||
diff --git a/block/ioctl.c b/block/ioctl.c index 3d866d0037f2..a9a302eba01e 100644 --- a/block/ioctl.c +++ b/block/ioctl.c | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <linux/hdreg.h> | 5 | #include <linux/hdreg.h> |
6 | #include <linux/backing-dev.h> | 6 | #include <linux/backing-dev.h> |
7 | #include <linux/buffer_head.h> | 7 | #include <linux/buffer_head.h> |
8 | #include <linux/smp_lock.h> | ||
9 | #include <linux/blktrace_api.h> | 8 | #include <linux/blktrace_api.h> |
10 | #include <asm/uaccess.h> | 9 | #include <asm/uaccess.h> |
11 | 10 | ||
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 3f91c01c217f..66aa4bee80a6 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -3166,8 +3166,8 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, | |||
3166 | 3166 | ||
3167 | /** | 3167 | /** |
3168 | * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device | 3168 | * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device |
3169 | * @shost: SCSI host of command to be sent | ||
3169 | * @cmd: SCSI command to be sent | 3170 | * @cmd: SCSI command to be sent |
3170 | * @done: Completion function, called when command is complete | ||
3171 | * | 3171 | * |
3172 | * In some cases, this function translates SCSI commands into | 3172 | * In some cases, this function translates SCSI commands into |
3173 | * ATA taskfiles, and queues the taskfiles to be sent to | 3173 | * ATA taskfiles, and queues the taskfiles to be sent to |
@@ -3177,37 +3177,36 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, | |||
3177 | * ATA and ATAPI devices appearing as SCSI devices. | 3177 | * ATA and ATAPI devices appearing as SCSI devices. |
3178 | * | 3178 | * |
3179 | * LOCKING: | 3179 | * LOCKING: |
3180 | * Releases scsi-layer-held lock, and obtains host lock. | 3180 | * ATA host lock |
3181 | * | 3181 | * |
3182 | * RETURNS: | 3182 | * RETURNS: |
3183 | * Return value from __ata_scsi_queuecmd() if @cmd can be queued, | 3183 | * Return value from __ata_scsi_queuecmd() if @cmd can be queued, |
3184 | * 0 otherwise. | 3184 | * 0 otherwise. |
3185 | */ | 3185 | */ |
3186 | int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 3186 | int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) |
3187 | { | 3187 | { |
3188 | struct ata_port *ap; | 3188 | struct ata_port *ap; |
3189 | struct ata_device *dev; | 3189 | struct ata_device *dev; |
3190 | struct scsi_device *scsidev = cmd->device; | 3190 | struct scsi_device *scsidev = cmd->device; |
3191 | struct Scsi_Host *shost = scsidev->host; | ||
3192 | int rc = 0; | 3191 | int rc = 0; |
3192 | unsigned long irq_flags; | ||
3193 | 3193 | ||
3194 | ap = ata_shost_to_port(shost); | 3194 | ap = ata_shost_to_port(shost); |
3195 | 3195 | ||
3196 | spin_unlock(shost->host_lock); | 3196 | spin_lock_irqsave(ap->lock, irq_flags); |
3197 | spin_lock(ap->lock); | ||
3198 | 3197 | ||
3199 | ata_scsi_dump_cdb(ap, cmd); | 3198 | ata_scsi_dump_cdb(ap, cmd); |
3200 | 3199 | ||
3201 | dev = ata_scsi_find_dev(ap, scsidev); | 3200 | dev = ata_scsi_find_dev(ap, scsidev); |
3202 | if (likely(dev)) | 3201 | if (likely(dev)) |
3203 | rc = __ata_scsi_queuecmd(cmd, done, dev); | 3202 | rc = __ata_scsi_queuecmd(cmd, cmd->scsi_done, dev); |
3204 | else { | 3203 | else { |
3205 | cmd->result = (DID_BAD_TARGET << 16); | 3204 | cmd->result = (DID_BAD_TARGET << 16); |
3206 | done(cmd); | 3205 | cmd->scsi_done(cmd); |
3207 | } | 3206 | } |
3208 | 3207 | ||
3209 | spin_unlock(ap->lock); | 3208 | spin_unlock_irqrestore(ap->lock, irq_flags); |
3210 | spin_lock(shost->host_lock); | 3209 | |
3211 | return rc; | 3210 | return rc; |
3212 | } | 3211 | } |
3213 | 3212 | ||
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index c21589986c69..8b677bbf2d37 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c | |||
@@ -538,7 +538,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) | |||
538 | return 0; | 538 | return 0; |
539 | } | 539 | } |
540 | 540 | ||
541 | static void svia_configure(struct pci_dev *pdev) | 541 | static void svia_configure(struct pci_dev *pdev, int board_id) |
542 | { | 542 | { |
543 | u8 tmp8; | 543 | u8 tmp8; |
544 | 544 | ||
@@ -577,7 +577,7 @@ static void svia_configure(struct pci_dev *pdev) | |||
577 | } | 577 | } |
578 | 578 | ||
579 | /* | 579 | /* |
580 | * vt6421 has problems talking to some drives. The following | 580 | * vt6420/1 has problems talking to some drives. The following |
581 | * is the fix from Joseph Chan <JosephChan@via.com.tw>. | 581 | * is the fix from Joseph Chan <JosephChan@via.com.tw>. |
582 | * | 582 | * |
583 | * When host issues HOLD, device may send up to 20DW of data | 583 | * When host issues HOLD, device may send up to 20DW of data |
@@ -596,8 +596,9 @@ static void svia_configure(struct pci_dev *pdev) | |||
596 | * | 596 | * |
597 | * https://bugzilla.kernel.org/show_bug.cgi?id=15173 | 597 | * https://bugzilla.kernel.org/show_bug.cgi?id=15173 |
598 | * http://article.gmane.org/gmane.linux.ide/46352 | 598 | * http://article.gmane.org/gmane.linux.ide/46352 |
599 | * http://thread.gmane.org/gmane.linux.kernel/1062139 | ||
599 | */ | 600 | */ |
600 | if (pdev->device == 0x3249) { | 601 | if (board_id == vt6420 || board_id == vt6421) { |
601 | pci_read_config_byte(pdev, 0x52, &tmp8); | 602 | pci_read_config_byte(pdev, 0x52, &tmp8); |
602 | tmp8 |= 1 << 2; | 603 | tmp8 |= 1 << 2; |
603 | pci_write_config_byte(pdev, 0x52, tmp8); | 604 | pci_write_config_byte(pdev, 0x52, tmp8); |
@@ -652,7 +653,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
652 | if (rc) | 653 | if (rc) |
653 | return rc; | 654 | return rc; |
654 | 655 | ||
655 | svia_configure(pdev); | 656 | svia_configure(pdev, board_id); |
656 | 657 | ||
657 | pci_set_master(pdev); | 658 | pci_set_master(pdev); |
658 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, | 659 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 31b526661ec4..ead3e79d6fcf 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -475,20 +475,33 @@ End: | |||
475 | */ | 475 | */ |
476 | void dpm_resume_noirq(pm_message_t state) | 476 | void dpm_resume_noirq(pm_message_t state) |
477 | { | 477 | { |
478 | struct device *dev; | 478 | struct list_head list; |
479 | ktime_t starttime = ktime_get(); | 479 | ktime_t starttime = ktime_get(); |
480 | 480 | ||
481 | INIT_LIST_HEAD(&list); | ||
481 | mutex_lock(&dpm_list_mtx); | 482 | mutex_lock(&dpm_list_mtx); |
482 | transition_started = false; | 483 | transition_started = false; |
483 | list_for_each_entry(dev, &dpm_list, power.entry) | 484 | while (!list_empty(&dpm_list)) { |
485 | struct device *dev = to_device(dpm_list.next); | ||
486 | |||
487 | get_device(dev); | ||
484 | if (dev->power.status > DPM_OFF) { | 488 | if (dev->power.status > DPM_OFF) { |
485 | int error; | 489 | int error; |
486 | 490 | ||
487 | dev->power.status = DPM_OFF; | 491 | dev->power.status = DPM_OFF; |
492 | mutex_unlock(&dpm_list_mtx); | ||
493 | |||
488 | error = device_resume_noirq(dev, state); | 494 | error = device_resume_noirq(dev, state); |
495 | |||
496 | mutex_lock(&dpm_list_mtx); | ||
489 | if (error) | 497 | if (error) |
490 | pm_dev_err(dev, state, " early", error); | 498 | pm_dev_err(dev, state, " early", error); |
491 | } | 499 | } |
500 | if (!list_empty(&dev->power.entry)) | ||
501 | list_move_tail(&dev->power.entry, &list); | ||
502 | put_device(dev); | ||
503 | } | ||
504 | list_splice(&list, &dpm_list); | ||
492 | mutex_unlock(&dpm_list_mtx); | 505 | mutex_unlock(&dpm_list_mtx); |
493 | dpm_show_time(starttime, state, "early"); | 506 | dpm_show_time(starttime, state, "early"); |
494 | resume_device_irqs(); | 507 | resume_device_irqs(); |
@@ -789,20 +802,33 @@ End: | |||
789 | */ | 802 | */ |
790 | int dpm_suspend_noirq(pm_message_t state) | 803 | int dpm_suspend_noirq(pm_message_t state) |
791 | { | 804 | { |
792 | struct device *dev; | 805 | struct list_head list; |
793 | ktime_t starttime = ktime_get(); | 806 | ktime_t starttime = ktime_get(); |
794 | int error = 0; | 807 | int error = 0; |
795 | 808 | ||
809 | INIT_LIST_HEAD(&list); | ||
796 | suspend_device_irqs(); | 810 | suspend_device_irqs(); |
797 | mutex_lock(&dpm_list_mtx); | 811 | mutex_lock(&dpm_list_mtx); |
798 | list_for_each_entry_reverse(dev, &dpm_list, power.entry) { | 812 | while (!list_empty(&dpm_list)) { |
813 | struct device *dev = to_device(dpm_list.prev); | ||
814 | |||
815 | get_device(dev); | ||
816 | mutex_unlock(&dpm_list_mtx); | ||
817 | |||
799 | error = device_suspend_noirq(dev, state); | 818 | error = device_suspend_noirq(dev, state); |
819 | |||
820 | mutex_lock(&dpm_list_mtx); | ||
800 | if (error) { | 821 | if (error) { |
801 | pm_dev_err(dev, state, " late", error); | 822 | pm_dev_err(dev, state, " late", error); |
823 | put_device(dev); | ||
802 | break; | 824 | break; |
803 | } | 825 | } |
804 | dev->power.status = DPM_OFF_IRQ; | 826 | dev->power.status = DPM_OFF_IRQ; |
827 | if (!list_empty(&dev->power.entry)) | ||
828 | list_move(&dev->power.entry, &list); | ||
829 | put_device(dev); | ||
805 | } | 830 | } |
831 | list_splice_tail(&list, &dpm_list); | ||
806 | mutex_unlock(&dpm_list_mtx); | 832 | mutex_unlock(&dpm_list_mtx); |
807 | if (error) | 833 | if (error) |
808 | dpm_resume_noirq(resume_event(state)); | 834 | dpm_resume_noirq(resume_event(state)); |
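The rewritten loop uses the usual pattern for walking dpm_list when the per-device callback may sleep or the list may be reordered underneath it: pin the device, drop the list mutex around the callback, park finished entries on a private list, and splice them back afterwards. Stripped to its bones (names taken from the hunk above, error handling omitted):

	LIST_HEAD(done);

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);			/* keep dev alive while unlocked */
		mutex_unlock(&dpm_list_mtx);

		device_suspend_noirq(dev, state);	/* may take its time */

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &done);
		put_device(dev);
	}
	list_splice_tail(&done, &dpm_list);		/* restore original ordering */
	mutex_unlock(&dpm_list_mtx);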
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 575495f3c4b8..727d0225b7d0 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c | |||
@@ -62,8 +62,8 @@ static int cciss_scsi_proc_info( | |||
62 | int length, /* length of data in buffer */ | 62 | int length, /* length of data in buffer */ |
63 | int func); /* 0 == read, 1 == write */ | 63 | int func); /* 0 == read, 1 == write */ |
64 | 64 | ||
65 | static int cciss_scsi_queue_command (struct scsi_cmnd *cmd, | 65 | static int cciss_scsi_queue_command (struct Scsi_Host *h, |
66 | void (* done)(struct scsi_cmnd *)); | 66 | struct scsi_cmnd *cmd); |
67 | static int cciss_eh_device_reset_handler(struct scsi_cmnd *); | 67 | static int cciss_eh_device_reset_handler(struct scsi_cmnd *); |
68 | static int cciss_eh_abort_handler(struct scsi_cmnd *); | 68 | static int cciss_eh_abort_handler(struct scsi_cmnd *); |
69 | 69 | ||
@@ -1406,7 +1406,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c, | |||
1406 | 1406 | ||
1407 | 1407 | ||
1408 | static int | 1408 | static int |
1409 | cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | 1409 | cciss_scsi_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
1410 | { | 1410 | { |
1411 | ctlr_info_t *h; | 1411 | ctlr_info_t *h; |
1412 | int rc; | 1412 | int rc; |
@@ -1504,6 +1504,8 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd | |||
1504 | return 0; | 1504 | return 0; |
1505 | } | 1505 | } |
1506 | 1506 | ||
1507 | static DEF_SCSI_QCMD(cciss_scsi_queue_command) | ||
1508 | |||
1507 | static void cciss_unregister_scsi(ctlr_info_t *h) | 1509 | static void cciss_unregister_scsi(ctlr_info_t *h) |
1508 | { | 1510 | { |
1509 | struct cciss_scsi_adapter_data_t *sa; | 1511 | struct cciss_scsi_adapter_data_t *sa; |
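The cciss change is one instance of the tree-wide SCSI queuecommand conversion: the driver's handler is renamed to cciss_scsi_queue_command_lck() and the locked entry point with the new (struct Scsi_Host *, struct scsi_cmnd *) prototype is generated by DEF_SCSI_QCMD. Roughly, the generated wrapper takes the host lock, forwards to the _lck variant with cmd->scsi_done as the completion callback, and releases the lock; a simplified sketch (the real macro, with its extra bookkeeping, lives in include/scsi/scsi_host.h):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Simplified stand-in for DEF_SCSI_QCMD(func): defines func() with the
 * host-locked prototype and forwards to the driver's func_lck().  Details
 * of the real macro are omitted. */
#define SKETCH_DEF_SCSI_QCMD(func_name)					\
int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)		\
{									\
	unsigned long flags;						\
	int rc;								\
									\
	spin_lock_irqsave(shost->host_lock, flags);			\
	rc = func_name##_lck(cmd, cmd->scsi_done);			\
	spin_unlock_irqrestore(shost->host_lock, flags);		\
	return rc;							\
}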
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index d299fe9e78c8..89d8a7cc4054 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <linux/memcontrol.h> | 36 | #include <linux/memcontrol.h> |
37 | #include <linux/mm_inline.h> | 37 | #include <linux/mm_inline.h> |
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/smp_lock.h> | ||
40 | #include <linux/pkt_sched.h> | 39 | #include <linux/pkt_sched.h> |
41 | #define __KERNEL_SYSCALLS__ | 40 | #define __KERNEL_SYSCALLS__ |
42 | #include <linux/unistd.h> | 41 | #include <linux/unistd.h> |
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index b0551ba7ad0c..47d223c2409c 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/drbd.h> | 27 | #include <linux/drbd.h> |
28 | #include <linux/sched.h> | 28 | #include <linux/sched.h> |
29 | #include <linux/smp_lock.h> | ||
30 | #include <linux/wait.h> | 29 | #include <linux/wait.h> |
31 | #include <linux/mm.h> | 30 | #include <linux/mm.h> |
32 | #include <linux/memcontrol.h> | 31 | #include <linux/memcontrol.h> |
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c index 43412c03969e..3cb4539a98b2 100644 --- a/drivers/char/agp/frontend.c +++ b/drivers/char/agp/frontend.c | |||
@@ -39,7 +39,6 @@ | |||
39 | #include <linux/mm.h> | 39 | #include <linux/mm.h> |
40 | #include <linux/fs.h> | 40 | #include <linux/fs.h> |
41 | #include <linux/sched.h> | 41 | #include <linux/sched.h> |
42 | #include <linux/smp_lock.h> | ||
43 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
44 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
45 | #include "agp.h" | 44 | #include "agp.h" |
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index c0bd6f472c52..6ee3348bc3e4 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c | |||
@@ -81,7 +81,6 @@ static char *serial_version = "4.30"; | |||
81 | #include <linux/mm.h> | 81 | #include <linux/mm.h> |
82 | #include <linux/seq_file.h> | 82 | #include <linux/seq_file.h> |
83 | #include <linux/slab.h> | 83 | #include <linux/slab.h> |
84 | #include <linux/smp_lock.h> | ||
85 | #include <linux/init.h> | 84 | #include <linux/init.h> |
86 | #include <linux/bitops.h> | 85 | #include <linux/bitops.h> |
87 | #include <linux/platform_device.h> | 86 | #include <linux/platform_device.h> |
diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c index f6718f05dad4..095ab90535ce 100644 --- a/drivers/char/briq_panel.c +++ b/drivers/char/briq_panel.c | |||
@@ -6,7 +6,6 @@ | |||
6 | 6 | ||
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | 8 | ||
9 | #include <linux/smp_lock.h> | ||
10 | #include <linux/types.h> | 9 | #include <linux/types.h> |
11 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
12 | #include <linux/tty.h> | 11 | #include <linux/tty.h> |
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 55b8667f739f..7066e801b9d3 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/smp_lock.h> | ||
18 | #include <linux/types.h> | 17 | #include <linux/types.h> |
19 | #include <linux/miscdevice.h> | 18 | #include <linux/miscdevice.h> |
20 | #include <linux/major.h> | 19 | #include <linux/major.h> |
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 788da05190cc..2016aad85203 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/kernel.h> | 37 | #include <linux/kernel.h> |
38 | #include <linux/fs.h> | 38 | #include <linux/fs.h> |
39 | #include <linux/sched.h> | 39 | #include <linux/sched.h> |
40 | #include <linux/smp_lock.h> | ||
41 | #include <linux/init.h> | 40 | #include <linux/init.h> |
42 | #include <linux/miscdevice.h> | 41 | #include <linux/miscdevice.h> |
43 | #include <linux/delay.h> | 42 | #include <linux/delay.h> |
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index f0863bec186f..d72433f2d310 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c | |||
@@ -120,7 +120,7 @@ static int i8k_smm(struct smm_regs *regs) | |||
120 | int eax = regs->eax; | 120 | int eax = regs->eax; |
121 | 121 | ||
122 | #if defined(CONFIG_X86_64) | 122 | #if defined(CONFIG_X86_64) |
123 | asm("pushq %%rax\n\t" | 123 | asm volatile("pushq %%rax\n\t" |
124 | "movl 0(%%rax),%%edx\n\t" | 124 | "movl 0(%%rax),%%edx\n\t" |
125 | "pushq %%rdx\n\t" | 125 | "pushq %%rdx\n\t" |
126 | "movl 4(%%rax),%%ebx\n\t" | 126 | "movl 4(%%rax),%%ebx\n\t" |
@@ -142,11 +142,11 @@ static int i8k_smm(struct smm_regs *regs) | |||
142 | "lahf\n\t" | 142 | "lahf\n\t" |
143 | "shrl $8,%%eax\n\t" | 143 | "shrl $8,%%eax\n\t" |
144 | "andl $1,%%eax\n" | 144 | "andl $1,%%eax\n" |
145 | :"=a"(rc), "+m" (*regs) | 145 | :"=a"(rc) |
146 | : "a"(regs) | 146 | : "a"(regs) |
147 | : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); | 147 | : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); |
148 | #else | 148 | #else |
149 | asm("pushl %%eax\n\t" | 149 | asm volatile("pushl %%eax\n\t" |
150 | "movl 0(%%eax),%%edx\n\t" | 150 | "movl 0(%%eax),%%edx\n\t" |
151 | "push %%edx\n\t" | 151 | "push %%edx\n\t" |
152 | "movl 4(%%eax),%%ebx\n\t" | 152 | "movl 4(%%eax),%%ebx\n\t" |
@@ -168,7 +168,7 @@ static int i8k_smm(struct smm_regs *regs) | |||
168 | "lahf\n\t" | 168 | "lahf\n\t" |
169 | "shrl $8,%%eax\n\t" | 169 | "shrl $8,%%eax\n\t" |
170 | "andl $1,%%eax\n" | 170 | "andl $1,%%eax\n" |
171 | :"=a"(rc), "+m" (*regs) | 171 | :"=a"(rc) |
172 | : "a"(regs) | 172 | : "a"(regs) |
173 | : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); | 173 | : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); |
174 | #endif | 174 | #endif |
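The i8k hunks add the volatile qualifier to the SMM inline assembly and drop the "+m" (*regs) output operand, relying on the existing "memory" clobber. As a general matter, volatile keeps GCC from deleting or reordering an asm statement whose outputs it would otherwise treat as unused, and a "memory" clobber tells it that arbitrary memory may be read or written, so cached values cannot be carried across the statement. A small, unrelated illustration (hypothetical helper, not the driver code):

/* Hypothetical x86 helper: sets bit 0 of a memory-mapped register.  The
 * asm has a side effect the compiler cannot model, so it is marked
 * volatile; the "memory" clobber keeps surrounding loads and stores
 * ordered around it. */
static inline void set_bit0(volatile unsigned int *reg)
{
	asm volatile("orl $1, %0"
		     : "+m" (*reg)
		     :
		     : "memory");
}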
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c index 667abd23ad6a..7c6de4c92458 100644 --- a/drivers/char/istallion.c +++ b/drivers/char/istallion.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/smp_lock.h> | ||
25 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
26 | #include <linux/tty.h> | 25 | #include <linux/tty.h> |
27 | #include <linux/tty_flip.h> | 26 | #include <linux/tty_flip.h> |
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c index f646725bd567..748c3b0ecd89 100644 --- a/drivers/char/serial167.c +++ b/drivers/char/serial167.c | |||
@@ -52,7 +52,6 @@ | |||
52 | #include <linux/interrupt.h> | 52 | #include <linux/interrupt.h> |
53 | #include <linux/serial.h> | 53 | #include <linux/serial.h> |
54 | #include <linux/serialP.h> | 54 | #include <linux/serialP.h> |
55 | #include <linux/smp_lock.h> | ||
56 | #include <linux/string.h> | 55 | #include <linux/string.h> |
57 | #include <linux/fcntl.h> | 56 | #include <linux/fcntl.h> |
58 | #include <linux/ptrace.h> | 57 | #include <linux/ptrace.h> |
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index 9f8495b4fc8f..a7616d226a49 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c | |||
@@ -87,7 +87,6 @@ | |||
87 | #include <linux/tty_flip.h> | 87 | #include <linux/tty_flip.h> |
88 | #include <linux/mm.h> | 88 | #include <linux/mm.h> |
89 | #include <linux/serial.h> | 89 | #include <linux/serial.h> |
90 | #include <linux/smp_lock.h> | ||
91 | #include <linux/fcntl.h> | 90 | #include <linux/fcntl.h> |
92 | #include <linux/major.h> | 91 | #include <linux/major.h> |
93 | #include <linux/delay.h> | 92 | #include <linux/delay.h> |
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c index 4bef6ab83622..461a5a045517 100644 --- a/drivers/char/stallion.c +++ b/drivers/char/stallion.c | |||
@@ -40,7 +40,6 @@ | |||
40 | #include <linux/stallion.h> | 40 | #include <linux/stallion.h> |
41 | #include <linux/ioport.h> | 41 | #include <linux/ioport.h> |
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/smp_lock.h> | ||
44 | #include <linux/device.h> | 43 | #include <linux/device.h> |
45 | #include <linux/delay.h> | 44 | #include <linux/delay.h> |
46 | #include <linux/ctype.h> | 45 | #include <linux/ctype.h> |
diff --git a/drivers/char/sx.c b/drivers/char/sx.c index e53f16865397..a786326cea2f 100644 --- a/drivers/char/sx.c +++ b/drivers/char/sx.c | |||
@@ -216,7 +216,6 @@ | |||
216 | #include <linux/eisa.h> | 216 | #include <linux/eisa.h> |
217 | #include <linux/pci.h> | 217 | #include <linux/pci.h> |
218 | #include <linux/slab.h> | 218 | #include <linux/slab.h> |
219 | #include <linux/smp_lock.h> | ||
220 | #include <linux/init.h> | 219 | #include <linux/init.h> |
221 | #include <linux/miscdevice.h> | 220 | #include <linux/miscdevice.h> |
222 | #include <linux/bitops.h> | 221 | #include <linux/bitops.h> |
diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c index 493b47a0d511..956ebe2080a5 100644 --- a/drivers/char/uv_mmtimer.c +++ b/drivers/char/uv_mmtimer.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/time.h> | 24 | #include <linux/time.h> |
25 | #include <linux/math64.h> | 25 | #include <linux/math64.h> |
26 | #include <linux/smp_lock.h> | ||
27 | 26 | ||
28 | #include <asm/genapic.h> | 27 | #include <asm/genapic.h> |
29 | #include <asm/uv/uv_hub.h> | 28 | #include <asm/uv/uv_hub.h> |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 6c1b676643a9..896a2ced1d27 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -1547,31 +1547,16 @@ static int init_vqs(struct ports_device *portdev) | |||
1547 | nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; | 1547 | nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; |
1548 | 1548 | ||
1549 | vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); | 1549 | vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); |
1550 | if (!vqs) { | ||
1551 | err = -ENOMEM; | ||
1552 | goto fail; | ||
1553 | } | ||
1554 | io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); | 1550 | io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); |
1555 | if (!io_callbacks) { | ||
1556 | err = -ENOMEM; | ||
1557 | goto free_vqs; | ||
1558 | } | ||
1559 | io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); | 1551 | io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); |
1560 | if (!io_names) { | ||
1561 | err = -ENOMEM; | ||
1562 | goto free_callbacks; | ||
1563 | } | ||
1564 | portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), | 1552 | portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), |
1565 | GFP_KERNEL); | 1553 | GFP_KERNEL); |
1566 | if (!portdev->in_vqs) { | ||
1567 | err = -ENOMEM; | ||
1568 | goto free_names; | ||
1569 | } | ||
1570 | portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), | 1554 | portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), |
1571 | GFP_KERNEL); | 1555 | GFP_KERNEL); |
1572 | if (!portdev->out_vqs) { | 1556 | if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || |
1557 | !portdev->out_vqs) { | ||
1573 | err = -ENOMEM; | 1558 | err = -ENOMEM; |
1574 | goto free_invqs; | 1559 | goto free; |
1575 | } | 1560 | } |
1576 | 1561 | ||
1577 | /* | 1562 | /* |
@@ -1605,7 +1590,7 @@ static int init_vqs(struct ports_device *portdev) | |||
1605 | io_callbacks, | 1590 | io_callbacks, |
1606 | (const char **)io_names); | 1591 | (const char **)io_names); |
1607 | if (err) | 1592 | if (err) |
1608 | goto free_outvqs; | 1593 | goto free; |
1609 | 1594 | ||
1610 | j = 0; | 1595 | j = 0; |
1611 | portdev->in_vqs[0] = vqs[0]; | 1596 | portdev->in_vqs[0] = vqs[0]; |
@@ -1621,23 +1606,19 @@ static int init_vqs(struct ports_device *portdev) | |||
1621 | portdev->out_vqs[i] = vqs[j + 1]; | 1606 | portdev->out_vqs[i] = vqs[j + 1]; |
1622 | } | 1607 | } |
1623 | } | 1608 | } |
1624 | kfree(io_callbacks); | ||
1625 | kfree(io_names); | 1609 | kfree(io_names); |
1610 | kfree(io_callbacks); | ||
1626 | kfree(vqs); | 1611 | kfree(vqs); |
1627 | 1612 | ||
1628 | return 0; | 1613 | return 0; |
1629 | 1614 | ||
1630 | free_names: | 1615 | free: |
1631 | kfree(io_names); | ||
1632 | free_callbacks: | ||
1633 | kfree(io_callbacks); | ||
1634 | free_outvqs: | ||
1635 | kfree(portdev->out_vqs); | 1616 | kfree(portdev->out_vqs); |
1636 | free_invqs: | ||
1637 | kfree(portdev->in_vqs); | 1617 | kfree(portdev->in_vqs); |
1638 | free_vqs: | 1618 | kfree(io_names); |
1619 | kfree(io_callbacks); | ||
1639 | kfree(vqs); | 1620 | kfree(vqs); |
1640 | fail: | 1621 | |
1641 | return err; | 1622 | return err; |
1642 | } | 1623 | } |
1643 | 1624 | ||
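The init_vqs() rework above drops the five individual allocation checks and the matching ladder of goto labels in favour of one combined NULL test and a single "free" label. That is safe because kfree(NULL) is a no-op, so every buffer can be freed unconditionally no matter which allocation failed. The same idiom in isolation (hypothetical buffers):

#include <linux/errno.h>
#include <linux/slab.h>

/* Sketch of the allocate-everything, test-once, free-everything idiom.
 * kfree(NULL) does nothing, so a single error label can release all
 * pointers without tracking which kmalloc() failed. */
static int alloc_three(void **a, void **b, void **c, size_t n)
{
	*a = kmalloc(n, GFP_KERNEL);
	*b = kmalloc(n, GFP_KERNEL);
	*c = kmalloc(n, GFP_KERNEL);
	if (!*a || !*b || !*c)
		goto free;

	return 0;

free:
	kfree(*c);
	kfree(*b);
	kfree(*a);
	*a = *b = *c = NULL;
	return -ENOMEM;
}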
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index bfae4b309791..afa576a75a8e 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
@@ -1468,7 +1468,7 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, | |||
1468 | 1468 | ||
1469 | /* SCSI stack integration */ | 1469 | /* SCSI stack integration */ |
1470 | 1470 | ||
1471 | static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | 1471 | static int sbp2_scsi_queuecommand_lck(struct scsi_cmnd *cmd, scsi_done_fn_t done) |
1472 | { | 1472 | { |
1473 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1473 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1474 | struct fw_device *device = target_device(lu->tgt); | 1474 | struct fw_device *device = target_device(lu->tgt); |
@@ -1534,6 +1534,8 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | |||
1534 | return retval; | 1534 | return retval; |
1535 | } | 1535 | } |
1536 | 1536 | ||
1537 | static DEF_SCSI_QCMD(sbp2_scsi_queuecommand) | ||
1538 | |||
1537 | static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) | 1539 | static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) |
1538 | { | 1540 | { |
1539 | struct sbp2_logical_unit *lu = sdev->hostdata; | 1541 | struct sbp2_logical_unit *lu = sdev->hostdata; |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index b744dad5c237..a39794bac04b 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include "drmP.h" | 37 | #include "drmP.h" |
38 | #include <linux/poll.h> | 38 | #include <linux/poll.h> |
39 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
40 | #include <linux/smp_lock.h> | ||
41 | 40 | ||
42 | /* from BKL pushdown: note that nothing else serializes idr_find() */ | 41 | /* from BKL pushdown: note that nothing else serializes idr_find() */ |
43 | DEFINE_MUTEX(drm_global_mutex); | 42 | DEFINE_MUTEX(drm_global_mutex); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 80745f85902c..f737960712e6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -150,7 +150,8 @@ static const struct intel_device_info intel_ironlake_d_info = { | |||
150 | 150 | ||
151 | static const struct intel_device_info intel_ironlake_m_info = { | 151 | static const struct intel_device_info intel_ironlake_m_info = { |
152 | .gen = 5, .is_mobile = 1, | 152 | .gen = 5, .is_mobile = 1, |
153 | .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, | 153 | .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1, |
154 | .has_fbc = 0, /* disabled due to buggy hardware */ | ||
154 | .has_bsd_ring = 1, | 155 | .has_bsd_ring = 1, |
155 | }; | 156 | }; |
156 | 157 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 90414ae86afc..409826da3099 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1045,6 +1045,8 @@ void i915_gem_clflush_object(struct drm_gem_object *obj); | |||
1045 | int i915_gem_object_set_domain(struct drm_gem_object *obj, | 1045 | int i915_gem_object_set_domain(struct drm_gem_object *obj, |
1046 | uint32_t read_domains, | 1046 | uint32_t read_domains, |
1047 | uint32_t write_domain); | 1047 | uint32_t write_domain); |
1048 | int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | ||
1049 | bool interruptible); | ||
1048 | int i915_gem_init_ringbuffer(struct drm_device *dev); | 1050 | int i915_gem_init_ringbuffer(struct drm_device *dev); |
1049 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | 1051 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
1050 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | 1052 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ef188e391406..17b1cba3b5f1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -547,6 +547,19 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
547 | struct drm_i915_gem_object *obj_priv; | 547 | struct drm_i915_gem_object *obj_priv; |
548 | int ret = 0; | 548 | int ret = 0; |
549 | 549 | ||
550 | if (args->size == 0) | ||
551 | return 0; | ||
552 | |||
553 | if (!access_ok(VERIFY_WRITE, | ||
554 | (char __user *)(uintptr_t)args->data_ptr, | ||
555 | args->size)) | ||
556 | return -EFAULT; | ||
557 | |||
558 | ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr, | ||
559 | args->size); | ||
560 | if (ret) | ||
561 | return -EFAULT; | ||
562 | |||
550 | ret = i915_mutex_lock_interruptible(dev); | 563 | ret = i915_mutex_lock_interruptible(dev); |
551 | if (ret) | 564 | if (ret) |
552 | return ret; | 565 | return ret; |
@@ -564,23 +577,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
564 | goto out; | 577 | goto out; |
565 | } | 578 | } |
566 | 579 | ||
567 | if (args->size == 0) | ||
568 | goto out; | ||
569 | |||
570 | if (!access_ok(VERIFY_WRITE, | ||
571 | (char __user *)(uintptr_t)args->data_ptr, | ||
572 | args->size)) { | ||
573 | ret = -EFAULT; | ||
574 | goto out; | ||
575 | } | ||
576 | |||
577 | ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr, | ||
578 | args->size); | ||
579 | if (ret) { | ||
580 | ret = -EFAULT; | ||
581 | goto out; | ||
582 | } | ||
583 | |||
584 | ret = i915_gem_object_get_pages_or_evict(obj); | 580 | ret = i915_gem_object_get_pages_or_evict(obj); |
585 | if (ret) | 581 | if (ret) |
586 | goto out; | 582 | goto out; |
@@ -981,7 +977,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
981 | struct drm_i915_gem_pwrite *args = data; | 977 | struct drm_i915_gem_pwrite *args = data; |
982 | struct drm_gem_object *obj; | 978 | struct drm_gem_object *obj; |
983 | struct drm_i915_gem_object *obj_priv; | 979 | struct drm_i915_gem_object *obj_priv; |
984 | int ret = 0; | 980 | int ret; |
981 | |||
982 | if (args->size == 0) | ||
983 | return 0; | ||
984 | |||
985 | if (!access_ok(VERIFY_READ, | ||
986 | (char __user *)(uintptr_t)args->data_ptr, | ||
987 | args->size)) | ||
988 | return -EFAULT; | ||
989 | |||
990 | ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, | ||
991 | args->size); | ||
992 | if (ret) | ||
993 | return -EFAULT; | ||
985 | 994 | ||
986 | ret = i915_mutex_lock_interruptible(dev); | 995 | ret = i915_mutex_lock_interruptible(dev); |
987 | if (ret) | 996 | if (ret) |
@@ -994,30 +1003,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
994 | } | 1003 | } |
995 | obj_priv = to_intel_bo(obj); | 1004 | obj_priv = to_intel_bo(obj); |
996 | 1005 | ||
997 | |||
998 | /* Bounds check destination. */ | 1006 | /* Bounds check destination. */ |
999 | if (args->offset > obj->size || args->size > obj->size - args->offset) { | 1007 | if (args->offset > obj->size || args->size > obj->size - args->offset) { |
1000 | ret = -EINVAL; | 1008 | ret = -EINVAL; |
1001 | goto out; | 1009 | goto out; |
1002 | } | 1010 | } |
1003 | 1011 | ||
1004 | if (args->size == 0) | ||
1005 | goto out; | ||
1006 | |||
1007 | if (!access_ok(VERIFY_READ, | ||
1008 | (char __user *)(uintptr_t)args->data_ptr, | ||
1009 | args->size)) { | ||
1010 | ret = -EFAULT; | ||
1011 | goto out; | ||
1012 | } | ||
1013 | |||
1014 | ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, | ||
1015 | args->size); | ||
1016 | if (ret) { | ||
1017 | ret = -EFAULT; | ||
1018 | goto out; | ||
1019 | } | ||
1020 | |||
1021 | /* We can only do the GTT pwrite on untiled buffers, as otherwise | 1012 | /* We can only do the GTT pwrite on untiled buffers, as otherwise |
1022 | * it would end up going through the fenced access, and we'll get | 1013 | * it would end up going through the fenced access, and we'll get |
1023 | * different detiling behavior between reading and writing. | 1014 | * different detiling behavior between reading and writing. |
@@ -2907,6 +2898,20 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, | |||
2907 | return 0; | 2898 | return 0; |
2908 | } | 2899 | } |
2909 | 2900 | ||
2901 | int | ||
2902 | i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | ||
2903 | bool interruptible) | ||
2904 | { | ||
2905 | if (!obj->active) | ||
2906 | return 0; | ||
2907 | |||
2908 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) | ||
2909 | i915_gem_flush_ring(obj->base.dev, NULL, obj->ring, | ||
2910 | 0, obj->base.write_domain); | ||
2911 | |||
2912 | return i915_gem_object_wait_rendering(&obj->base, interruptible); | ||
2913 | } | ||
2914 | |||
2910 | /** | 2915 | /** |
2911 | * Moves a single object to the CPU read, and possibly write domain. | 2916 | * Moves a single object to the CPU read, and possibly write domain. |
2912 | * | 2917 | * |
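In both the pread and pwrite ioctls the size check, access_ok() and fault_in_pages_*() calls move out of the struct_mutex critical section and run before i915_mutex_lock_interruptible(), so the user pointer is validated and pre-faulted without the lock held and the failure paths return directly instead of unwinding through the object reference. The ordering, reduced to its shape (my_mutex and do_copy are hypothetical):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(my_mutex);                    /* hypothetical lock */
int do_copy(const char __user *p, size_t n);      /* hypothetical work */

static int read_ioctl_shape(const char __user *uptr, size_t size)
{
	int ret;

	if (size == 0)
		return 0;

	if (!access_ok(VERIFY_READ, uptr, size))  /* validate before locking */
		return -EFAULT;

	if (fault_in_pages_readable(uptr, size))  /* pre-fault before locking */
		return -EFAULT;

	ret = mutex_lock_interruptible(&my_mutex);
	if (ret)
		return ret;

	ret = do_copy(uptr, size);                /* no unwinding needed above */

	mutex_unlock(&my_mutex);
	return ret;
}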
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index c55c77043357..8df574316063 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -34,6 +34,25 @@ | |||
34 | #include "i915_drm.h" | 34 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | 35 | #include "i915_drv.h" |
36 | 36 | ||
37 | /* Here's the desired hotplug mode */ | ||
38 | #define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ | ||
39 | ADPA_CRT_HOTPLUG_WARMUP_10MS | \ | ||
40 | ADPA_CRT_HOTPLUG_SAMPLE_4S | \ | ||
41 | ADPA_CRT_HOTPLUG_VOLTAGE_50 | \ | ||
42 | ADPA_CRT_HOTPLUG_VOLREF_325MV | \ | ||
43 | ADPA_CRT_HOTPLUG_ENABLE) | ||
44 | |||
45 | struct intel_crt { | ||
46 | struct intel_encoder base; | ||
47 | bool force_hotplug_required; | ||
48 | }; | ||
49 | |||
50 | static struct intel_crt *intel_attached_crt(struct drm_connector *connector) | ||
51 | { | ||
52 | return container_of(intel_attached_encoder(connector), | ||
53 | struct intel_crt, base); | ||
54 | } | ||
55 | |||
37 | static void intel_crt_dpms(struct drm_encoder *encoder, int mode) | 56 | static void intel_crt_dpms(struct drm_encoder *encoder, int mode) |
38 | { | 57 | { |
39 | struct drm_device *dev = encoder->dev; | 58 | struct drm_device *dev = encoder->dev; |
@@ -129,7 +148,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
129 | dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); | 148 | dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); |
130 | } | 149 | } |
131 | 150 | ||
132 | adpa = 0; | 151 | adpa = ADPA_HOTPLUG_BITS; |
133 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 152 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
134 | adpa |= ADPA_HSYNC_ACTIVE_HIGH; | 153 | adpa |= ADPA_HSYNC_ACTIVE_HIGH; |
135 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 154 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
@@ -157,53 +176,44 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
157 | static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | 176 | static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) |
158 | { | 177 | { |
159 | struct drm_device *dev = connector->dev; | 178 | struct drm_device *dev = connector->dev; |
179 | struct intel_crt *crt = intel_attached_crt(connector); | ||
160 | struct drm_i915_private *dev_priv = dev->dev_private; | 180 | struct drm_i915_private *dev_priv = dev->dev_private; |
161 | u32 adpa, temp; | 181 | u32 adpa; |
162 | bool ret; | 182 | bool ret; |
163 | bool turn_off_dac = false; | ||
164 | 183 | ||
165 | temp = adpa = I915_READ(PCH_ADPA); | 184 | /* The first time through, trigger an explicit detection cycle */ |
185 | if (crt->force_hotplug_required) { | ||
186 | bool turn_off_dac = HAS_PCH_SPLIT(dev); | ||
187 | u32 save_adpa; | ||
166 | 188 | ||
167 | if (HAS_PCH_SPLIT(dev)) | 189 | crt->force_hotplug_required = 0; |
168 | turn_off_dac = true; | 190 | |
169 | 191 | save_adpa = adpa = I915_READ(PCH_ADPA); | |
170 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 192 | DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); |
171 | if (turn_off_dac) | 193 | |
172 | adpa &= ~ADPA_DAC_ENABLE; | 194 | adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; |
173 | 195 | if (turn_off_dac) | |
174 | /* disable HPD first */ | 196 | adpa &= ~ADPA_DAC_ENABLE; |
175 | I915_WRITE(PCH_ADPA, adpa); | 197 | |
176 | (void)I915_READ(PCH_ADPA); | 198 | I915_WRITE(PCH_ADPA, adpa); |
177 | 199 | ||
178 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | | 200 | if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, |
179 | ADPA_CRT_HOTPLUG_WARMUP_10MS | | 201 | 1000)) |
180 | ADPA_CRT_HOTPLUG_SAMPLE_4S | | 202 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); |
181 | ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */ | 203 | |
182 | ADPA_CRT_HOTPLUG_VOLREF_325MV | | 204 | if (turn_off_dac) { |
183 | ADPA_CRT_HOTPLUG_ENABLE | | 205 | I915_WRITE(PCH_ADPA, save_adpa); |
184 | ADPA_CRT_HOTPLUG_FORCE_TRIGGER); | 206 | POSTING_READ(PCH_ADPA); |
185 | 207 | } | |
186 | DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa); | ||
187 | I915_WRITE(PCH_ADPA, adpa); | ||
188 | |||
189 | if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, | ||
190 | 1000)) | ||
191 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); | ||
192 | |||
193 | if (turn_off_dac) { | ||
194 | /* Make sure hotplug is enabled */ | ||
195 | I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE); | ||
196 | (void)I915_READ(PCH_ADPA); | ||
197 | } | 208 | } |
198 | 209 | ||
199 | /* Check the status to see if both blue and green are on now */ | 210 | /* Check the status to see if both blue and green are on now */ |
200 | adpa = I915_READ(PCH_ADPA); | 211 | adpa = I915_READ(PCH_ADPA); |
201 | adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; | 212 | if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) |
202 | if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) || | ||
203 | (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO)) | ||
204 | ret = true; | 213 | ret = true; |
205 | else | 214 | else |
206 | ret = false; | 215 | ret = false; |
216 | DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret); | ||
207 | 217 | ||
208 | return ret; | 218 | return ret; |
209 | } | 219 | } |
@@ -277,13 +287,12 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus) | |||
277 | return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1; | 287 | return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1; |
278 | } | 288 | } |
279 | 289 | ||
280 | static bool intel_crt_detect_ddc(struct drm_encoder *encoder) | 290 | static bool intel_crt_detect_ddc(struct intel_crt *crt) |
281 | { | 291 | { |
282 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | 292 | struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; |
283 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | ||
284 | 293 | ||
285 | /* CRT should always be at 0, but check anyway */ | 294 | /* CRT should always be at 0, but check anyway */ |
286 | if (intel_encoder->type != INTEL_OUTPUT_ANALOG) | 295 | if (crt->base.type != INTEL_OUTPUT_ANALOG) |
287 | return false; | 296 | return false; |
288 | 297 | ||
289 | if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) { | 298 | if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) { |
@@ -291,7 +300,7 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder) | |||
291 | return true; | 300 | return true; |
292 | } | 301 | } |
293 | 302 | ||
294 | if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) { | 303 | if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { |
295 | DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); | 304 | DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); |
296 | return true; | 305 | return true; |
297 | } | 306 | } |
@@ -300,9 +309,9 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder) | |||
300 | } | 309 | } |
301 | 310 | ||
302 | static enum drm_connector_status | 311 | static enum drm_connector_status |
303 | intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) | 312 | intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt) |
304 | { | 313 | { |
305 | struct drm_encoder *encoder = &intel_encoder->base; | 314 | struct drm_encoder *encoder = &crt->base.base; |
306 | struct drm_device *dev = encoder->dev; | 315 | struct drm_device *dev = encoder->dev; |
307 | struct drm_i915_private *dev_priv = dev->dev_private; | 316 | struct drm_i915_private *dev_priv = dev->dev_private; |
308 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 317 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
@@ -434,7 +443,7 @@ static enum drm_connector_status | |||
434 | intel_crt_detect(struct drm_connector *connector, bool force) | 443 | intel_crt_detect(struct drm_connector *connector, bool force) |
435 | { | 444 | { |
436 | struct drm_device *dev = connector->dev; | 445 | struct drm_device *dev = connector->dev; |
437 | struct intel_encoder *encoder = intel_attached_encoder(connector); | 446 | struct intel_crt *crt = intel_attached_crt(connector); |
438 | struct drm_crtc *crtc; | 447 | struct drm_crtc *crtc; |
439 | int dpms_mode; | 448 | int dpms_mode; |
440 | enum drm_connector_status status; | 449 | enum drm_connector_status status; |
@@ -443,28 +452,31 @@ intel_crt_detect(struct drm_connector *connector, bool force) | |||
443 | if (intel_crt_detect_hotplug(connector)) { | 452 | if (intel_crt_detect_hotplug(connector)) { |
444 | DRM_DEBUG_KMS("CRT detected via hotplug\n"); | 453 | DRM_DEBUG_KMS("CRT detected via hotplug\n"); |
445 | return connector_status_connected; | 454 | return connector_status_connected; |
446 | } else | 455 | } else { |
456 | DRM_DEBUG_KMS("CRT not detected via hotplug\n"); | ||
447 | return connector_status_disconnected; | 457 | return connector_status_disconnected; |
458 | } | ||
448 | } | 459 | } |
449 | 460 | ||
450 | if (intel_crt_detect_ddc(&encoder->base)) | 461 | if (intel_crt_detect_ddc(crt)) |
451 | return connector_status_connected; | 462 | return connector_status_connected; |
452 | 463 | ||
453 | if (!force) | 464 | if (!force) |
454 | return connector->status; | 465 | return connector->status; |
455 | 466 | ||
456 | /* for pre-945g platforms use load detect */ | 467 | /* for pre-945g platforms use load detect */ |
457 | if (encoder->base.crtc && encoder->base.crtc->enabled) { | 468 | crtc = crt->base.base.crtc; |
458 | status = intel_crt_load_detect(encoder->base.crtc, encoder); | 469 | if (crtc && crtc->enabled) { |
470 | status = intel_crt_load_detect(crtc, crt); | ||
459 | } else { | 471 | } else { |
460 | crtc = intel_get_load_detect_pipe(encoder, connector, | 472 | crtc = intel_get_load_detect_pipe(&crt->base, connector, |
461 | NULL, &dpms_mode); | 473 | NULL, &dpms_mode); |
462 | if (crtc) { | 474 | if (crtc) { |
463 | if (intel_crt_detect_ddc(&encoder->base)) | 475 | if (intel_crt_detect_ddc(crt)) |
464 | status = connector_status_connected; | 476 | status = connector_status_connected; |
465 | else | 477 | else |
466 | status = intel_crt_load_detect(crtc, encoder); | 478 | status = intel_crt_load_detect(crtc, crt); |
467 | intel_release_load_detect_pipe(encoder, | 479 | intel_release_load_detect_pipe(&crt->base, |
468 | connector, dpms_mode); | 480 | connector, dpms_mode); |
469 | } else | 481 | } else |
470 | status = connector_status_unknown; | 482 | status = connector_status_unknown; |
@@ -536,17 +548,17 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { | |||
536 | void intel_crt_init(struct drm_device *dev) | 548 | void intel_crt_init(struct drm_device *dev) |
537 | { | 549 | { |
538 | struct drm_connector *connector; | 550 | struct drm_connector *connector; |
539 | struct intel_encoder *intel_encoder; | 551 | struct intel_crt *crt; |
540 | struct intel_connector *intel_connector; | 552 | struct intel_connector *intel_connector; |
541 | struct drm_i915_private *dev_priv = dev->dev_private; | 553 | struct drm_i915_private *dev_priv = dev->dev_private; |
542 | 554 | ||
543 | intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); | 555 | crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); |
544 | if (!intel_encoder) | 556 | if (!crt) |
545 | return; | 557 | return; |
546 | 558 | ||
547 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 559 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
548 | if (!intel_connector) { | 560 | if (!intel_connector) { |
549 | kfree(intel_encoder); | 561 | kfree(crt); |
550 | return; | 562 | return; |
551 | } | 563 | } |
552 | 564 | ||
@@ -554,20 +566,20 @@ void intel_crt_init(struct drm_device *dev) | |||
554 | drm_connector_init(dev, &intel_connector->base, | 566 | drm_connector_init(dev, &intel_connector->base, |
555 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); | 567 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); |
556 | 568 | ||
557 | drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs, | 569 | drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, |
558 | DRM_MODE_ENCODER_DAC); | 570 | DRM_MODE_ENCODER_DAC); |
559 | 571 | ||
560 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 572 | intel_connector_attach_encoder(intel_connector, &crt->base); |
561 | 573 | ||
562 | intel_encoder->type = INTEL_OUTPUT_ANALOG; | 574 | crt->base.type = INTEL_OUTPUT_ANALOG; |
563 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 575 | crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | |
564 | (1 << INTEL_ANALOG_CLONE_BIT) | | 576 | 1 << INTEL_ANALOG_CLONE_BIT | |
565 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 577 | 1 << INTEL_SDVO_LVDS_CLONE_BIT); |
566 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 578 | crt->base.crtc_mask = (1 << 0) | (1 << 1); |
567 | connector->interlace_allowed = 1; | 579 | connector->interlace_allowed = 1; |
568 | connector->doublescan_allowed = 0; | 580 | connector->doublescan_allowed = 0; |
569 | 581 | ||
570 | drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs); | 582 | drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs); |
571 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | 583 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); |
572 | 584 | ||
573 | drm_sysfs_connector_add(connector); | 585 | drm_sysfs_connector_add(connector); |
@@ -577,5 +589,22 @@ void intel_crt_init(struct drm_device *dev) | |||
577 | else | 589 | else |
578 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | 590 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
579 | 591 | ||
592 | /* | ||
593 | * Configure the automatic hotplug detection stuff | ||
594 | */ | ||
595 | crt->force_hotplug_required = 0; | ||
596 | if (HAS_PCH_SPLIT(dev)) { | ||
597 | u32 adpa; | ||
598 | |||
599 | adpa = I915_READ(PCH_ADPA); | ||
600 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | ||
601 | adpa |= ADPA_HOTPLUG_BITS; | ||
602 | I915_WRITE(PCH_ADPA, adpa); | ||
603 | POSTING_READ(PCH_ADPA); | ||
604 | |||
605 | DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); | ||
606 | crt->force_hotplug_required = 1; | ||
607 | } | ||
608 | |||
580 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; | 609 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; |
581 | } | 610 | } |
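intel_crt.c gains a driver-private struct intel_crt that embeds the generic intel_encoder and carries the new force_hotplug_required flag, and intel_attached_crt() recovers it from a connector via container_of on the embedded base. The hotplug path then forces a detection cycle only the first time through (after intel_crt_init programs the ADPA hotplug bits) instead of rewriting ADPA on every probe. The embed-and-downcast pattern on its own, with hypothetical types and a local stand-in for container_of:

#include <stddef.h>

struct base_encoder {                  /* hypothetical base object */
	int type;
};

struct crt_encoder {                   /* derived object embeds the base */
	struct base_encoder base;
	int force_hotplug_required;    /* extra per-device state */
};

/* Local stand-in for the kernel's container_of() from linux/kernel.h. */
#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct crt_encoder *to_crt(struct base_encoder *enc)
{
	return container_of_sketch(enc, struct crt_encoder, base);
}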
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 48d8fd686ea9..bee24b1a58e8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1611,6 +1611,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1611 | 1611 | ||
1612 | wait_event(dev_priv->pending_flip_queue, | 1612 | wait_event(dev_priv->pending_flip_queue, |
1613 | atomic_read(&obj_priv->pending_flip) == 0); | 1613 | atomic_read(&obj_priv->pending_flip) == 0); |
1614 | |||
1615 | /* Big Hammer, we also need to ensure that any pending | ||
1616 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | ||
1617 | * current scanout is retired before unpinning the old | ||
1618 | * framebuffer. | ||
1619 | */ | ||
1620 | ret = i915_gem_object_flush_gpu(obj_priv, false); | ||
1621 | if (ret) { | ||
1622 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | ||
1623 | mutex_unlock(&dev->struct_mutex); | ||
1624 | return ret; | ||
1625 | } | ||
1614 | } | 1626 | } |
1615 | 1627 | ||
1616 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, | 1628 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 2be4f728ed0c..3dba086e7eea 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -160,7 +160,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) | |||
160 | }; | 160 | }; |
161 | struct intel_gpio *gpio; | 161 | struct intel_gpio *gpio; |
162 | 162 | ||
163 | if (pin < 1 || pin > 7) | 163 | if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin]) |
164 | return NULL; | 164 | return NULL; |
165 | 165 | ||
166 | gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL); | 166 | gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL); |
@@ -172,7 +172,8 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) | |||
172 | gpio->reg += PCH_GPIOA - GPIOA; | 172 | gpio->reg += PCH_GPIOA - GPIOA; |
173 | gpio->dev_priv = dev_priv; | 173 | gpio->dev_priv = dev_priv; |
174 | 174 | ||
175 | snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]); | 175 | snprintf(gpio->adapter.name, sizeof(gpio->adapter.name), |
176 | "i915 GPIO%c", "?BACDE?F"[pin]); | ||
176 | gpio->adapter.owner = THIS_MODULE; | 177 | gpio->adapter.owner = THIS_MODULE; |
177 | gpio->adapter.algo_data = &gpio->algo; | 178 | gpio->adapter.algo_data = &gpio->algo; |
178 | gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; | 179 | gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; |
@@ -349,7 +350,7 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
349 | "panel", | 350 | "panel", |
350 | "dpc", | 351 | "dpc", |
351 | "dpb", | 352 | "dpb", |
352 | "reserved" | 353 | "reserved", |
353 | "dpd", | 354 | "dpd", |
354 | }; | 355 | }; |
355 | struct drm_i915_private *dev_priv = dev->dev_private; | 356 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -366,8 +367,8 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
366 | bus->adapter.owner = THIS_MODULE; | 367 | bus->adapter.owner = THIS_MODULE; |
367 | bus->adapter.class = I2C_CLASS_DDC; | 368 | bus->adapter.class = I2C_CLASS_DDC; |
368 | snprintf(bus->adapter.name, | 369 | snprintf(bus->adapter.name, |
369 | I2C_NAME_SIZE, | 370 | sizeof(bus->adapter.name), |
370 | "gmbus %s", | 371 | "i915 gmbus %s", |
371 | names[i]); | 372 | names[i]); |
372 | 373 | ||
373 | bus->adapter.dev.parent = &dev->pdev->dev; | 374 | bus->adapter.dev.parent = &dev->pdev->dev; |
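Two small fixes in intel_i2c.c: the GPIO pin check is now bounded by the map_pin_to_reg table itself rather than a hard-coded 1..7 range, and both adapter names are formatted with snprintf(..., sizeof(name), ...) and an "i915" prefix instead of assuming I2C_NAME_SIZE. The gmbus hunk also restores a comma after "reserved"; without it, adjacent string literals concatenate at compile time, so "reserved" "dpd" collapsed into the single entry "reserveddpd" and the name table came up one element short. The pitfall in isolation (hypothetical array):

/* Hypothetical name table showing the missing-comma pitfall: C
 * concatenates adjacent string literals, so this initializer has three
 * elements, not four, and "dpd" silently merges into "reserved". */
static const char *bus_names_buggy[] = {
	"ssc",
	"vga",
	"reserved"      /* missing comma ...                         */
	"dpd",          /* ... so this literal extends the one above */
};
/* sizeof(bus_names_buggy) / sizeof(bus_names_buggy[0]) == 3 */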
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 406228f4a2a0..b14c81110575 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c | |||
@@ -31,6 +31,7 @@ | |||
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/backlight.h> | 33 | #include <linux/backlight.h> |
34 | #include <linux/acpi.h> | ||
34 | 35 | ||
35 | #include "drmP.h" | 36 | #include "drmP.h" |
36 | #include "nouveau_drv.h" | 37 | #include "nouveau_drv.h" |
@@ -136,6 +137,14 @@ int nouveau_backlight_init(struct drm_device *dev) | |||
136 | { | 137 | { |
137 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 138 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
138 | 139 | ||
140 | #ifdef CONFIG_ACPI | ||
141 | if (acpi_video_backlight_support()) { | ||
142 | NV_INFO(dev, "ACPI backlight interface available, " | ||
143 | "not registering our own\n"); | ||
144 | return 0; | ||
145 | } | ||
146 | #endif | ||
147 | |||
139 | switch (dev_priv->card_type) { | 148 | switch (dev_priv->card_type) { |
140 | case NV_40: | 149 | case NV_40: |
141 | return nouveau_nv40_backlight_init(dev); | 150 | return nouveau_nv40_backlight_init(dev); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 5f21030a293b..b2293576f278 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -6829,7 +6829,7 @@ nouveau_bios_posted(struct drm_device *dev) | |||
6829 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 6829 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
6830 | unsigned htotal; | 6830 | unsigned htotal; |
6831 | 6831 | ||
6832 | if (dev_priv->chipset >= NV_50) { | 6832 | if (dev_priv->card_type >= NV_50) { |
6833 | if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && | 6833 | if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && |
6834 | NVReadVgaCrtc(dev, 0, 0x1a) == 0) | 6834 | NVReadVgaCrtc(dev, 0, 0x1a) == 0) |
6835 | return false; | 6835 | return false; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 80353e2b8409..c41e1c200ef5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -143,8 +143,10 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
143 | nvbo->no_vm = no_vm; | 143 | nvbo->no_vm = no_vm; |
144 | nvbo->tile_mode = tile_mode; | 144 | nvbo->tile_mode = tile_mode; |
145 | nvbo->tile_flags = tile_flags; | 145 | nvbo->tile_flags = tile_flags; |
146 | nvbo->bo.bdev = &dev_priv->ttm.bdev; | ||
146 | 147 | ||
147 | nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size); | 148 | nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo), |
149 | &align, &size); | ||
148 | align >>= PAGE_SHIFT; | 150 | align >>= PAGE_SHIFT; |
149 | 151 | ||
150 | nouveau_bo_placement_set(nvbo, flags, 0); | 152 | nouveau_bo_placement_set(nvbo, flags, 0); |
@@ -176,6 +178,31 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) | |||
176 | pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; | 178 | pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; |
177 | } | 179 | } |
178 | 180 | ||
181 | static void | ||
182 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) | ||
183 | { | ||
184 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); | ||
185 | |||
186 | if (dev_priv->card_type == NV_10 && | ||
187 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) { | ||
188 | /* | ||
189 | * Make sure that the color and depth buffers are handled | ||
190 | * by independent memory controller units. Up to a 9x | ||
191 | * speed up when alpha-blending and depth-test are enabled | ||
192 | * at the same time. | ||
193 | */ | ||
194 | int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; | ||
195 | |||
196 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { | ||
197 | nvbo->placement.fpfn = vram_pages / 2; | ||
198 | nvbo->placement.lpfn = ~0; | ||
199 | } else { | ||
200 | nvbo->placement.fpfn = 0; | ||
201 | nvbo->placement.lpfn = vram_pages / 2; | ||
202 | } | ||
203 | } | ||
204 | } | ||
205 | |||
179 | void | 206 | void |
180 | nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) | 207 | nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) |
181 | { | 208 | { |
@@ -190,6 +217,8 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) | |||
190 | pl->busy_placement = nvbo->busy_placements; | 217 | pl->busy_placement = nvbo->busy_placements; |
191 | set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, | 218 | set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, |
192 | type | busy, flags); | 219 | type | busy, flags); |
220 | |||
221 | set_placement_range(nvbo, type); | ||
193 | } | 222 | } |
194 | 223 | ||
195 | int | 224 | int |
@@ -525,7 +554,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
525 | stride = 16 * 4; | 554 | stride = 16 * 4; |
526 | height = amount / stride; | 555 | height = amount / stride; |
527 | 556 | ||
528 | if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { | 557 | if (new_mem->mem_type == TTM_PL_VRAM && |
558 | nouveau_bo_tile_layout(nvbo)) { | ||
529 | ret = RING_SPACE(chan, 8); | 559 | ret = RING_SPACE(chan, 8); |
530 | if (ret) | 560 | if (ret) |
531 | return ret; | 561 | return ret; |
@@ -546,7 +576,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
546 | BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); | 576 | BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); |
547 | OUT_RING (chan, 1); | 577 | OUT_RING (chan, 1); |
548 | } | 578 | } |
549 | if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { | 579 | if (old_mem->mem_type == TTM_PL_VRAM && |
580 | nouveau_bo_tile_layout(nvbo)) { | ||
550 | ret = RING_SPACE(chan, 8); | 581 | ret = RING_SPACE(chan, 8); |
551 | if (ret) | 582 | if (ret) |
552 | return ret; | 583 | return ret; |
@@ -753,7 +784,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, | |||
753 | if (dev_priv->card_type == NV_50) { | 784 | if (dev_priv->card_type == NV_50) { |
754 | ret = nv50_mem_vm_bind_linear(dev, | 785 | ret = nv50_mem_vm_bind_linear(dev, |
755 | offset + dev_priv->vm_vram_base, | 786 | offset + dev_priv->vm_vram_base, |
756 | new_mem->size, nvbo->tile_flags, | 787 | new_mem->size, |
788 | nouveau_bo_tile_layout(nvbo), | ||
757 | offset); | 789 | offset); |
758 | if (ret) | 790 | if (ret) |
759 | return ret; | 791 | return ret; |
@@ -894,7 +926,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
894 | * nothing to do here. | 926 | * nothing to do here. |
895 | */ | 927 | */ |
896 | if (bo->mem.mem_type != TTM_PL_VRAM) { | 928 | if (bo->mem.mem_type != TTM_PL_VRAM) { |
897 | if (dev_priv->card_type < NV_50 || !nvbo->tile_flags) | 929 | if (dev_priv->card_type < NV_50 || |
930 | !nouveau_bo_tile_layout(nvbo)) | ||
898 | return 0; | 931 | return 0; |
899 | } | 932 | } |
900 | 933 | ||
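set_placement_range() constrains NV10-class tiled buffers so that depth (ZETA) buffers land in the upper half of VRAM and colour buffers in the lower half, keeping them on independent memory controller units; the in-code comment credits the split with up to a 9x speedup when alpha blending and depth test run together. The placement choice itself is just a half-of-VRAM page-range selection, sketched with hypothetical fields:

/* Sketch of the half-split page-range selection.  fpfn/lpfn are the first
 * and last permitted page frames for the buffer; field and function names
 * here are hypothetical. */
struct placement_range {
	unsigned long fpfn;
	unsigned long lpfn;            /* ~0UL means no upper bound */
};

static void choose_vram_half(struct placement_range *p,
			     unsigned long vram_pages, int is_depth)
{
	if (is_depth) {                /* depth buffer: upper half */
		p->fpfn = vram_pages / 2;
		p->lpfn = ~0UL;
	} else {                       /* colour buffer: lower half */
		p->fpfn = 0;
		p->lpfn = vram_pages / 2;
	}
}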
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 0871495096fa..52c356e9a3d1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -281,7 +281,7 @@ detect_analog: | |||
281 | nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); | 281 | nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); |
282 | if (!nv_encoder && !nouveau_tv_disable) | 282 | if (!nv_encoder && !nouveau_tv_disable) |
283 | nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); | 283 | nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); |
284 | if (nv_encoder) { | 284 | if (nv_encoder && force) { |
285 | struct drm_encoder *encoder = to_drm_encoder(nv_encoder); | 285 | struct drm_encoder *encoder = to_drm_encoder(nv_encoder); |
286 | struct drm_encoder_helper_funcs *helper = | 286 | struct drm_encoder_helper_funcs *helper = |
287 | encoder->helper_private; | 287 | encoder->helper_private; |
@@ -641,11 +641,28 @@ nouveau_connector_get_modes(struct drm_connector *connector) | |||
641 | return ret; | 641 | return ret; |
642 | } | 642 | } |
643 | 643 | ||
644 | static unsigned | ||
645 | get_tmds_link_bandwidth(struct drm_connector *connector) | ||
646 | { | ||
647 | struct nouveau_connector *nv_connector = nouveau_connector(connector); | ||
648 | struct drm_nouveau_private *dev_priv = connector->dev->dev_private; | ||
649 | struct dcb_entry *dcb = nv_connector->detected_encoder->dcb; | ||
650 | |||
651 | if (dcb->location != DCB_LOC_ON_CHIP || | ||
652 | dev_priv->chipset >= 0x46) | ||
653 | return 165000; | ||
654 | else if (dev_priv->chipset >= 0x40) | ||
655 | return 155000; | ||
656 | else if (dev_priv->chipset >= 0x18) | ||
657 | return 135000; | ||
658 | else | ||
659 | return 112000; | ||
660 | } | ||
661 | |||
644 | static int | 662 | static int |
645 | nouveau_connector_mode_valid(struct drm_connector *connector, | 663 | nouveau_connector_mode_valid(struct drm_connector *connector, |
646 | struct drm_display_mode *mode) | 664 | struct drm_display_mode *mode) |
647 | { | 665 | { |
648 | struct drm_nouveau_private *dev_priv = connector->dev->dev_private; | ||
649 | struct nouveau_connector *nv_connector = nouveau_connector(connector); | 666 | struct nouveau_connector *nv_connector = nouveau_connector(connector); |
650 | struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; | 667 | struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; |
651 | struct drm_encoder *encoder = to_drm_encoder(nv_encoder); | 668 | struct drm_encoder *encoder = to_drm_encoder(nv_encoder); |
@@ -663,11 +680,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector, | |||
663 | max_clock = 400000; | 680 | max_clock = 400000; |
664 | break; | 681 | break; |
665 | case OUTPUT_TMDS: | 682 | case OUTPUT_TMDS: |
666 | if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || | 683 | max_clock = get_tmds_link_bandwidth(connector); |
667 | !nv_encoder->dcb->duallink_possible) | 684 | if (nouveau_duallink && nv_encoder->dcb->duallink_possible) |
668 | max_clock = 165000; | 685 | max_clock *= 2; |
669 | else | ||
670 | max_clock = 330000; | ||
671 | break; | 686 | break; |
672 | case OUTPUT_ANALOG: | 687 | case OUTPUT_ANALOG: |
673 | max_clock = nv_encoder->dcb->crtconf.maxfreq; | 688 | max_clock = nv_encoder->dcb->crtconf.maxfreq; |
@@ -709,44 +724,6 @@ nouveau_connector_best_encoder(struct drm_connector *connector) | |||
709 | return NULL; | 724 | return NULL; |
710 | } | 725 | } |
711 | 726 | ||
712 | void | ||
713 | nouveau_connector_set_polling(struct drm_connector *connector) | ||
714 | { | ||
715 | struct drm_device *dev = connector->dev; | ||
716 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
717 | struct drm_crtc *crtc; | ||
718 | bool spare_crtc = false; | ||
719 | |||
720 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | ||
721 | spare_crtc |= !crtc->enabled; | ||
722 | |||
723 | connector->polled = 0; | ||
724 | |||
725 | switch (connector->connector_type) { | ||
726 | case DRM_MODE_CONNECTOR_VGA: | ||
727 | case DRM_MODE_CONNECTOR_TV: | ||
728 | if (dev_priv->card_type >= NV_50 || | ||
729 | (nv_gf4_disp_arch(dev) && spare_crtc)) | ||
730 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
731 | break; | ||
732 | |||
733 | case DRM_MODE_CONNECTOR_DVII: | ||
734 | case DRM_MODE_CONNECTOR_DVID: | ||
735 | case DRM_MODE_CONNECTOR_HDMIA: | ||
736 | case DRM_MODE_CONNECTOR_DisplayPort: | ||
737 | case DRM_MODE_CONNECTOR_eDP: | ||
738 | if (dev_priv->card_type >= NV_50) | ||
739 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
740 | else if (connector->connector_type == DRM_MODE_CONNECTOR_DVID || | ||
741 | spare_crtc) | ||
742 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
743 | break; | ||
744 | |||
745 | default: | ||
746 | break; | ||
747 | } | ||
748 | } | ||
749 | |||
750 | static const struct drm_connector_helper_funcs | 727 | static const struct drm_connector_helper_funcs |
751 | nouveau_connector_helper_funcs = { | 728 | nouveau_connector_helper_funcs = { |
752 | .get_modes = nouveau_connector_get_modes, | 729 | .get_modes = nouveau_connector_get_modes, |
@@ -872,6 +849,7 @@ nouveau_connector_create(struct drm_device *dev, int index) | |||
872 | dev->mode_config.scaling_mode_property, | 849 | dev->mode_config.scaling_mode_property, |
873 | nv_connector->scaling_mode); | 850 | nv_connector->scaling_mode); |
874 | } | 851 | } |
852 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
875 | /* fall-through */ | 853 | /* fall-through */ |
876 | case DCB_CONNECTOR_TV_0: | 854 | case DCB_CONNECTOR_TV_0: |
877 | case DCB_CONNECTOR_TV_1: | 855 | case DCB_CONNECTOR_TV_1: |
@@ -888,11 +866,16 @@ nouveau_connector_create(struct drm_device *dev, int index) | |||
888 | dev->mode_config.dithering_mode_property, | 866 | dev->mode_config.dithering_mode_property, |
889 | nv_connector->use_dithering ? | 867 | nv_connector->use_dithering ? |
890 | DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); | 868 | DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); |
869 | |||
870 | if (dcb->type != DCB_CONNECTOR_LVDS) { | ||
871 | if (dev_priv->card_type >= NV_50) | ||
872 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
873 | else | ||
874 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
875 | } | ||
891 | break; | 876 | break; |
892 | } | 877 | } |
893 | 878 | ||
894 | nouveau_connector_set_polling(connector); | ||
895 | |||
896 | drm_sysfs_connector_add(connector); | 879 | drm_sysfs_connector_add(connector); |
897 | dcb->drm = connector; | 880 | dcb->drm = connector; |
898 | return dcb->drm; | 881 | return dcb->drm; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index c21ed6b16f88..711b1e9203af 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h | |||
@@ -52,9 +52,6 @@ static inline struct nouveau_connector *nouveau_connector( | |||
52 | struct drm_connector * | 52 | struct drm_connector * |
53 | nouveau_connector_create(struct drm_device *, int index); | 53 | nouveau_connector_create(struct drm_device *, int index); |
54 | 54 | ||
55 | void | ||
56 | nouveau_connector_set_polling(struct drm_connector *); | ||
57 | |||
58 | int | 55 | int |
59 | nouveau_connector_bpp(struct drm_connector *); | 56 | nouveau_connector_bpp(struct drm_connector *); |
60 | 57 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 3a07e580d27a..1c7db64c03bf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -100,6 +100,9 @@ struct nouveau_bo { | |||
100 | int pin_refcnt; | 100 | int pin_refcnt; |
101 | }; | 101 | }; |
102 | 102 | ||
103 | #define nouveau_bo_tile_layout(nvbo) \ | ||
104 | ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) | ||
105 | |||
103 | static inline struct nouveau_bo * | 106 | static inline struct nouveau_bo * |
104 | nouveau_bo(struct ttm_buffer_object *bo) | 107 | nouveau_bo(struct ttm_buffer_object *bo) |
105 | { | 108 | { |
@@ -304,6 +307,7 @@ struct nouveau_fifo_engine { | |||
304 | void (*destroy_context)(struct nouveau_channel *); | 307 | void (*destroy_context)(struct nouveau_channel *); |
305 | int (*load_context)(struct nouveau_channel *); | 308 | int (*load_context)(struct nouveau_channel *); |
306 | int (*unload_context)(struct drm_device *); | 309 | int (*unload_context)(struct drm_device *); |
310 | void (*tlb_flush)(struct drm_device *dev); | ||
307 | }; | 311 | }; |
308 | 312 | ||
309 | struct nouveau_pgraph_object_method { | 313 | struct nouveau_pgraph_object_method { |
@@ -336,6 +340,7 @@ struct nouveau_pgraph_engine { | |||
336 | void (*destroy_context)(struct nouveau_channel *); | 340 | void (*destroy_context)(struct nouveau_channel *); |
337 | int (*load_context)(struct nouveau_channel *); | 341 | int (*load_context)(struct nouveau_channel *); |
338 | int (*unload_context)(struct drm_device *); | 342 | int (*unload_context)(struct drm_device *); |
343 | void (*tlb_flush)(struct drm_device *dev); | ||
339 | 344 | ||
340 | void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, | 345 | void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, |
341 | uint32_t size, uint32_t pitch); | 346 | uint32_t size, uint32_t pitch); |
@@ -485,13 +490,13 @@ enum nv04_fp_display_regs { | |||
485 | }; | 490 | }; |
486 | 491 | ||
487 | struct nv04_crtc_reg { | 492 | struct nv04_crtc_reg { |
488 | unsigned char MiscOutReg; /* */ | 493 | unsigned char MiscOutReg; |
489 | uint8_t CRTC[0xa0]; | 494 | uint8_t CRTC[0xa0]; |
490 | uint8_t CR58[0x10]; | 495 | uint8_t CR58[0x10]; |
491 | uint8_t Sequencer[5]; | 496 | uint8_t Sequencer[5]; |
492 | uint8_t Graphics[9]; | 497 | uint8_t Graphics[9]; |
493 | uint8_t Attribute[21]; | 498 | uint8_t Attribute[21]; |
494 | unsigned char DAC[768]; /* Internal Colorlookuptable */ | 499 | unsigned char DAC[768]; |
495 | 500 | ||
496 | /* PCRTC regs */ | 501 | /* PCRTC regs */ |
497 | uint32_t fb_start; | 502 | uint32_t fb_start; |
@@ -539,43 +544,9 @@ struct nv04_output_reg { | |||
539 | }; | 544 | }; |
540 | 545 | ||
541 | struct nv04_mode_state { | 546 | struct nv04_mode_state { |
542 | uint32_t bpp; | 547 | struct nv04_crtc_reg crtc_reg[2]; |
543 | uint32_t width; | ||
544 | uint32_t height; | ||
545 | uint32_t interlace; | ||
546 | uint32_t repaint0; | ||
547 | uint32_t repaint1; | ||
548 | uint32_t screen; | ||
549 | uint32_t scale; | ||
550 | uint32_t dither; | ||
551 | uint32_t extra; | ||
552 | uint32_t fifo; | ||
553 | uint32_t pixel; | ||
554 | uint32_t horiz; | ||
555 | int arbitration0; | ||
556 | int arbitration1; | ||
557 | uint32_t pll; | ||
558 | uint32_t pllB; | ||
559 | uint32_t vpll; | ||
560 | uint32_t vpll2; | ||
561 | uint32_t vpllB; | ||
562 | uint32_t vpll2B; | ||
563 | uint32_t pllsel; | 548 | uint32_t pllsel; |
564 | uint32_t sel_clk; | 549 | uint32_t sel_clk; |
565 | uint32_t general; | ||
566 | uint32_t crtcOwner; | ||
567 | uint32_t head; | ||
568 | uint32_t head2; | ||
569 | uint32_t cursorConfig; | ||
570 | uint32_t cursor0; | ||
571 | uint32_t cursor1; | ||
572 | uint32_t cursor2; | ||
573 | uint32_t timingH; | ||
574 | uint32_t timingV; | ||
575 | uint32_t displayV; | ||
576 | uint32_t crtcSync; | ||
577 | |||
578 | struct nv04_crtc_reg crtc_reg[2]; | ||
579 | }; | 550 | }; |
580 | 551 | ||
581 | enum nouveau_card_type { | 552 | enum nouveau_card_type { |
@@ -613,6 +584,12 @@ struct drm_nouveau_private { | |||
613 | struct work_struct irq_work; | 584 | struct work_struct irq_work; |
614 | struct work_struct hpd_work; | 585 | struct work_struct hpd_work; |
615 | 586 | ||
587 | struct { | ||
588 | spinlock_t lock; | ||
589 | uint32_t hpd0_bits; | ||
590 | uint32_t hpd1_bits; | ||
591 | } hpd_state; | ||
592 | |||
616 | struct list_head vbl_waiting; | 593 | struct list_head vbl_waiting; |
617 | 594 | ||
618 | struct { | 595 | struct { |
@@ -1045,6 +1022,7 @@ extern int nv50_fifo_create_context(struct nouveau_channel *); | |||
1045 | extern void nv50_fifo_destroy_context(struct nouveau_channel *); | 1022 | extern void nv50_fifo_destroy_context(struct nouveau_channel *); |
1046 | extern int nv50_fifo_load_context(struct nouveau_channel *); | 1023 | extern int nv50_fifo_load_context(struct nouveau_channel *); |
1047 | extern int nv50_fifo_unload_context(struct drm_device *); | 1024 | extern int nv50_fifo_unload_context(struct drm_device *); |
1025 | extern void nv50_fifo_tlb_flush(struct drm_device *dev); | ||
1048 | 1026 | ||
1049 | /* nvc0_fifo.c */ | 1027 | /* nvc0_fifo.c */ |
1050 | extern int nvc0_fifo_init(struct drm_device *); | 1028 | extern int nvc0_fifo_init(struct drm_device *); |
@@ -1122,6 +1100,8 @@ extern int nv50_graph_load_context(struct nouveau_channel *); | |||
1122 | extern int nv50_graph_unload_context(struct drm_device *); | 1100 | extern int nv50_graph_unload_context(struct drm_device *); |
1123 | extern void nv50_graph_context_switch(struct drm_device *); | 1101 | extern void nv50_graph_context_switch(struct drm_device *); |
1124 | extern int nv50_grctx_init(struct nouveau_grctx *); | 1102 | extern int nv50_grctx_init(struct nouveau_grctx *); |
1103 | extern void nv50_graph_tlb_flush(struct drm_device *dev); | ||
1104 | extern void nv86_graph_tlb_flush(struct drm_device *dev); | ||
1125 | 1105 | ||
1126 | /* nvc0_graph.c */ | 1106 | /* nvc0_graph.c */ |
1127 | extern int nvc0_graph_init(struct drm_device *); | 1107 | extern int nvc0_graph_init(struct drm_device *); |
@@ -1239,7 +1219,6 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); | |||
1239 | extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); | 1219 | extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); |
1240 | extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); | 1220 | extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); |
1241 | extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); | 1221 | extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); |
1242 | extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *); | ||
1243 | 1222 | ||
1244 | /* nouveau_fence.c */ | 1223 | /* nouveau_fence.c */ |
1245 | struct nouveau_fence; | 1224 | struct nouveau_fence; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 441b12420bb1..ab1bbfbf266e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -249,6 +249,7 @@ alloc_semaphore(struct drm_device *dev) | |||
249 | { | 249 | { |
250 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 250 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
251 | struct nouveau_semaphore *sema; | 251 | struct nouveau_semaphore *sema; |
252 | int ret; | ||
252 | 253 | ||
253 | if (!USE_SEMA(dev)) | 254 | if (!USE_SEMA(dev)) |
254 | return NULL; | 255 | return NULL; |
@@ -257,10 +258,14 @@ alloc_semaphore(struct drm_device *dev) | |||
257 | if (!sema) | 258 | if (!sema) |
258 | goto fail; | 259 | goto fail; |
259 | 260 | ||
261 | ret = drm_mm_pre_get(&dev_priv->fence.heap); | ||
262 | if (ret) | ||
263 | goto fail; | ||
264 | |||
260 | spin_lock(&dev_priv->fence.lock); | 265 | spin_lock(&dev_priv->fence.lock); |
261 | sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0); | 266 | sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0); |
262 | if (sema->mem) | 267 | if (sema->mem) |
263 | sema->mem = drm_mm_get_block(sema->mem, 4, 0); | 268 | sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0); |
264 | spin_unlock(&dev_priv->fence.lock); | 269 | spin_unlock(&dev_priv->fence.lock); |
265 | 270 | ||
266 | if (!sema->mem) | 271 | if (!sema->mem) |
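Editor's note: the nouveau_fence.c hunk above switches the semaphore allocator to the pre-get/atomic pattern: the allocation that may sleep (drm_mm_pre_get) happens before the spinlock is taken, and only the non-sleeping drm_mm_get_block_atomic() call runs under the lock. A minimal user-space sketch of the same idea, using plain C and pthreads rather than the DRM API (all names here are illustrative, not the driver's):

#include <pthread.h>
#include <stdlib.h>

/* Toy free-list allocator used only to illustrate the pattern. */
struct node { struct node *next; int value; };

static struct node *free_list;          /* protected by `lock` */
static pthread_spinlock_t lock;

/* Takes from the free list only; never allocates, so it is safe
 * to call while holding a spinlock. */
static struct node *get_atomic(void)
{
        struct node *n = free_list;
        if (n)
                free_list = n->next;
        return n;
}

/* Do the possibly-sleeping allocation *before* taking the lock,
 * then only non-blocking work while it is held. */
static struct node *alloc_node(int value)
{
        struct node *spare = malloc(sizeof(*spare));    /* may block / fail */
        struct node *n;

        if (!spare)
                return NULL;

        pthread_spin_lock(&lock);
        n = get_atomic();
        if (!n) {                       /* fall back to the pre-allocated spare */
                n = spare;
                spare = NULL;
        }
        pthread_spin_unlock(&lock);

        free(spare);                    /* unused spare is simply dropped */
        n->value = value;
        return n;
}

int main(void)
{
        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        free(alloc_node(42));
        pthread_spin_destroy(&lock);
        return 0;
}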
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 5c4c929d7f74..9a1fdcf400c2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -107,23 +107,29 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep) | |||
107 | } | 107 | } |
108 | 108 | ||
109 | static bool | 109 | static bool |
110 | nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) { | 110 | nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) |
111 | switch (tile_flags) { | 111 | { |
112 | case 0x0000: | 112 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
113 | case 0x1800: | 113 | |
114 | case 0x2800: | 114 | if (dev_priv->card_type >= NV_50) { |
115 | case 0x4800: | 115 | switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) { |
116 | case 0x7000: | 116 | case 0x0000: |
117 | case 0x7400: | 117 | case 0x1800: |
118 | case 0x7a00: | 118 | case 0x2800: |
119 | case 0xe000: | 119 | case 0x4800: |
120 | break; | 120 | case 0x7000: |
121 | default: | 121 | case 0x7400: |
122 | NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags); | 122 | case 0x7a00: |
123 | return false; | 123 | case 0xe000: |
124 | return true; | ||
125 | } | ||
126 | } else { | ||
127 | if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)) | ||
128 | return true; | ||
124 | } | 129 | } |
125 | 130 | ||
126 | return true; | 131 | NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags); |
132 | return false; | ||
127 | } | 133 | } |
128 | 134 | ||
129 | int | 135 | int |
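Editor's note: the nouveau_gem.c hunk above changes tile-flag validation to look only at the layout field (tile_flags masked with NOUVEAU_GEM_TILE_LAYOUT_MASK): NV50+ cards accept a whitelist of layout values, older cards accept only a zero layout field. A small stand-alone sketch of that check; the mask value below is an assumption for illustration, the real constant lives in the nouveau UAPI header:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TILE_LAYOUT_MASK 0x0000ff00u    /* illustrative value */

static bool tile_flags_valid(bool is_nv50_or_later, uint32_t tile_flags)
{
        uint32_t layout = tile_flags & TILE_LAYOUT_MASK;

        if (!is_nv50_or_later)
                return layout == 0;     /* pre-NV50: no layout bits allowed */

        switch (layout) {               /* NV50+: whitelist of known layouts */
        case 0x0000: case 0x1800: case 0x2800: case 0x4800:
        case 0x7000: case 0x7400: case 0x7a00: case 0xe000:
                return true;
        default:
                return false;
        }
}

int main(void)
{
        printf("%d %d\n", tile_flags_valid(true, 0x7000),
                          tile_flags_valid(false, 0x7000));   /* 1 0 */
        return 0;
}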
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c index bed669a54a2d..b9672a05c411 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.c +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c | |||
@@ -519,11 +519,11 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) | |||
519 | 519 | ||
520 | struct pll_lims pll_lim; | 520 | struct pll_lims pll_lim; |
521 | struct nouveau_pll_vals pv; | 521 | struct nouveau_pll_vals pv; |
522 | uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; | 522 | enum pll_types pll = head ? PLL_VPLL1 : PLL_VPLL0; |
523 | 523 | ||
524 | if (get_pll_limits(dev, pllreg, &pll_lim)) | 524 | if (get_pll_limits(dev, pll, &pll_lim)) |
525 | return; | 525 | return; |
526 | nouveau_hw_get_pllvals(dev, pllreg, &pv); | 526 | nouveau_hw_get_pllvals(dev, pll, &pv); |
527 | 527 | ||
528 | if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && | 528 | if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && |
529 | pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && | 529 | pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && |
@@ -536,7 +536,7 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) | |||
536 | pv.M1 = pll_lim.vco1.max_m; | 536 | pv.M1 = pll_lim.vco1.max_m; |
537 | pv.N1 = pll_lim.vco1.min_n; | 537 | pv.N1 = pll_lim.vco1.min_n; |
538 | pv.log2P = pll_lim.max_usable_log2p; | 538 | pv.log2P = pll_lim.max_usable_log2p; |
539 | nouveau_hw_setpll(dev, pllreg, &pv); | 539 | nouveau_hw_setpll(dev, pll_lim.reg, &pv); |
540 | } | 540 | } |
541 | 541 | ||
542 | /* | 542 | /* |
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h index 869130f83602..2989090b9434 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.h +++ b/drivers/gpu/drm/nouveau/nouveau_hw.h | |||
@@ -416,6 +416,25 @@ nv_fix_nv40_hw_cursor(struct drm_device *dev, int head) | |||
416 | } | 416 | } |
417 | 417 | ||
418 | static inline void | 418 | static inline void |
419 | nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) | ||
420 | { | ||
421 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
422 | |||
423 | NVWriteCRTC(dev, head, NV_PCRTC_START, offset); | ||
424 | |||
425 | if (dev_priv->card_type == NV_04) { | ||
426 | /* | ||
427 | * Hilarious, the 24th bit doesn't want to stick to | ||
428 | * PCRTC_START... | ||
429 | */ | ||
430 | int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX); | ||
431 | |||
432 | NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX, | ||
433 | (cre_heb & ~0x40) | ((offset >> 18) & 0x40)); | ||
434 | } | ||
435 | } | ||
436 | |||
437 | static inline void | ||
419 | nv_show_cursor(struct drm_device *dev, int head, bool show) | 438 | nv_show_cursor(struct drm_device *dev, int head, bool show) |
420 | { | 439 | { |
421 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 440 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
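Editor's note: the new nv_set_crtc_base() helper above works around an NV04 quirk where bit 24 of the scanout offset does not live in PCRTC_START but in bit 6 (0x40) of the CRE_HEB extension register, folded in with a read-modify-write. The arithmetic in isolation, as a runnable sketch:

#include <stdint.h>
#include <stdio.h>

/* Fold bit 24 of a framebuffer offset into bit 6 of an 8-bit extension
 * register, leaving the other bits untouched -- the same read-modify-write
 * nv_set_crtc_base() performs on NV04 hardware. */
static uint8_t fold_offset_bit(uint8_t reg, uint32_t offset)
{
        return (uint8_t)((reg & ~0x40u) | ((offset >> 18) & 0x40u));
}

int main(void)
{
        printf("0x%02x\n", fold_offset_bit(0x03, 1u << 24));   /* prints 0x43 */
        printf("0x%02x\n", fold_offset_bit(0x43, 0));          /* prints 0x03 */
        return 0;
}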
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index fdd7e3de79c8..cb389d014326 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c | |||
@@ -256,7 +256,7 @@ nouveau_i2c_find(struct drm_device *dev, int index) | |||
256 | if (index >= DCB_MAX_NUM_I2C_ENTRIES) | 256 | if (index >= DCB_MAX_NUM_I2C_ENTRIES) |
257 | return NULL; | 257 | return NULL; |
258 | 258 | ||
259 | if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) { | 259 | if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) { |
260 | uint32_t reg = 0xe500, val; | 260 | uint32_t reg = 0xe500, val; |
261 | 261 | ||
262 | if (i2c->port_type == 6) { | 262 | if (i2c->port_type == 6) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 6fd51a51c608..7bfd9e6c9d67 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c | |||
@@ -42,6 +42,13 @@ | |||
42 | #include "nouveau_connector.h" | 42 | #include "nouveau_connector.h" |
43 | #include "nv50_display.h" | 43 | #include "nv50_display.h" |
44 | 44 | ||
45 | static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); | ||
46 | |||
47 | static int nouveau_ratelimit(void) | ||
48 | { | ||
49 | return __ratelimit(&nouveau_ratelimit_state); | ||
50 | } | ||
51 | |||
45 | void | 52 | void |
46 | nouveau_irq_preinstall(struct drm_device *dev) | 53 | nouveau_irq_preinstall(struct drm_device *dev) |
47 | { | 54 | { |
@@ -53,6 +60,7 @@ nouveau_irq_preinstall(struct drm_device *dev) | |||
53 | if (dev_priv->card_type >= NV_50) { | 60 | if (dev_priv->card_type >= NV_50) { |
54 | INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); | 61 | INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); |
55 | INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); | 62 | INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); |
63 | spin_lock_init(&dev_priv->hpd_state.lock); | ||
56 | INIT_LIST_HEAD(&dev_priv->vbl_waiting); | 64 | INIT_LIST_HEAD(&dev_priv->vbl_waiting); |
57 | } | 65 | } |
58 | } | 66 | } |
@@ -202,8 +210,8 @@ nouveau_fifo_irq_handler(struct drm_device *dev) | |||
202 | } | 210 | } |
203 | 211 | ||
204 | if (status & NV_PFIFO_INTR_DMA_PUSHER) { | 212 | if (status & NV_PFIFO_INTR_DMA_PUSHER) { |
205 | u32 get = nv_rd32(dev, 0x003244); | 213 | u32 dma_get = nv_rd32(dev, 0x003244); |
206 | u32 put = nv_rd32(dev, 0x003240); | 214 | u32 dma_put = nv_rd32(dev, 0x003240); |
207 | u32 push = nv_rd32(dev, 0x003220); | 215 | u32 push = nv_rd32(dev, 0x003220); |
208 | u32 state = nv_rd32(dev, 0x003228); | 216 | u32 state = nv_rd32(dev, 0x003228); |
209 | 217 | ||
@@ -213,16 +221,18 @@ nouveau_fifo_irq_handler(struct drm_device *dev) | |||
213 | u32 ib_get = nv_rd32(dev, 0x003334); | 221 | u32 ib_get = nv_rd32(dev, 0x003334); |
214 | u32 ib_put = nv_rd32(dev, 0x003330); | 222 | u32 ib_put = nv_rd32(dev, 0x003330); |
215 | 223 | ||
216 | NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " | 224 | if (nouveau_ratelimit()) |
225 | NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " | ||
217 | "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " | 226 | "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " |
218 | "State 0x%08x Push 0x%08x\n", | 227 | "State 0x%08x Push 0x%08x\n", |
219 | chid, ho_get, get, ho_put, put, ib_get, ib_put, | 228 | chid, ho_get, dma_get, ho_put, |
220 | state, push); | 229 | dma_put, ib_get, ib_put, state, |
230 | push); | ||
221 | 231 | ||
222 | /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ | 232 | /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ |
223 | nv_wr32(dev, 0x003364, 0x00000000); | 233 | nv_wr32(dev, 0x003364, 0x00000000); |
224 | if (get != put || ho_get != ho_put) { | 234 | if (dma_get != dma_put || ho_get != ho_put) { |
225 | nv_wr32(dev, 0x003244, put); | 235 | nv_wr32(dev, 0x003244, dma_put); |
226 | nv_wr32(dev, 0x003328, ho_put); | 236 | nv_wr32(dev, 0x003328, ho_put); |
227 | } else | 237 | } else |
228 | if (ib_get != ib_put) { | 238 | if (ib_get != ib_put) { |
@@ -231,10 +241,10 @@ nouveau_fifo_irq_handler(struct drm_device *dev) | |||
231 | } else { | 241 | } else { |
232 | NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " | 242 | NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " |
233 | "Put 0x%08x State 0x%08x Push 0x%08x\n", | 243 | "Put 0x%08x State 0x%08x Push 0x%08x\n", |
234 | chid, get, put, state, push); | 244 | chid, dma_get, dma_put, state, push); |
235 | 245 | ||
236 | if (get != put) | 246 | if (dma_get != dma_put) |
237 | nv_wr32(dev, 0x003244, put); | 247 | nv_wr32(dev, 0x003244, dma_put); |
238 | } | 248 | } |
239 | 249 | ||
240 | nv_wr32(dev, 0x003228, 0x00000000); | 250 | nv_wr32(dev, 0x003228, 0x00000000); |
@@ -266,8 +276,9 @@ nouveau_fifo_irq_handler(struct drm_device *dev) | |||
266 | } | 276 | } |
267 | 277 | ||
268 | if (status) { | 278 | if (status) { |
269 | NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", | 279 | if (nouveau_ratelimit()) |
270 | status, chid); | 280 | NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", |
281 | status, chid); | ||
271 | nv_wr32(dev, NV03_PFIFO_INTR_0, status); | 282 | nv_wr32(dev, NV03_PFIFO_INTR_0, status); |
272 | status = 0; | 283 | status = 0; |
273 | } | 284 | } |
@@ -544,13 +555,6 @@ nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource) | |||
544 | nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); | 555 | nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); |
545 | } | 556 | } |
546 | 557 | ||
547 | static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); | ||
548 | |||
549 | static int nouveau_ratelimit(void) | ||
550 | { | ||
551 | return __ratelimit(&nouveau_ratelimit_state); | ||
552 | } | ||
553 | |||
554 | 558 | ||
555 | static inline void | 559 | static inline void |
556 | nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) | 560 | nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) |
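Editor's note: the nouveau_irq.c changes above move the DEFINE_RATELIMIT_STATE(3 * HZ, 20) declaration up so the PFIFO DMA-pusher and PFIFO_INTR messages can also be rate-limited, i.e. at most roughly 20 log lines per 3-second window. A rough user-space analogue of that behaviour (time() standing in for jiffies; this is not the kernel's __ratelimit implementation):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit {
        time_t window_start;
        int interval;   /* seconds */
        int burst;
        int count;
};

/* Allow at most `burst` events per `interval` seconds, drop the rest. */
static bool ratelimit_ok(struct ratelimit *rl)
{
        time_t now = time(NULL);

        if (now - rl->window_start >= rl->interval) {
                rl->window_start = now;         /* start a new window */
                rl->count = 0;
        }
        return rl->count++ < rl->burst;
}

int main(void)
{
        struct ratelimit rl = { .window_start = time(NULL), .interval = 3, .burst = 20 };
        int printed = 0;

        for (int i = 0; i < 1000; i++)
                if (ratelimit_ok(&rl))
                        printed++;
        printf("printed %d of 1000 messages\n", printed);      /* 20 */
        return 0;
}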
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index a163c7c612e7..fe4a30dc4b42 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -33,9 +33,9 @@ | |||
33 | #include "drmP.h" | 33 | #include "drmP.h" |
34 | #include "drm.h" | 34 | #include "drm.h" |
35 | #include "drm_sarea.h" | 35 | #include "drm_sarea.h" |
36 | #include "nouveau_drv.h" | ||
37 | 36 | ||
38 | #define MIN(a,b) a < b ? a : b | 37 | #include "nouveau_drv.h" |
38 | #include "nouveau_pm.h" | ||
39 | 39 | ||
40 | /* | 40 | /* |
41 | * NV10-NV40 tiling helpers | 41 | * NV10-NV40 tiling helpers |
@@ -175,11 +175,10 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, | |||
175 | } | 175 | } |
176 | } | 176 | } |
177 | } | 177 | } |
178 | dev_priv->engine.instmem.flush(dev); | ||
179 | 178 | ||
180 | nv50_vm_flush(dev, 5); | 179 | dev_priv->engine.instmem.flush(dev); |
181 | nv50_vm_flush(dev, 0); | 180 | dev_priv->engine.fifo.tlb_flush(dev); |
182 | nv50_vm_flush(dev, 4); | 181 | dev_priv->engine.graph.tlb_flush(dev); |
183 | nv50_vm_flush(dev, 6); | 182 | nv50_vm_flush(dev, 6); |
184 | return 0; | 183 | return 0; |
185 | } | 184 | } |
@@ -209,11 +208,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) | |||
209 | pte++; | 208 | pte++; |
210 | } | 209 | } |
211 | } | 210 | } |
212 | dev_priv->engine.instmem.flush(dev); | ||
213 | 211 | ||
214 | nv50_vm_flush(dev, 5); | 212 | dev_priv->engine.instmem.flush(dev); |
215 | nv50_vm_flush(dev, 0); | 213 | dev_priv->engine.fifo.tlb_flush(dev); |
216 | nv50_vm_flush(dev, 4); | 214 | dev_priv->engine.graph.tlb_flush(dev); |
217 | nv50_vm_flush(dev, 6); | 215 | nv50_vm_flush(dev, 6); |
218 | } | 216 | } |
219 | 217 | ||
@@ -653,6 +651,7 @@ nouveau_mem_gart_init(struct drm_device *dev) | |||
653 | void | 651 | void |
654 | nouveau_mem_timing_init(struct drm_device *dev) | 652 | nouveau_mem_timing_init(struct drm_device *dev) |
655 | { | 653 | { |
654 | /* cards < NVC0 only */ | ||
656 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 655 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
657 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 656 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
658 | struct nouveau_pm_memtimings *memtimings = &pm->memtimings; | 657 | struct nouveau_pm_memtimings *memtimings = &pm->memtimings; |
@@ -719,14 +718,14 @@ nouveau_mem_timing_init(struct drm_device *dev) | |||
719 | tUNK_19 = 1; | 718 | tUNK_19 = 1; |
720 | tUNK_20 = 0; | 719 | tUNK_20 = 0; |
721 | tUNK_21 = 0; | 720 | tUNK_21 = 0; |
722 | switch (MIN(recordlen,21)) { | 721 | switch (min(recordlen, 22)) { |
723 | case 21: | 722 | case 22: |
724 | tUNK_21 = entry[21]; | 723 | tUNK_21 = entry[21]; |
725 | case 20: | 724 | case 21: |
726 | tUNK_20 = entry[20]; | 725 | tUNK_20 = entry[20]; |
727 | case 19: | 726 | case 20: |
728 | tUNK_19 = entry[19]; | 727 | tUNK_19 = entry[19]; |
729 | case 18: | 728 | case 19: |
730 | tUNK_18 = entry[18]; | 729 | tUNK_18 = entry[18]; |
731 | default: | 730 | default: |
732 | tUNK_0 = entry[0]; | 731 | tUNK_0 = entry[0]; |
@@ -756,24 +755,30 @@ nouveau_mem_timing_init(struct drm_device *dev) | |||
756 | timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); | 755 | timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); |
757 | if(recordlen > 19) { | 756 | if(recordlen > 19) { |
758 | timing->reg_100228 += (tUNK_19 - 1) << 24; | 757 | timing->reg_100228 += (tUNK_19 - 1) << 24; |
759 | } else { | 758 | }/* I cannot back-up this else-statement right now |
759 | else { | ||
760 | timing->reg_100228 += tUNK_12 << 24; | 760 | timing->reg_100228 += tUNK_12 << 24; |
761 | } | 761 | }*/ |
762 | 762 | ||
763 | /* XXX: reg_10022c */ | 763 | /* XXX: reg_10022c */ |
764 | timing->reg_10022c = tUNK_2 - 1; | ||
764 | 765 | ||
765 | timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | | 766 | timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | |
766 | tUNK_13 << 8 | tUNK_13); | 767 | tUNK_13 << 8 | tUNK_13); |
767 | 768 | ||
768 | /* XXX: +6? */ | 769 | /* XXX: +6? */ |
769 | timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); | 770 | timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); |
770 | if(tUNK_10 > tUNK_11) { | 771 | timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; |
771 | timing->reg_100234 += tUNK_10 << 16; | 772 | |
772 | } else { | 773 | /* XXX; reg_100238, reg_10023c |
773 | timing->reg_100234 += tUNK_11 << 16; | 774 | * reg: 0x00?????? |
775 | * reg_10023c: | ||
776 | * 0 for pre-NV50 cards | ||
777 | * 0x????0202 for NV50+ cards (empirical evidence) */ | ||
778 | if(dev_priv->card_type >= NV_50) { | ||
779 | timing->reg_10023c = 0x202; | ||
774 | } | 780 | } |
775 | 781 | ||
776 | /* XXX; reg_100238, reg_10023c */ | ||
777 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, | 782 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, |
778 | timing->reg_100220, timing->reg_100224, | 783 | timing->reg_100220, timing->reg_100224, |
779 | timing->reg_100228, timing->reg_10022c); | 784 | timing->reg_100228, timing->reg_10022c); |
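Editor's note: the memory-timing hunk above replaces the hand-rolled MIN macro with min() and shifts the switch to min(recordlen, 22), so a record of length N unlocks reading entry[N-1] via fallthrough. A self-contained sketch of that record-length-gated fallthrough (field names and defaults mirror the driver, the data is invented):

#include <stdint.h>
#include <stdio.h>

static void parse_record(const uint8_t *entry, int recordlen,
                         int *t18, int *t19, int *t20, int *t21)
{
        *t18 = 0; *t19 = 1; *t20 = 0; *t21 = 0;         /* driver defaults */

        /* Case labels are one higher than the index they unlock:
         * a record of length 22 makes entry[21] the last readable byte. */
        switch (recordlen < 22 ? recordlen : 22) {
        case 22: *t21 = entry[21];      /* fall through */
        case 21: *t20 = entry[20];      /* fall through */
        case 20: *t19 = entry[19];      /* fall through */
        case 19: *t18 = entry[18];      /* fall through */
        default: break;
        }
}

int main(void)
{
        uint8_t rec[22] = { 0 };
        int t18, t19, t20, t21;

        rec[18] = 5; rec[19] = 6; rec[20] = 7; rec[21] = 8;
        parse_record(rec, 20, &t18, &t19, &t20, &t21);
        printf("%d %d %d %d\n", t18, t19, t20, t21);    /* 5 6 0 0 */
        return 0;
}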
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 896cf8634144..dd572adca02a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
@@ -129,7 +129,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
129 | if (ramin == NULL) { | 129 | if (ramin == NULL) { |
130 | spin_unlock(&dev_priv->ramin_lock); | 130 | spin_unlock(&dev_priv->ramin_lock); |
131 | nouveau_gpuobj_ref(NULL, &gpuobj); | 131 | nouveau_gpuobj_ref(NULL, &gpuobj); |
132 | return ret; | 132 | return -ENOMEM; |
133 | } | 133 | } |
134 | 134 | ||
135 | ramin = drm_mm_get_block_atomic(ramin, size, align); | 135 | ramin = drm_mm_get_block_atomic(ramin, size, align); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 1c99c55d6d46..9f7b158f5825 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c | |||
@@ -284,6 +284,7 @@ nouveau_sysfs_fini(struct drm_device *dev) | |||
284 | } | 284 | } |
285 | } | 285 | } |
286 | 286 | ||
287 | #ifdef CONFIG_HWMON | ||
287 | static ssize_t | 288 | static ssize_t |
288 | nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) | 289 | nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) |
289 | { | 290 | { |
@@ -395,10 +396,12 @@ static struct attribute *hwmon_attributes[] = { | |||
395 | static const struct attribute_group hwmon_attrgroup = { | 396 | static const struct attribute_group hwmon_attrgroup = { |
396 | .attrs = hwmon_attributes, | 397 | .attrs = hwmon_attributes, |
397 | }; | 398 | }; |
399 | #endif | ||
398 | 400 | ||
399 | static int | 401 | static int |
400 | nouveau_hwmon_init(struct drm_device *dev) | 402 | nouveau_hwmon_init(struct drm_device *dev) |
401 | { | 403 | { |
404 | #ifdef CONFIG_HWMON | ||
402 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 405 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
403 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 406 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
404 | struct device *hwmon_dev; | 407 | struct device *hwmon_dev; |
@@ -425,13 +428,14 @@ nouveau_hwmon_init(struct drm_device *dev) | |||
425 | } | 428 | } |
426 | 429 | ||
427 | pm->hwmon = hwmon_dev; | 430 | pm->hwmon = hwmon_dev; |
428 | 431 | #endif | |
429 | return 0; | 432 | return 0; |
430 | } | 433 | } |
431 | 434 | ||
432 | static void | 435 | static void |
433 | nouveau_hwmon_fini(struct drm_device *dev) | 436 | nouveau_hwmon_fini(struct drm_device *dev) |
434 | { | 437 | { |
438 | #ifdef CONFIG_HWMON | ||
435 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 439 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
436 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 440 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
437 | 441 | ||
@@ -439,6 +443,7 @@ nouveau_hwmon_fini(struct drm_device *dev) | |||
439 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); | 443 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); |
440 | hwmon_device_unregister(pm->hwmon); | 444 | hwmon_device_unregister(pm->hwmon); |
441 | } | 445 | } |
446 | #endif | ||
442 | } | 447 | } |
443 | 448 | ||
444 | int | 449 | int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index 7f16697cc96c..2d8580927ca4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c | |||
@@ -153,26 +153,42 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, | |||
153 | return -ENOMEM; | 153 | return -ENOMEM; |
154 | } | 154 | } |
155 | 155 | ||
156 | static struct nouveau_ramht_entry * | ||
157 | nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle) | ||
158 | { | ||
159 | struct nouveau_ramht *ramht = chan ? chan->ramht : NULL; | ||
160 | struct nouveau_ramht_entry *entry; | ||
161 | unsigned long flags; | ||
162 | |||
163 | if (!ramht) | ||
164 | return NULL; | ||
165 | |||
166 | spin_lock_irqsave(&ramht->lock, flags); | ||
167 | list_for_each_entry(entry, &ramht->entries, head) { | ||
168 | if (entry->channel == chan && | ||
169 | (!handle || entry->handle == handle)) { | ||
170 | list_del(&entry->head); | ||
171 | spin_unlock_irqrestore(&ramht->lock, flags); | ||
172 | |||
173 | return entry; | ||
174 | } | ||
175 | } | ||
176 | spin_unlock_irqrestore(&ramht->lock, flags); | ||
177 | |||
178 | return NULL; | ||
179 | } | ||
180 | |||
156 | static void | 181 | static void |
157 | nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle) | 182 | nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle) |
158 | { | 183 | { |
159 | struct drm_device *dev = chan->dev; | 184 | struct drm_device *dev = chan->dev; |
160 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 185 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
161 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | 186 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; |
162 | struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; | 187 | struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; |
163 | struct nouveau_ramht_entry *entry, *tmp; | 188 | unsigned long flags; |
164 | u32 co, ho; | 189 | u32 co, ho; |
165 | 190 | ||
166 | list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) { | 191 | spin_lock_irqsave(&chan->ramht->lock, flags); |
167 | if (entry->channel != chan || entry->handle != handle) | ||
168 | continue; | ||
169 | |||
170 | nouveau_gpuobj_ref(NULL, &entry->gpuobj); | ||
171 | list_del(&entry->head); | ||
172 | kfree(entry); | ||
173 | break; | ||
174 | } | ||
175 | |||
176 | co = ho = nouveau_ramht_hash_handle(chan, handle); | 192 | co = ho = nouveau_ramht_hash_handle(chan, handle); |
177 | do { | 193 | do { |
178 | if (nouveau_ramht_entry_valid(dev, ramht, co) && | 194 | if (nouveau_ramht_entry_valid(dev, ramht, co) && |
@@ -184,7 +200,7 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle) | |||
184 | nv_wo32(ramht, co + 0, 0x00000000); | 200 | nv_wo32(ramht, co + 0, 0x00000000); |
185 | nv_wo32(ramht, co + 4, 0x00000000); | 201 | nv_wo32(ramht, co + 4, 0x00000000); |
186 | instmem->flush(dev); | 202 | instmem->flush(dev); |
187 | return; | 203 | goto out; |
188 | } | 204 | } |
189 | 205 | ||
190 | co += 8; | 206 | co += 8; |
@@ -194,17 +210,22 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle) | |||
194 | 210 | ||
195 | NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", | 211 | NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", |
196 | chan->id, handle); | 212 | chan->id, handle); |
213 | out: | ||
214 | spin_unlock_irqrestore(&chan->ramht->lock, flags); | ||
197 | } | 215 | } |
198 | 216 | ||
199 | void | 217 | void |
200 | nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) | 218 | nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) |
201 | { | 219 | { |
202 | struct nouveau_ramht *ramht = chan->ramht; | 220 | struct nouveau_ramht_entry *entry; |
203 | unsigned long flags; | ||
204 | 221 | ||
205 | spin_lock_irqsave(&ramht->lock, flags); | 222 | entry = nouveau_ramht_remove_entry(chan, handle); |
206 | nouveau_ramht_remove_locked(chan, handle); | 223 | if (!entry) |
207 | spin_unlock_irqrestore(&ramht->lock, flags); | 224 | return; |
225 | |||
226 | nouveau_ramht_remove_hash(chan, entry->handle); | ||
227 | nouveau_gpuobj_ref(NULL, &entry->gpuobj); | ||
228 | kfree(entry); | ||
208 | } | 229 | } |
209 | 230 | ||
210 | struct nouveau_gpuobj * | 231 | struct nouveau_gpuobj * |
@@ -265,23 +286,19 @@ void | |||
265 | nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr, | 286 | nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr, |
266 | struct nouveau_channel *chan) | 287 | struct nouveau_channel *chan) |
267 | { | 288 | { |
268 | struct nouveau_ramht_entry *entry, *tmp; | 289 | struct nouveau_ramht_entry *entry; |
269 | struct nouveau_ramht *ramht; | 290 | struct nouveau_ramht *ramht; |
270 | unsigned long flags; | ||
271 | 291 | ||
272 | if (ref) | 292 | if (ref) |
273 | kref_get(&ref->refcount); | 293 | kref_get(&ref->refcount); |
274 | 294 | ||
275 | ramht = *ptr; | 295 | ramht = *ptr; |
276 | if (ramht) { | 296 | if (ramht) { |
277 | spin_lock_irqsave(&ramht->lock, flags); | 297 | while ((entry = nouveau_ramht_remove_entry(chan, 0))) { |
278 | list_for_each_entry_safe(entry, tmp, &ramht->entries, head) { | 298 | nouveau_ramht_remove_hash(chan, entry->handle); |
279 | if (entry->channel != chan) | 299 | nouveau_gpuobj_ref(NULL, &entry->gpuobj); |
280 | continue; | 300 | kfree(entry); |
281 | |||
282 | nouveau_ramht_remove_locked(chan, entry->handle); | ||
283 | } | 301 | } |
284 | spin_unlock_irqrestore(&ramht->lock, flags); | ||
285 | 302 | ||
286 | kref_put(&ramht->refcount, nouveau_ramht_del); | 303 | kref_put(&ramht->refcount, nouveau_ramht_del); |
287 | } | 304 | } |
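Editor's note: the nouveau_ramht.c refactor above splits removal into a locked list unlink that returns the entry (with handle 0 acting as a per-channel wildcard) and does the heavier cleanup -- hash removal, reference drop, kfree -- after the lock is released. A toy pthreads sketch of that shape (names are illustrative, not the driver's):

#include <pthread.h>
#include <stdlib.h>

struct entry {
        struct entry *next;
        int channel;
        unsigned handle;
};

static struct entry *entries;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* handle == 0 acts as a wildcard, like the drain loop in the hunk above. */
static struct entry *remove_entry(int channel, unsigned handle)
{
        struct entry **pp, *e;

        pthread_mutex_lock(&lock);
        for (pp = &entries; (e = *pp) != NULL; pp = &e->next) {
                if (e->channel == channel && (!handle || e->handle == handle)) {
                        *pp = e->next;          /* unlink while locked */
                        pthread_mutex_unlock(&lock);
                        return e;
                }
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

static void remove_all_for_channel(int channel)
{
        struct entry *e;

        while ((e = remove_entry(channel, 0))) {
                /* hash-table / refcount cleanup would go here */
                free(e);                        /* freed outside the lock */
        }
}

int main(void)
{
        for (unsigned h = 1; h <= 3; h++) {
                struct entry *e = malloc(sizeof(*e));
                e->channel = 1; e->handle = h; e->next = entries; entries = e;
        }
        remove_all_for_channel(1);
        return 0;
}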
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 288bacac7e5a..d4ac97007038 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -120,8 +120,8 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) | |||
120 | dev_priv->engine.instmem.flush(nvbe->dev); | 120 | dev_priv->engine.instmem.flush(nvbe->dev); |
121 | 121 | ||
122 | if (dev_priv->card_type == NV_50) { | 122 | if (dev_priv->card_type == NV_50) { |
123 | nv50_vm_flush(dev, 5); /* PGRAPH */ | 123 | dev_priv->engine.fifo.tlb_flush(dev); |
124 | nv50_vm_flush(dev, 0); /* PFIFO */ | 124 | dev_priv->engine.graph.tlb_flush(dev); |
125 | } | 125 | } |
126 | 126 | ||
127 | nvbe->bound = true; | 127 | nvbe->bound = true; |
@@ -162,8 +162,8 @@ nouveau_sgdma_unbind(struct ttm_backend *be) | |||
162 | dev_priv->engine.instmem.flush(nvbe->dev); | 162 | dev_priv->engine.instmem.flush(nvbe->dev); |
163 | 163 | ||
164 | if (dev_priv->card_type == NV_50) { | 164 | if (dev_priv->card_type == NV_50) { |
165 | nv50_vm_flush(dev, 5); | 165 | dev_priv->engine.fifo.tlb_flush(dev); |
166 | nv50_vm_flush(dev, 0); | 166 | dev_priv->engine.graph.tlb_flush(dev); |
167 | } | 167 | } |
168 | 168 | ||
169 | nvbe->bound = false; | 169 | nvbe->bound = false; |
@@ -224,7 +224,11 @@ nouveau_sgdma_init(struct drm_device *dev) | |||
224 | int i, ret; | 224 | int i, ret; |
225 | 225 | ||
226 | if (dev_priv->card_type < NV_50) { | 226 | if (dev_priv->card_type < NV_50) { |
227 | aper_size = (64 * 1024 * 1024); | 227 | if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024) |
228 | aper_size = 64 * 1024 * 1024; | ||
229 | else | ||
230 | aper_size = 512 * 1024 * 1024; | ||
231 | |||
228 | obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; | 232 | obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; |
229 | obj_size += 8; /* ctxdma header */ | 233 | obj_size += 8; /* ctxdma header */ |
230 | } else { | 234 | } else { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index ed7757f14083..049f755567e5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -354,6 +354,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
354 | engine->graph.destroy_context = nv50_graph_destroy_context; | 354 | engine->graph.destroy_context = nv50_graph_destroy_context; |
355 | engine->graph.load_context = nv50_graph_load_context; | 355 | engine->graph.load_context = nv50_graph_load_context; |
356 | engine->graph.unload_context = nv50_graph_unload_context; | 356 | engine->graph.unload_context = nv50_graph_unload_context; |
357 | if (dev_priv->chipset != 0x86) | ||
358 | engine->graph.tlb_flush = nv50_graph_tlb_flush; | ||
359 | else { | ||
360 | /* from what i can see nvidia do this on every | ||
361 | * pre-NVA3 board except NVAC, but, we've only | ||
362 | * ever seen problems on NV86 | ||
363 | */ | ||
364 | engine->graph.tlb_flush = nv86_graph_tlb_flush; | ||
365 | } | ||
357 | engine->fifo.channels = 128; | 366 | engine->fifo.channels = 128; |
358 | engine->fifo.init = nv50_fifo_init; | 367 | engine->fifo.init = nv50_fifo_init; |
359 | engine->fifo.takedown = nv50_fifo_takedown; | 368 | engine->fifo.takedown = nv50_fifo_takedown; |
@@ -365,6 +374,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
365 | engine->fifo.destroy_context = nv50_fifo_destroy_context; | 374 | engine->fifo.destroy_context = nv50_fifo_destroy_context; |
366 | engine->fifo.load_context = nv50_fifo_load_context; | 375 | engine->fifo.load_context = nv50_fifo_load_context; |
367 | engine->fifo.unload_context = nv50_fifo_unload_context; | 376 | engine->fifo.unload_context = nv50_fifo_unload_context; |
377 | engine->fifo.tlb_flush = nv50_fifo_tlb_flush; | ||
368 | engine->display.early_init = nv50_display_early_init; | 378 | engine->display.early_init = nv50_display_early_init; |
369 | engine->display.late_takedown = nv50_display_late_takedown; | 379 | engine->display.late_takedown = nv50_display_late_takedown; |
370 | engine->display.create = nv50_display_create; | 380 | engine->display.create = nv50_display_create; |
@@ -1041,6 +1051,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
1041 | case NOUVEAU_GETPARAM_PTIMER_TIME: | 1051 | case NOUVEAU_GETPARAM_PTIMER_TIME: |
1042 | getparam->value = dev_priv->engine.timer.read(dev); | 1052 | getparam->value = dev_priv->engine.timer.read(dev); |
1043 | break; | 1053 | break; |
1054 | case NOUVEAU_GETPARAM_HAS_BO_USAGE: | ||
1055 | getparam->value = 1; | ||
1056 | break; | ||
1044 | case NOUVEAU_GETPARAM_GRAPH_UNITS: | 1057 | case NOUVEAU_GETPARAM_GRAPH_UNITS: |
1045 | /* NV40 and NV50 versions are quite different, but register | 1058 | /* NV40 and NV50 versions are quite different, but register |
1046 | * address is the same. User is supposed to know the card | 1059 | * address is the same. User is supposed to know the card |
@@ -1051,7 +1064,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
1051 | } | 1064 | } |
1052 | /* FALLTHRU */ | 1065 | /* FALLTHRU */ |
1053 | default: | 1066 | default: |
1054 | NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); | 1067 | NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param); |
1055 | return -EINVAL; | 1068 | return -EINVAL; |
1056 | } | 1069 | } |
1057 | 1070 | ||
@@ -1066,7 +1079,7 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data, | |||
1066 | 1079 | ||
1067 | switch (setparam->param) { | 1080 | switch (setparam->param) { |
1068 | default: | 1081 | default: |
1069 | NV_ERROR(dev, "unknown parameter %lld\n", setparam->param); | 1082 | NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param); |
1070 | return -EINVAL; | 1083 | return -EINVAL; |
1071 | } | 1084 | } |
1072 | 1085 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 16bbbf1eff63..7ecc4adc1e45 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c | |||
@@ -191,7 +191,7 @@ nv40_temp_get(struct drm_device *dev) | |||
191 | int offset = sensor->offset_mult / sensor->offset_div; | 191 | int offset = sensor->offset_mult / sensor->offset_div; |
192 | int core_temp; | 192 | int core_temp; |
193 | 193 | ||
194 | if (dev_priv->chipset >= 0x50) { | 194 | if (dev_priv->card_type >= NV_50) { |
195 | core_temp = nv_rd32(dev, 0x20008); | 195 | core_temp = nv_rd32(dev, 0x20008); |
196 | } else { | 196 | } else { |
197 | core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff; | 197 | core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff; |
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index c71abc2a34d5..40e180741629 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c | |||
@@ -158,7 +158,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
158 | { | 158 | { |
159 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 159 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
160 | struct drm_device *dev = crtc->dev; | 160 | struct drm_device *dev = crtc->dev; |
161 | struct drm_connector *connector; | ||
162 | unsigned char seq1 = 0, crtc17 = 0; | 161 | unsigned char seq1 = 0, crtc17 = 0; |
163 | unsigned char crtc1A; | 162 | unsigned char crtc1A; |
164 | 163 | ||
@@ -213,10 +212,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
213 | NVVgaSeqReset(dev, nv_crtc->index, false); | 212 | NVVgaSeqReset(dev, nv_crtc->index, false); |
214 | 213 | ||
215 | NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A); | 214 | NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A); |
216 | |||
217 | /* Update connector polling modes */ | ||
218 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | ||
219 | nouveau_connector_set_polling(connector); | ||
220 | } | 215 | } |
221 | 216 | ||
222 | static bool | 217 | static bool |
@@ -831,7 +826,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
831 | /* Update the framebuffer location. */ | 826 | /* Update the framebuffer location. */ |
832 | regp->fb_start = nv_crtc->fb.offset & ~3; | 827 | regp->fb_start = nv_crtc->fb.offset & ~3; |
833 | regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8); | 828 | regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8); |
834 | NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start); | 829 | nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start); |
835 | 830 | ||
836 | /* Update the arbitration parameters. */ | 831 | /* Update the arbitration parameters. */ |
837 | nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel, | 832 | nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel, |
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index c936403b26e2..ef23550407b5 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c | |||
@@ -185,14 +185,15 @@ static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder, | |||
185 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 185 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
186 | struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); | 186 | struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); |
187 | 187 | ||
188 | /* For internal panels and gpu scaling on DVI we need the native mode */ | 188 | if (!nv_connector->native_mode || |
189 | if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { | 189 | nv_connector->scaling_mode == DRM_MODE_SCALE_NONE || |
190 | if (!nv_connector->native_mode) | 190 | mode->hdisplay > nv_connector->native_mode->hdisplay || |
191 | return false; | 191 | mode->vdisplay > nv_connector->native_mode->vdisplay) { |
192 | nv_encoder->mode = *adjusted_mode; | ||
193 | |||
194 | } else { | ||
192 | nv_encoder->mode = *nv_connector->native_mode; | 195 | nv_encoder->mode = *nv_connector->native_mode; |
193 | adjusted_mode->clock = nv_connector->native_mode->clock; | 196 | adjusted_mode->clock = nv_connector->native_mode->clock; |
194 | } else { | ||
195 | nv_encoder->mode = *adjusted_mode; | ||
196 | } | 197 | } |
197 | 198 | ||
198 | return true; | 199 | return true; |
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c index 6a6eb697d38e..eb1c70dd82ed 100644 --- a/drivers/gpu/drm/nouveau/nv04_pm.c +++ b/drivers/gpu/drm/nouveau/nv04_pm.c | |||
@@ -76,6 +76,15 @@ nv04_pm_clock_set(struct drm_device *dev, void *pre_state) | |||
76 | reg += 4; | 76 | reg += 4; |
77 | 77 | ||
78 | nouveau_hw_setpll(dev, reg, &state->calc); | 78 | nouveau_hw_setpll(dev, reg, &state->calc); |
79 | |||
80 | if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) { | ||
81 | if (dev_priv->card_type == NV_20) | ||
82 | nv_mask(dev, 0x1002c4, 0, 1 << 20); | ||
83 | |||
84 | /* Reset the DLLs */ | ||
85 | nv_mask(dev, 0x1002c0, 0, 1 << 8); | ||
86 | } | ||
87 | |||
79 | kfree(state); | 88 | kfree(state); |
80 | } | 89 | } |
81 | 90 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c index 2cdc2bfe7179..de81151648f8 100644 --- a/drivers/gpu/drm/nouveau/nv50_calc.c +++ b/drivers/gpu/drm/nouveau/nv50_calc.c | |||
@@ -51,24 +51,28 @@ nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk, | |||
51 | int *N, int *fN, int *M, int *P) | 51 | int *N, int *fN, int *M, int *P) |
52 | { | 52 | { |
53 | fixed20_12 fb_div, a, b; | 53 | fixed20_12 fb_div, a, b; |
54 | u32 refclk = pll->refclk / 10; | ||
55 | u32 max_vco_freq = pll->vco1.maxfreq / 10; | ||
56 | u32 max_vco_inputfreq = pll->vco1.max_inputfreq / 10; | ||
57 | clk /= 10; | ||
54 | 58 | ||
55 | *P = pll->vco1.maxfreq / clk; | 59 | *P = max_vco_freq / clk; |
56 | if (*P > pll->max_p) | 60 | if (*P > pll->max_p) |
57 | *P = pll->max_p; | 61 | *P = pll->max_p; |
58 | if (*P < pll->min_p) | 62 | if (*P < pll->min_p) |
59 | *P = pll->min_p; | 63 | *P = pll->min_p; |
60 | 64 | ||
61 | /* *M = ceil(refclk / pll->vco.max_inputfreq); */ | 65 | /* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq); */ |
62 | a.full = dfixed_const(pll->refclk); | 66 | a.full = dfixed_const(refclk + max_vco_inputfreq); |
63 | b.full = dfixed_const(pll->vco1.max_inputfreq); | 67 | b.full = dfixed_const(max_vco_inputfreq); |
64 | a.full = dfixed_div(a, b); | 68 | a.full = dfixed_div(a, b); |
65 | a.full = dfixed_ceil(a); | 69 | a.full = dfixed_floor(a); |
66 | *M = dfixed_trunc(a); | 70 | *M = dfixed_trunc(a); |
67 | 71 | ||
68 | /* fb_div = (vco * *M) / refclk; */ | 72 | /* fb_div = (vco * *M) / refclk; */ |
69 | fb_div.full = dfixed_const(clk * *P); | 73 | fb_div.full = dfixed_const(clk * *P); |
70 | fb_div.full = dfixed_mul(fb_div, a); | 74 | fb_div.full = dfixed_mul(fb_div, a); |
71 | a.full = dfixed_const(pll->refclk); | 75 | a.full = dfixed_const(refclk); |
72 | fb_div.full = dfixed_div(fb_div, a); | 76 | fb_div.full = dfixed_div(fb_div, a); |
73 | 77 | ||
74 | /* *N = floor(fb_div); */ | 78 | /* *N = floor(fb_div); */ |
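Editor's note: the nv50_calc.c hunk above pre-divides all frequencies by 10 before the 20.12 fixed-point math and changes the M calculation from ceil(refclk / max_inputfreq) to floor((refclk + max_inputfreq) / max_inputfreq). The two forms in plain integer arithmetic, with invented example numbers:

#include <stdio.h>

static unsigned old_m(unsigned refclk, unsigned max_input)
{
        return (refclk + max_input - 1) / max_input;    /* ceil(refclk / max_input) */
}

static unsigned new_m(unsigned refclk, unsigned max_input)
{
        return (refclk + max_input) / max_input;        /* floor((refclk + max) / max) */
}

int main(void)
{
        /* The two agree unless refclk is an exact multiple of max_input,
         * where the new form comes out one higher. */
        printf("%u %u\n", old_m(2700, 45), new_m(2700, 45));    /* 60 61 */
        printf("%u %u\n", old_m(2700, 40), new_m(2700, 40));    /* 68 68 */
        return 0;
}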
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 16380d52cd88..56476d0c6de8 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
@@ -546,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
546 | } | 546 | } |
547 | 547 | ||
548 | nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; | 548 | nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; |
549 | nv_crtc->fb.tile_flags = fb->nvbo->tile_flags; | 549 | nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); |
550 | nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; | 550 | nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; |
551 | if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { | 551 | if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { |
552 | ret = RING_SPACE(evo, 2); | 552 | ret = RING_SPACE(evo, 2); |
@@ -578,7 +578,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
578 | fb->nvbo->tile_mode); | 578 | fb->nvbo->tile_mode); |
579 | } | 579 | } |
580 | if (dev_priv->chipset == 0x50) | 580 | if (dev_priv->chipset == 0x50) |
581 | OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format); | 581 | OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format); |
582 | else | 582 | else |
583 | OUT_RING(evo, format); | 583 | OUT_RING(evo, format); |
584 | 584 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 55c9663ef2bf..f624c611ddea 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -1032,11 +1032,18 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) | |||
1032 | struct drm_connector *connector; | 1032 | struct drm_connector *connector; |
1033 | const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; | 1033 | const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; |
1034 | uint32_t unplug_mask, plug_mask, change_mask; | 1034 | uint32_t unplug_mask, plug_mask, change_mask; |
1035 | uint32_t hpd0, hpd1 = 0; | 1035 | uint32_t hpd0, hpd1; |
1036 | 1036 | ||
1037 | hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050); | 1037 | spin_lock_irq(&dev_priv->hpd_state.lock); |
1038 | hpd0 = dev_priv->hpd_state.hpd0_bits; | ||
1039 | dev_priv->hpd_state.hpd0_bits = 0; | ||
1040 | hpd1 = dev_priv->hpd_state.hpd1_bits; | ||
1041 | dev_priv->hpd_state.hpd1_bits = 0; | ||
1042 | spin_unlock_irq(&dev_priv->hpd_state.lock); | ||
1043 | |||
1044 | hpd0 &= nv_rd32(dev, 0xe050); | ||
1038 | if (dev_priv->chipset >= 0x90) | 1045 | if (dev_priv->chipset >= 0x90) |
1039 | hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070); | 1046 | hpd1 &= nv_rd32(dev, 0xe070); |
1040 | 1047 | ||
1041 | plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); | 1048 | plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); |
1042 | unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); | 1049 | unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); |
@@ -1078,10 +1085,6 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) | |||
1078 | helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); | 1085 | helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); |
1079 | } | 1086 | } |
1080 | 1087 | ||
1081 | nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); | ||
1082 | if (dev_priv->chipset >= 0x90) | ||
1083 | nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); | ||
1084 | |||
1085 | drm_helper_hpd_irq_event(dev); | 1088 | drm_helper_hpd_irq_event(dev); |
1086 | } | 1089 | } |
1087 | 1090 | ||
@@ -1092,8 +1095,22 @@ nv50_display_irq_handler(struct drm_device *dev) | |||
1092 | uint32_t delayed = 0; | 1095 | uint32_t delayed = 0; |
1093 | 1096 | ||
1094 | if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { | 1097 | if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { |
1095 | if (!work_pending(&dev_priv->hpd_work)) | 1098 | uint32_t hpd0_bits, hpd1_bits = 0; |
1096 | queue_work(dev_priv->wq, &dev_priv->hpd_work); | 1099 | |
1100 | hpd0_bits = nv_rd32(dev, 0xe054); | ||
1101 | nv_wr32(dev, 0xe054, hpd0_bits); | ||
1102 | |||
1103 | if (dev_priv->chipset >= 0x90) { | ||
1104 | hpd1_bits = nv_rd32(dev, 0xe074); | ||
1105 | nv_wr32(dev, 0xe074, hpd1_bits); | ||
1106 | } | ||
1107 | |||
1108 | spin_lock(&dev_priv->hpd_state.lock); | ||
1109 | dev_priv->hpd_state.hpd0_bits |= hpd0_bits; | ||
1110 | dev_priv->hpd_state.hpd1_bits |= hpd1_bits; | ||
1111 | spin_unlock(&dev_priv->hpd_state.lock); | ||
1112 | |||
1113 | queue_work(dev_priv->wq, &dev_priv->hpd_work); | ||
1097 | } | 1114 | } |
1098 | 1115 | ||
1099 | while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { | 1116 | while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { |
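Editor's note: the nv50_display.c changes above split hotplug handling in two: the interrupt handler reads and acknowledges the pending HPD bits immediately, ORs them into hpd_state under a spinlock, and queues the work item; the bottom half later takes the lock, swaps the accumulated bits for zero, and processes them. A pthreads sketch of that latch-and-drain shape (plain user-space stand-ins, not the kernel primitives):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_spinlock_t hpd_lock;
static uint32_t hpd0_bits, hpd1_bits;

static void irq_side(uint32_t hpd0, uint32_t hpd1)
{
        pthread_spin_lock(&hpd_lock);
        hpd0_bits |= hpd0;              /* accumulate, never overwrite */
        hpd1_bits |= hpd1;
        pthread_spin_unlock(&hpd_lock);
        /* ...queue_work() equivalent would go here... */
}

static void worker_side(void)
{
        uint32_t hpd0, hpd1;

        pthread_spin_lock(&hpd_lock);
        hpd0 = hpd0_bits; hpd0_bits = 0;        /* take and clear the latch */
        hpd1 = hpd1_bits; hpd1_bits = 0;
        pthread_spin_unlock(&hpd_lock);

        printf("handling hpd0=0x%08x hpd1=0x%08x\n", hpd0, hpd1);
}

int main(void)
{
        pthread_spin_init(&hpd_lock, PTHREAD_PROCESS_PRIVATE);
        irq_side(0x00000001, 0);
        irq_side(0x00010000, 0);        /* second event before the worker ran */
        worker_side();                  /* sees both bits */
        pthread_spin_destroy(&hpd_lock);
        return 0;
}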
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index a46a961102f3..1da65bd60c10 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c | |||
@@ -464,3 +464,8 @@ nv50_fifo_unload_context(struct drm_device *dev) | |||
464 | return 0; | 464 | return 0; |
465 | } | 465 | } |
466 | 466 | ||
467 | void | ||
468 | nv50_fifo_tlb_flush(struct drm_device *dev) | ||
469 | { | ||
470 | nv50_vm_flush(dev, 5); | ||
471 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index cbf5ae2f67d4..8b669d0af610 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
@@ -402,3 +402,55 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = { | |||
402 | { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ | 402 | { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ |
403 | {} | 403 | {} |
404 | }; | 404 | }; |
405 | |||
406 | void | ||
407 | nv50_graph_tlb_flush(struct drm_device *dev) | ||
408 | { | ||
409 | nv50_vm_flush(dev, 0); | ||
410 | } | ||
411 | |||
412 | void | ||
413 | nv86_graph_tlb_flush(struct drm_device *dev) | ||
414 | { | ||
415 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
416 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | ||
417 | bool idle, timeout = false; | ||
418 | unsigned long flags; | ||
419 | u64 start; | ||
420 | u32 tmp; | ||
421 | |||
422 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
423 | nv_mask(dev, 0x400500, 0x00000001, 0x00000000); | ||
424 | |||
425 | start = ptimer->read(dev); | ||
426 | do { | ||
427 | idle = true; | ||
428 | |||
429 | for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) { | ||
430 | if ((tmp & 7) == 1) | ||
431 | idle = false; | ||
432 | } | ||
433 | |||
434 | for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) { | ||
435 | if ((tmp & 7) == 1) | ||
436 | idle = false; | ||
437 | } | ||
438 | |||
439 | for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) { | ||
440 | if ((tmp & 7) == 1) | ||
441 | idle = false; | ||
442 | } | ||
443 | } while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000)); | ||
444 | |||
445 | if (timeout) { | ||
446 | NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: " | ||
447 | "0x%08x 0x%08x 0x%08x 0x%08x\n", | ||
448 | nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380), | ||
449 | nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388)); | ||
450 | } | ||
451 | |||
452 | nv50_vm_flush(dev, 0); | ||
453 | |||
454 | nv_mask(dev, 0x400500, 0x00000001, 0x00000001); | ||
455 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
456 | } | ||
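Editor's note: the new nv86_graph_tlb_flush() above is a poll-until-idle loop with a 2-second deadline read from the PTIMER, logging an error and flushing anyway on timeout. The generic shape of that loop as a runnable user-space sketch, with clock_gettime() standing in for the PTIMER read:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Keep checking a condition and give up after `timeout_ns`
 * (2e9 here would match the hunk above). */
static bool wait_idle(bool (*is_idle)(void), uint64_t timeout_ns)
{
        uint64_t start = now_ns();

        do {
                if (is_idle())
                        return true;
        } while (now_ns() - start < timeout_ns);

        return false;           /* caller logs an error and flushes anyway */
}

static int calls;
static bool fake_idle(void) { return ++calls > 1000; }

int main(void)
{
        if (!wait_idle(fake_idle, 2000000000ull))
                fprintf(stderr, "idle timeout\n");
        return 0;
}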
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index a53fc974332b..b773229b7647 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -402,7 +402,6 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
402 | } | 402 | } |
403 | dev_priv->engine.instmem.flush(dev); | 403 | dev_priv->engine.instmem.flush(dev); |
404 | 404 | ||
405 | nv50_vm_flush(dev, 4); | ||
406 | nv50_vm_flush(dev, 6); | 405 | nv50_vm_flush(dev, 6); |
407 | 406 | ||
408 | gpuobj->im_bound = 1; | 407 | gpuobj->im_bound = 1; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 488c36c8f5e6..4dc5b4714c5a 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -1650,7 +1650,36 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1650 | } | 1650 | } |
1651 | } | 1651 | } |
1652 | 1652 | ||
1653 | rdev->config.evergreen.tile_config = gb_addr_config; | 1653 | /* setup tiling info dword. gb_addr_config is not adequate since it does |
1654 | * not have bank info, so create a custom tiling dword. | ||
1655 | * bits 3:0 num_pipes | ||
1656 | * bits 7:4 num_banks | ||
1657 | * bits 11:8 group_size | ||
1658 | * bits 15:12 row_size | ||
1659 | */ | ||
1660 | rdev->config.evergreen.tile_config = 0; | ||
1661 | switch (rdev->config.evergreen.max_tile_pipes) { | ||
1662 | case 1: | ||
1663 | default: | ||
1664 | rdev->config.evergreen.tile_config |= (0 << 0); | ||
1665 | break; | ||
1666 | case 2: | ||
1667 | rdev->config.evergreen.tile_config |= (1 << 0); | ||
1668 | break; | ||
1669 | case 4: | ||
1670 | rdev->config.evergreen.tile_config |= (2 << 0); | ||
1671 | break; | ||
1672 | case 8: | ||
1673 | rdev->config.evergreen.tile_config |= (3 << 0); | ||
1674 | break; | ||
1675 | } | ||
1676 | rdev->config.evergreen.tile_config |= | ||
1677 | ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; | ||
1678 | rdev->config.evergreen.tile_config |= | ||
1679 | ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; | ||
1680 | rdev->config.evergreen.tile_config |= | ||
1681 | ((gb_addr_config & 0x30000000) >> 28) << 12; | ||
1682 | |||
1654 | WREG32(GB_BACKEND_MAP, gb_backend_map); | 1683 | WREG32(GB_BACKEND_MAP, gb_backend_map); |
1655 | WREG32(GB_ADDR_CONFIG, gb_addr_config); | 1684 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
1656 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 1685 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
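Editor's note: the evergreen.c hunk above builds a custom tiling dword because GB_ADDR_CONFIG lacks bank information: bits 3:0 hold the pipe count as log2 (1/2/4/8 pipes encode as 0/1/2/3), with bank, group-size and row-size fields in the next three nibbles. A small pack/decode sketch of that layout; the bank/group/row values below are placeholders for what the driver extracts from MC_ARB_RAMCFG and GB_ADDR_CONFIG:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_tile_config(int num_pipes, uint32_t banks,
                                 uint32_t group_size, uint32_t row_size)
{
        uint32_t cfg = 0;

        switch (num_pipes) {
        case 2: cfg |= 1; break;
        case 4: cfg |= 2; break;
        case 8: cfg |= 3; break;
        default: break;                 /* 1 pipe encodes as 0 */
        }
        cfg |= (banks      & 0xf) << 4;   /* bits 7:4  */
        cfg |= (group_size & 0xf) << 8;   /* bits 11:8 */
        cfg |= (row_size   & 0xf) << 12;  /* bits 15:12 */
        return cfg;
}

int main(void)
{
        uint32_t cfg = pack_tile_config(8, 2, 1, 2);
        printf("tile_config = 0x%04x, num_pipes = %d\n",
               cfg, 1 << (cfg & 0xf));    /* decode the pipe field back */
        return 0;
}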
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index ac3b6dde23db..e0e590110dd4 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c | |||
@@ -459,7 +459,7 @@ int evergreen_blit_init(struct radeon_device *rdev) | |||
459 | obj_size += evergreen_ps_size * 4; | 459 | obj_size += evergreen_ps_size * 4; |
460 | obj_size = ALIGN(obj_size, 256); | 460 | obj_size = ALIGN(obj_size, 256); |
461 | 461 | ||
462 | r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, | 462 | r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
463 | &rdev->r600_blit.shader_obj); | 463 | &rdev->r600_blit.shader_obj); |
464 | if (r) { | 464 | if (r) { |
465 | DRM_ERROR("evergreen failed to allocate shader\n"); | 465 | DRM_ERROR("evergreen failed to allocate shader\n"); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 0f806cc7dc75..a3552594ccc4 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2718,7 +2718,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev) | |||
2718 | /* Allocate ring buffer */ | 2718 | /* Allocate ring buffer */ |
2719 | if (rdev->ih.ring_obj == NULL) { | 2719 | if (rdev->ih.ring_obj == NULL) { |
2720 | r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, | 2720 | r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, |
2721 | true, | 2721 | PAGE_SIZE, true, |
2722 | RADEON_GEM_DOMAIN_GTT, | 2722 | RADEON_GEM_DOMAIN_GTT, |
2723 | &rdev->ih.ring_obj); | 2723 | &rdev->ih.ring_obj); |
2724 | if (r) { | 2724 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 8362974ef41a..86e5aa07f0db 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -501,7 +501,7 @@ int r600_blit_init(struct radeon_device *rdev) | |||
501 | obj_size += r6xx_ps_size * 4; | 501 | obj_size += r6xx_ps_size * 4; |
502 | obj_size = ALIGN(obj_size, 256); | 502 | obj_size = ALIGN(obj_size, 256); |
503 | 503 | ||
504 | r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, | 504 | r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
505 | &rdev->r600_blit.shader_obj); | 505 | &rdev->r600_blit.shader_obj); |
506 | if (r) { | 506 | if (r) { |
507 | DRM_ERROR("r600 failed to allocate shader\n"); | 507 | DRM_ERROR("r600 failed to allocate shader\n"); |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 37cc2aa9f923..9bebac1ec006 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -50,6 +50,7 @@ struct r600_cs_track { | |||
50 | u32 nsamples; | 50 | u32 nsamples; |
51 | u32 cb_color_base_last[8]; | 51 | u32 cb_color_base_last[8]; |
52 | struct radeon_bo *cb_color_bo[8]; | 52 | struct radeon_bo *cb_color_bo[8]; |
53 | u64 cb_color_bo_mc[8]; | ||
53 | u32 cb_color_bo_offset[8]; | 54 | u32 cb_color_bo_offset[8]; |
54 | struct radeon_bo *cb_color_frag_bo[8]; | 55 | struct radeon_bo *cb_color_frag_bo[8]; |
55 | struct radeon_bo *cb_color_tile_bo[8]; | 56 | struct radeon_bo *cb_color_tile_bo[8]; |
@@ -67,6 +68,7 @@ struct r600_cs_track { | |||
67 | u32 db_depth_size; | 68 | u32 db_depth_size; |
68 | u32 db_offset; | 69 | u32 db_offset; |
69 | struct radeon_bo *db_bo; | 70 | struct radeon_bo *db_bo; |
71 | u64 db_bo_mc; | ||
70 | }; | 72 | }; |
71 | 73 | ||
72 | static inline int r600_bpe_from_format(u32 *bpe, u32 format) | 74 | static inline int r600_bpe_from_format(u32 *bpe, u32 format) |
@@ -140,6 +142,68 @@ static inline int r600_bpe_from_format(u32 *bpe, u32 format) | |||
140 | return 0; | 142 | return 0; |
141 | } | 143 | } |
142 | 144 | ||
145 | struct array_mode_checker { | ||
146 | int array_mode; | ||
147 | u32 group_size; | ||
148 | u32 nbanks; | ||
149 | u32 npipes; | ||
150 | u32 nsamples; | ||
151 | u32 bpe; | ||
152 | }; | ||
153 | |||
154 | /* returns alignment in pixels for pitch/height/depth and bytes for base */ | ||
155 | static inline int r600_get_array_mode_alignment(struct array_mode_checker *values, | ||
156 | u32 *pitch_align, | ||
157 | u32 *height_align, | ||
158 | u32 *depth_align, | ||
159 | u64 *base_align) | ||
160 | { | ||
161 | u32 tile_width = 8; | ||
162 | u32 tile_height = 8; | ||
163 | u32 macro_tile_width = values->nbanks; | ||
164 | u32 macro_tile_height = values->npipes; | ||
165 | u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples; | ||
166 | u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; | ||
167 | |||
168 | switch (values->array_mode) { | ||
169 | case ARRAY_LINEAR_GENERAL: | ||
170 | /* technically tile_width/_height for pitch/height */ | ||
171 | *pitch_align = 1; /* tile_width */ | ||
172 | *height_align = 1; /* tile_height */ | ||
173 | *depth_align = 1; | ||
174 | *base_align = 1; | ||
175 | break; | ||
176 | case ARRAY_LINEAR_ALIGNED: | ||
177 | *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe)); | ||
178 | *height_align = tile_height; | ||
179 | *depth_align = 1; | ||
180 | *base_align = values->group_size; | ||
181 | break; | ||
182 | case ARRAY_1D_TILED_THIN1: | ||
183 | *pitch_align = max((u32)tile_width, | ||
184 | (u32)(values->group_size / | ||
185 | (tile_height * values->bpe * values->nsamples))); | ||
186 | *height_align = tile_height; | ||
187 | *depth_align = 1; | ||
188 | *base_align = values->group_size; | ||
189 | break; | ||
190 | case ARRAY_2D_TILED_THIN1: | ||
191 | *pitch_align = max((u32)macro_tile_width, | ||
192 | (u32)(((values->group_size / tile_height) / | ||
193 | (values->bpe * values->nsamples)) * | ||
194 | values->nbanks)) * tile_width; | ||
195 | *height_align = macro_tile_height * tile_height; | ||
196 | *depth_align = 1; | ||
197 | *base_align = max(macro_tile_bytes, | ||
198 | (*pitch_align) * values->bpe * (*height_align) * values->nsamples); | ||
199 | break; | ||
200 | default: | ||
201 | return -EINVAL; | ||
202 | } | ||
203 | |||
204 | return 0; | ||
205 | } | ||
206 | |||
143 | static void r600_cs_track_init(struct r600_cs_track *track) | 207 | static void r600_cs_track_init(struct r600_cs_track *track) |
144 | { | 208 | { |
145 | int i; | 209 | int i; |
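The helper above centralizes the per-array-mode alignment math that the CB, DB and texture validators later in this diff all need. A usage sketch of the call pattern, mirroring how r600_cs_track_validate_cb fills the checker; the wrapper name r600_query_surface_alignment is hypothetical, the validators inline these steps instead of calling a wrapper.

/* Sketch only: shows the intended call pattern for
 * r600_get_array_mode_alignment() using the types introduced in this diff. */
static int r600_query_surface_alignment(struct r600_cs_track *track,
					u32 array_mode, u32 bpe, u32 nsamples,
					u32 *pitch_align, u32 *height_align,
					u32 *depth_align, u64 *base_align)
{
	struct array_mode_checker array_check = {
		.array_mode = array_mode,
		.group_size = track->group_size,
		.nbanks     = track->nbanks,
		.npipes     = track->npipes,
		.nsamples   = nsamples,
		.bpe        = bpe,
	};

	/* returns -EINVAL for an array mode the helper does not know about */
	return r600_get_array_mode_alignment(&array_check, pitch_align,
					     height_align, depth_align,
					     base_align);
}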
@@ -153,10 +217,12 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
153 | track->cb_color_info[i] = 0; | 217 | track->cb_color_info[i] = 0; |
154 | track->cb_color_bo[i] = NULL; | 218 | track->cb_color_bo[i] = NULL; |
155 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; | 219 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; |
220 | track->cb_color_bo_mc[i] = 0xFFFFFFFF; | ||
156 | } | 221 | } |
157 | track->cb_target_mask = 0xFFFFFFFF; | 222 | track->cb_target_mask = 0xFFFFFFFF; |
158 | track->cb_shader_mask = 0xFFFFFFFF; | 223 | track->cb_shader_mask = 0xFFFFFFFF; |
159 | track->db_bo = NULL; | 224 | track->db_bo = NULL; |
225 | track->db_bo_mc = 0xFFFFFFFF; | ||
160 | /* assume the biggest format and that htile is enabled */ | 226 | /* assume the biggest format and that htile is enabled */ |
161 | track->db_depth_info = 7 | (1 << 25); | 227 | track->db_depth_info = 7 | (1 << 25); |
162 | track->db_depth_view = 0xFFFFC000; | 228 | track->db_depth_view = 0xFFFFC000; |
@@ -168,7 +234,10 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
168 | static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | 234 | static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) |
169 | { | 235 | { |
170 | struct r600_cs_track *track = p->track; | 236 | struct r600_cs_track *track = p->track; |
171 | u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align; | 237 | u32 bpe = 0, slice_tile_max, size, tmp; |
238 | u32 height, height_align, pitch, pitch_align, depth_align; | ||
239 | u64 base_offset, base_align; | ||
240 | struct array_mode_checker array_check; | ||
172 | volatile u32 *ib = p->ib->ptr; | 241 | volatile u32 *ib = p->ib->ptr; |
173 | unsigned array_mode; | 242 | unsigned array_mode; |
174 | 243 | ||
@@ -183,60 +252,40 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
183 | i, track->cb_color_info[i]); | 252 | i, track->cb_color_info[i]); |
184 | return -EINVAL; | 253 | return -EINVAL; |
185 | } | 254 | } |
186 | /* pitch is the number of 8x8 tiles per row */ | 255 | /* pitch in pixels */ |
187 | pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1; | 256 | pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8; |
188 | slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; | 257 | slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; |
189 | slice_tile_max *= 64; | 258 | slice_tile_max *= 64; |
190 | height = slice_tile_max / (pitch * 8); | 259 | height = slice_tile_max / pitch; |
191 | if (height > 8192) | 260 | if (height > 8192) |
192 | height = 8192; | 261 | height = 8192; |
193 | array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); | 262 | array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); |
263 | |||
264 | base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i]; | ||
265 | array_check.array_mode = array_mode; | ||
266 | array_check.group_size = track->group_size; | ||
267 | array_check.nbanks = track->nbanks; | ||
268 | array_check.npipes = track->npipes; | ||
269 | array_check.nsamples = track->nsamples; | ||
270 | array_check.bpe = bpe; | ||
271 | if (r600_get_array_mode_alignment(&array_check, | ||
272 | &pitch_align, &height_align, &depth_align, &base_align)) { | ||
273 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, | ||
274 | G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, | ||
275 | track->cb_color_info[i]); | ||
276 | return -EINVAL; | ||
277 | } | ||
194 | switch (array_mode) { | 278 | switch (array_mode) { |
195 | case V_0280A0_ARRAY_LINEAR_GENERAL: | 279 | case V_0280A0_ARRAY_LINEAR_GENERAL: |
196 | /* technically height & 0x7 */ | ||
197 | break; | 280 | break; |
198 | case V_0280A0_ARRAY_LINEAR_ALIGNED: | 281 | case V_0280A0_ARRAY_LINEAR_ALIGNED: |
199 | pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; | ||
200 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
201 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | ||
202 | __func__, __LINE__, pitch); | ||
203 | return -EINVAL; | ||
204 | } | ||
205 | if (!IS_ALIGNED(height, 8)) { | ||
206 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | ||
207 | __func__, __LINE__, height); | ||
208 | return -EINVAL; | ||
209 | } | ||
210 | break; | 282 | break; |
211 | case V_0280A0_ARRAY_1D_TILED_THIN1: | 283 | case V_0280A0_ARRAY_1D_TILED_THIN1: |
212 | pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8; | ||
213 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
214 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | ||
215 | __func__, __LINE__, pitch); | ||
216 | return -EINVAL; | ||
217 | } | ||
218 | /* avoid breaking userspace */ | 284 | /* avoid breaking userspace */ |
219 | if (height > 7) | 285 | if (height > 7) |
220 | height &= ~0x7; | 286 | height &= ~0x7; |
221 | if (!IS_ALIGNED(height, 8)) { | ||
222 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | ||
223 | __func__, __LINE__, height); | ||
224 | return -EINVAL; | ||
225 | } | ||
226 | break; | 287 | break; |
227 | case V_0280A0_ARRAY_2D_TILED_THIN1: | 288 | case V_0280A0_ARRAY_2D_TILED_THIN1: |
228 | pitch_align = max((u32)track->nbanks, | ||
229 | (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)) / 8; | ||
230 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
231 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | ||
232 | __func__, __LINE__, pitch); | ||
233 | return -EINVAL; | ||
234 | } | ||
235 | if (!IS_ALIGNED((height / 8), track->npipes)) { | ||
236 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | ||
237 | __func__, __LINE__, height); | ||
238 | return -EINVAL; | ||
239 | } | ||
240 | break; | 289 | break; |
241 | default: | 290 | default: |
242 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, | 291 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, |
@@ -244,13 +293,29 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
244 | track->cb_color_info[i]); | 293 | track->cb_color_info[i]); |
245 | return -EINVAL; | 294 | return -EINVAL; |
246 | } | 295 | } |
296 | |||
297 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
298 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | ||
299 | __func__, __LINE__, pitch); | ||
300 | return -EINVAL; | ||
301 | } | ||
302 | if (!IS_ALIGNED(height, height_align)) { | ||
303 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | ||
304 | __func__, __LINE__, height); | ||
305 | return -EINVAL; | ||
306 | } | ||
307 | if (!IS_ALIGNED(base_offset, base_align)) { | ||
308 | dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); | ||
309 | return -EINVAL; | ||
310 | } | ||
311 | |||
247 | /* check offset */ | 312 | /* check offset */ |
248 | tmp = height * pitch * 8 * bpe; | 313 | tmp = height * pitch * bpe; |
249 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { | 314 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { |
250 | if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { | 315 | if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { |
251 | /* the initial DDX does bad things with the CB size occasionally */ | 316 | /* the initial DDX does bad things with the CB size occasionally */ |
252 | /* it rounds up height too far for slice tile max but the BO is smaller */ | 317 | /* it rounds up height too far for slice tile max but the BO is smaller */ |
253 | tmp = (height - 7) * 8 * bpe; | 318 | tmp = (height - 7) * pitch * bpe; |
254 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { | 319 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { |
255 | dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); | 320 | dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); |
256 | return -EINVAL; | 321 | return -EINVAL; |
@@ -260,15 +325,11 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
260 | return -EINVAL; | 325 | return -EINVAL; |
261 | } | 326 | } |
262 | } | 327 | } |
263 | if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) { | ||
264 | dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]); | ||
265 | return -EINVAL; | ||
266 | } | ||
267 | /* limit max tile */ | 328 | /* limit max tile */ |
268 | tmp = (height * pitch * 8) >> 6; | 329 | tmp = (height * pitch) >> 6; |
269 | if (tmp < slice_tile_max) | 330 | if (tmp < slice_tile_max) |
270 | slice_tile_max = tmp; | 331 | slice_tile_max = tmp; |
271 | tmp = S_028060_PITCH_TILE_MAX(pitch - 1) | | 332 | tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | |
272 | S_028060_SLICE_TILE_MAX(slice_tile_max - 1); | 333 | S_028060_SLICE_TILE_MAX(slice_tile_max - 1); |
273 | ib[track->cb_color_size_idx[i]] = tmp; | 334 | ib[track->cb_color_size_idx[i]] = tmp; |
274 | return 0; | 335 | return 0; |
@@ -310,7 +371,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
310 | /* Check depth buffer */ | 371 | /* Check depth buffer */ |
311 | if (G_028800_STENCIL_ENABLE(track->db_depth_control) || | 372 | if (G_028800_STENCIL_ENABLE(track->db_depth_control) || |
312 | G_028800_Z_ENABLE(track->db_depth_control)) { | 373 | G_028800_Z_ENABLE(track->db_depth_control)) { |
313 | u32 nviews, bpe, ntiles, pitch, pitch_align, height, size, slice_tile_max; | 374 | u32 nviews, bpe, ntiles, size, slice_tile_max; |
375 | u32 height, height_align, pitch, pitch_align, depth_align; | ||
376 | u64 base_offset, base_align; | ||
377 | struct array_mode_checker array_check; | ||
378 | int array_mode; | ||
379 | |||
314 | if (track->db_bo == NULL) { | 380 | if (track->db_bo == NULL) { |
315 | dev_warn(p->dev, "z/stencil with no depth buffer\n"); | 381 | dev_warn(p->dev, "z/stencil with no depth buffer\n"); |
316 | return -EINVAL; | 382 | return -EINVAL; |
@@ -353,41 +419,34 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
353 | ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); | 419 | ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); |
354 | } else { | 420 | } else { |
355 | size = radeon_bo_size(track->db_bo); | 421 | size = radeon_bo_size(track->db_bo); |
356 | pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1; | 422 | /* pitch in pixels */ |
423 | pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; | ||
357 | slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; | 424 | slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; |
358 | slice_tile_max *= 64; | 425 | slice_tile_max *= 64; |
359 | height = slice_tile_max / (pitch * 8); | 426 | height = slice_tile_max / pitch; |
360 | if (height > 8192) | 427 | if (height > 8192) |
361 | height = 8192; | 428 | height = 8192; |
362 | switch (G_028010_ARRAY_MODE(track->db_depth_info)) { | 429 | base_offset = track->db_bo_mc + track->db_offset; |
430 | array_mode = G_028010_ARRAY_MODE(track->db_depth_info); | ||
431 | array_check.array_mode = array_mode; | ||
432 | array_check.group_size = track->group_size; | ||
433 | array_check.nbanks = track->nbanks; | ||
434 | array_check.npipes = track->npipes; | ||
435 | array_check.nsamples = track->nsamples; | ||
436 | array_check.bpe = bpe; | ||
437 | if (r600_get_array_mode_alignment(&array_check, | ||
438 | &pitch_align, &height_align, &depth_align, &base_align)) { | ||
439 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
440 | G_028010_ARRAY_MODE(track->db_depth_info), | ||
441 | track->db_depth_info); | ||
442 | return -EINVAL; | ||
443 | } | ||
444 | switch (array_mode) { | ||
363 | case V_028010_ARRAY_1D_TILED_THIN1: | 445 | case V_028010_ARRAY_1D_TILED_THIN1: |
364 | pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8); | ||
365 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
366 | dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", | ||
367 | __func__, __LINE__, pitch); | ||
368 | return -EINVAL; | ||
369 | } | ||
370 | /* don't break userspace */ | 446 | /* don't break userspace */ |
371 | height &= ~0x7; | 447 | height &= ~0x7; |
372 | if (!IS_ALIGNED(height, 8)) { | ||
373 | dev_warn(p->dev, "%s:%d db height (%d) invalid\n", | ||
374 | __func__, __LINE__, height); | ||
375 | return -EINVAL; | ||
376 | } | ||
377 | break; | 448 | break; |
378 | case V_028010_ARRAY_2D_TILED_THIN1: | 449 | case V_028010_ARRAY_2D_TILED_THIN1: |
379 | pitch_align = max((u32)track->nbanks, | ||
380 | (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; | ||
381 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
382 | dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", | ||
383 | __func__, __LINE__, pitch); | ||
384 | return -EINVAL; | ||
385 | } | ||
386 | if (!IS_ALIGNED((height / 8), track->npipes)) { | ||
387 | dev_warn(p->dev, "%s:%d db height (%d) invalid\n", | ||
388 | __func__, __LINE__, height); | ||
389 | return -EINVAL; | ||
390 | } | ||
391 | break; | 450 | break; |
392 | default: | 451 | default: |
393 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | 452 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, |
@@ -395,15 +454,27 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
395 | track->db_depth_info); | 454 | track->db_depth_info); |
396 | return -EINVAL; | 455 | return -EINVAL; |
397 | } | 456 | } |
398 | if (!IS_ALIGNED(track->db_offset, track->group_size)) { | 457 | |
399 | dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset); | 458 | if (!IS_ALIGNED(pitch, pitch_align)) { |
459 | dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", | ||
460 | __func__, __LINE__, pitch); | ||
461 | return -EINVAL; | ||
462 | } | ||
463 | if (!IS_ALIGNED(height, height_align)) { | ||
464 | dev_warn(p->dev, "%s:%d db height (%d) invalid\n", | ||
465 | __func__, __LINE__, height); | ||
400 | return -EINVAL; | 466 | return -EINVAL; |
401 | } | 467 | } |
468 | if (!IS_ALIGNED(base_offset, base_align)) { | ||
469 | dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); | ||
470 | return -EINVAL; | ||
471 | } | ||
472 | |||
402 | ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; | 473 | ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; |
403 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; | 474 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; |
404 | tmp = ntiles * bpe * 64 * nviews; | 475 | tmp = ntiles * bpe * 64 * nviews; |
405 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { | 476 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { |
406 | dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n", | 477 | dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n", |
407 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, | 478 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, |
408 | radeon_bo_size(track->db_bo)); | 479 | radeon_bo_size(track->db_bo)); |
409 | return -EINVAL; | 480 | return -EINVAL; |
@@ -954,6 +1025,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
954 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1025 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
955 | track->cb_color_base_last[tmp] = ib[idx]; | 1026 | track->cb_color_base_last[tmp] = ib[idx]; |
956 | track->cb_color_bo[tmp] = reloc->robj; | 1027 | track->cb_color_bo[tmp] = reloc->robj; |
1028 | track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; | ||
957 | break; | 1029 | break; |
958 | case DB_DEPTH_BASE: | 1030 | case DB_DEPTH_BASE: |
959 | r = r600_cs_packet_next_reloc(p, &reloc); | 1031 | r = r600_cs_packet_next_reloc(p, &reloc); |
@@ -965,6 +1037,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
965 | track->db_offset = radeon_get_ib_value(p, idx) << 8; | 1037 | track->db_offset = radeon_get_ib_value(p, idx) << 8; |
966 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1038 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
967 | track->db_bo = reloc->robj; | 1039 | track->db_bo = reloc->robj; |
1040 | track->db_bo_mc = reloc->lobj.gpu_offset; | ||
968 | break; | 1041 | break; |
969 | case DB_HTILE_DATA_BASE: | 1042 | case DB_HTILE_DATA_BASE: |
970 | case SQ_PGM_START_FS: | 1043 | case SQ_PGM_START_FS: |
@@ -1086,16 +1159,25 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels | |||
1086 | static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | 1159 | static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, |
1087 | struct radeon_bo *texture, | 1160 | struct radeon_bo *texture, |
1088 | struct radeon_bo *mipmap, | 1161 | struct radeon_bo *mipmap, |
1162 | u64 base_offset, | ||
1163 | u64 mip_offset, | ||
1089 | u32 tiling_flags) | 1164 | u32 tiling_flags) |
1090 | { | 1165 | { |
1091 | struct r600_cs_track *track = p->track; | 1166 | struct r600_cs_track *track = p->track; |
1092 | u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; | 1167 | u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; |
1093 | u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align; | 1168 | u32 word0, word1, l0_size, mipmap_size; |
1169 | u32 height_align, pitch, pitch_align, depth_align; | ||
1170 | u64 base_align; | ||
1171 | struct array_mode_checker array_check; | ||
1094 | 1172 | ||
1095 | /* on legacy kernel we don't perform advanced check */ | 1173 | /* on legacy kernel we don't perform advanced check */ |
1096 | if (p->rdev == NULL) | 1174 | if (p->rdev == NULL) |
1097 | return 0; | 1175 | return 0; |
1098 | 1176 | ||
1177 | /* convert to bytes */ | ||
1178 | base_offset <<= 8; | ||
1179 | mip_offset <<= 8; | ||
1180 | |||
1099 | word0 = radeon_get_ib_value(p, idx + 0); | 1181 | word0 = radeon_get_ib_value(p, idx + 0); |
1100 | if (tiling_flags & RADEON_TILING_MACRO) | 1182 | if (tiling_flags & RADEON_TILING_MACRO) |
1101 | word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); | 1183 | word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); |
@@ -1128,46 +1210,38 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i | |||
1128 | return -EINVAL; | 1210 | return -EINVAL; |
1129 | } | 1211 | } |
1130 | 1212 | ||
1131 | pitch = G_038000_PITCH(word0) + 1; | 1213 | /* pitch in texels */ |
1132 | switch (G_038000_TILE_MODE(word0)) { | 1214 | pitch = (G_038000_PITCH(word0) + 1) * 8; |
1133 | case V_038000_ARRAY_LINEAR_GENERAL: | 1215 | array_check.array_mode = G_038000_TILE_MODE(word0); |
1134 | pitch_align = 1; | 1216 | array_check.group_size = track->group_size; |
1135 | /* XXX check height align */ | 1217 | array_check.nbanks = track->nbanks; |
1136 | break; | 1218 | array_check.npipes = track->npipes; |
1137 | case V_038000_ARRAY_LINEAR_ALIGNED: | 1219 | array_check.nsamples = 1; |
1138 | pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; | 1220 | array_check.bpe = bpe; |
1139 | if (!IS_ALIGNED(pitch, pitch_align)) { | 1221 | if (r600_get_array_mode_alignment(&array_check, |
1140 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", | 1222 | &pitch_align, &height_align, &depth_align, &base_align)) { |
1141 | __func__, __LINE__, pitch); | 1223 | dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", |
1142 | return -EINVAL; | 1224 | __func__, __LINE__, G_038000_TILE_MODE(word0)); |
1143 | } | 1225 | return -EINVAL; |
1144 | /* XXX check height align */ | 1226 | } |
1145 | break; | 1227 | |
1146 | case V_038000_ARRAY_1D_TILED_THIN1: | 1228 | /* XXX check height as well... */ |
1147 | pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8; | 1229 | |
1148 | if (!IS_ALIGNED(pitch, pitch_align)) { | 1230 | if (!IS_ALIGNED(pitch, pitch_align)) { |
1149 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", | 1231 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", |
1150 | __func__, __LINE__, pitch); | 1232 | __func__, __LINE__, pitch); |
1151 | return -EINVAL; | 1233 | return -EINVAL; |
1152 | } | 1234 | } |
1153 | /* XXX check height align */ | 1235 | if (!IS_ALIGNED(base_offset, base_align)) { |
1154 | break; | 1236 | dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n", |
1155 | case V_038000_ARRAY_2D_TILED_THIN1: | 1237 | __func__, __LINE__, base_offset); |
1156 | pitch_align = max((u32)track->nbanks, | 1238 | return -EINVAL; |
1157 | (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; | 1239 | } |
1158 | if (!IS_ALIGNED(pitch, pitch_align)) { | 1240 | if (!IS_ALIGNED(mip_offset, base_align)) { |
1159 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", | 1241 | dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n", |
1160 | __func__, __LINE__, pitch); | 1242 | __func__, __LINE__, mip_offset); |
1161 | return -EINVAL; | ||
1162 | } | ||
1163 | /* XXX check height align */ | ||
1164 | break; | ||
1165 | default: | ||
1166 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
1167 | G_038000_TILE_MODE(word0), word0); | ||
1168 | return -EINVAL; | 1243 | return -EINVAL; |
1169 | } | 1244 | } |
1170 | /* XXX check offset align */ | ||
1171 | 1245 | ||
1172 | word0 = radeon_get_ib_value(p, idx + 4); | 1246 | word0 = radeon_get_ib_value(p, idx + 4); |
1173 | word1 = radeon_get_ib_value(p, idx + 5); | 1247 | word1 = radeon_get_ib_value(p, idx + 5); |
@@ -1402,7 +1476,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1402 | mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1476 | mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
1403 | mipmap = reloc->robj; | 1477 | mipmap = reloc->robj; |
1404 | r = r600_check_texture_resource(p, idx+(i*7)+1, | 1478 | r = r600_check_texture_resource(p, idx+(i*7)+1, |
1405 | texture, mipmap, reloc->lobj.tiling_flags); | 1479 | texture, mipmap, |
1480 | base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2), | ||
1481 | mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3), | ||
1482 | reloc->lobj.tiling_flags); | ||
1406 | if (r) | 1483 | if (r) |
1407 | return r; | 1484 | return r; |
1408 | ib[idx+1+(i*7)+2] += base_offset; | 1485 | ib[idx+1+(i*7)+2] += base_offset; |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 966a793e225b..bff4dc4f410f 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -51,6 +51,12 @@ | |||
51 | #define PTE_READABLE (1 << 5) | 51 | #define PTE_READABLE (1 << 5) |
52 | #define PTE_WRITEABLE (1 << 6) | 52 | #define PTE_WRITEABLE (1 << 6) |
53 | 53 | ||
54 | /* tiling bits */ | ||
55 | #define ARRAY_LINEAR_GENERAL 0x00000000 | ||
56 | #define ARRAY_LINEAR_ALIGNED 0x00000001 | ||
57 | #define ARRAY_1D_TILED_THIN1 0x00000002 | ||
58 | #define ARRAY_2D_TILED_THIN1 0x00000004 | ||
59 | |||
54 | /* Registers */ | 60 | /* Registers */ |
55 | #define ARB_POP 0x2418 | 61 | #define ARB_POP 0x2418 |
56 | #define ENABLE_TC128 (1 << 30) | 62 | #define ENABLE_TC128 (1 << 30) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 73f600d39ad4..3a7095743d44 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -1262,6 +1262,10 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); | |||
1262 | (rdev->family == CHIP_RS400) || \ | 1262 | (rdev->family == CHIP_RS400) || \ |
1263 | (rdev->family == CHIP_RS480)) | 1263 | (rdev->family == CHIP_RS480)) |
1264 | #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) | 1264 | #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) |
1265 | #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ | ||
1266 | (rdev->family == CHIP_RS690) || \ | ||
1267 | (rdev->family == CHIP_RS740) || \ | ||
1268 | (rdev->family >= CHIP_R600)) | ||
1265 | #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) | 1269 | #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) |
1266 | #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) | 1270 | #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) |
1267 | #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) | 1271 | #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) |
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 7932dc4d6b90..c558685cc637 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
41 | 41 | ||
42 | size = bsize; | 42 | size = bsize; |
43 | n = 1024; | 43 | n = 1024; |
44 | r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj); | 44 | r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj); |
45 | if (r) { | 45 | if (r) { |
46 | goto out_cleanup; | 46 | goto out_cleanup; |
47 | } | 47 | } |
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
53 | if (r) { | 53 | if (r) { |
54 | goto out_cleanup; | 54 | goto out_cleanup; |
55 | } | 55 | } |
56 | r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj); | 56 | r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj); |
57 | if (r) { | 57 | if (r) { |
58 | goto out_cleanup; | 58 | goto out_cleanup; |
59 | } | 59 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 7b7ea269549c..3bddea5b5295 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -571,6 +571,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde | |||
571 | } | 571 | } |
572 | 572 | ||
573 | if (clk_mask && data_mask) { | 573 | if (clk_mask && data_mask) { |
574 | /* system specific masks */ | ||
574 | i2c.mask_clk_mask = clk_mask; | 575 | i2c.mask_clk_mask = clk_mask; |
575 | i2c.mask_data_mask = data_mask; | 576 | i2c.mask_data_mask = data_mask; |
576 | i2c.a_clk_mask = clk_mask; | 577 | i2c.a_clk_mask = clk_mask; |
@@ -579,7 +580,19 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde | |||
579 | i2c.en_data_mask = data_mask; | 580 | i2c.en_data_mask = data_mask; |
580 | i2c.y_clk_mask = clk_mask; | 581 | i2c.y_clk_mask = clk_mask; |
581 | i2c.y_data_mask = data_mask; | 582 | i2c.y_data_mask = data_mask; |
583 | } else if ((ddc_line == RADEON_GPIOPAD_MASK) || | ||
584 | (ddc_line == RADEON_MDGPIO_MASK)) { | ||
585 | /* default gpiopad masks */ | ||
586 | i2c.mask_clk_mask = (0x20 << 8); | ||
587 | i2c.mask_data_mask = 0x80; | ||
588 | i2c.a_clk_mask = (0x20 << 8); | ||
589 | i2c.a_data_mask = 0x80; | ||
590 | i2c.en_clk_mask = (0x20 << 8); | ||
591 | i2c.en_data_mask = 0x80; | ||
592 | i2c.y_clk_mask = (0x20 << 8); | ||
593 | i2c.y_data_mask = 0x80; | ||
582 | } else { | 594 | } else { |
595 | /* default masks for ddc pads */ | ||
583 | i2c.mask_clk_mask = RADEON_GPIO_EN_1; | 596 | i2c.mask_clk_mask = RADEON_GPIO_EN_1; |
584 | i2c.mask_data_mask = RADEON_GPIO_EN_0; | 597 | i2c.mask_data_mask = RADEON_GPIO_EN_0; |
585 | i2c.a_clk_mask = RADEON_GPIO_A_1; | 598 | i2c.a_clk_mask = RADEON_GPIO_A_1; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index fe6c74780f18..3bef9f6d66fd 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -1008,9 +1008,21 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector) | |||
1008 | static int radeon_dp_get_modes(struct drm_connector *connector) | 1008 | static int radeon_dp_get_modes(struct drm_connector *connector) |
1009 | { | 1009 | { |
1010 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 1010 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
1011 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; | ||
1011 | int ret; | 1012 | int ret; |
1012 | 1013 | ||
1014 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
1015 | if (!radeon_dig_connector->edp_on) | ||
1016 | atombios_set_edp_panel_power(connector, | ||
1017 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
1018 | } | ||
1013 | ret = radeon_ddc_get_modes(radeon_connector); | 1019 | ret = radeon_ddc_get_modes(radeon_connector); |
1020 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
1021 | if (!radeon_dig_connector->edp_on) | ||
1022 | atombios_set_edp_panel_power(connector, | ||
1023 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | ||
1024 | } | ||
1025 | |||
1014 | return ret; | 1026 | return ret; |
1015 | } | 1027 | } |
1016 | 1028 | ||
@@ -1029,8 +1041,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1029 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | 1041 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { |
1030 | /* eDP is always DP */ | 1042 | /* eDP is always DP */ |
1031 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; | 1043 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; |
1044 | if (!radeon_dig_connector->edp_on) | ||
1045 | atombios_set_edp_panel_power(connector, | ||
1046 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
1032 | if (radeon_dp_getdpcd(radeon_connector)) | 1047 | if (radeon_dp_getdpcd(radeon_connector)) |
1033 | ret = connector_status_connected; | 1048 | ret = connector_status_connected; |
1049 | if (!radeon_dig_connector->edp_on) | ||
1050 | atombios_set_edp_panel_power(connector, | ||
1051 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | ||
1034 | } else { | 1052 | } else { |
1035 | radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); | 1053 | radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); |
1036 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { | 1054 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { |
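The eDP changes above power the panel on around the EDID and DPCD reads, and power it back off if it was not already on. A condensed sketch of that pattern; edp_ddc_probe and the probe callback are hypothetical names standing in for radeon_ddc_get_modes()/radeon_dp_getdpcd().

/* Sketch of the eDP power-gating pattern used in radeon_dp_get_modes() and
 * radeon_dp_detect() above. */
static int edp_ddc_probe(struct drm_connector *connector,
			 struct radeon_connector_atom_dig *dig,
			 int (*probe)(struct drm_connector *connector))
{
	bool powered_here = false;
	int ret;

	if (!dig->edp_on) {
		atombios_set_edp_panel_power(connector,
					     ATOM_TRANSMITTER_ACTION_POWER_ON);
		powered_here = true;
	}

	ret = probe(connector);	/* e.g. read EDID or DPCD */

	if (powered_here)
		atombios_set_edp_panel_power(connector,
					     ATOM_TRANSMITTER_ACTION_POWER_OFF);
	return ret;
}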
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 8adfedfe547f..d8ac1849180d 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -180,7 +180,7 @@ int radeon_wb_init(struct radeon_device *rdev) | |||
180 | int r; | 180 | int r; |
181 | 181 | ||
182 | if (rdev->wb.wb_obj == NULL) { | 182 | if (rdev->wb.wb_obj == NULL) { |
183 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, | 183 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, |
184 | RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); | 184 | RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); |
185 | if (r) { | 185 | if (r) { |
186 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); | 186 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index f678257c42e6..041943df966b 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -176,6 +176,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) | |||
176 | return false; | 176 | return false; |
177 | } | 177 | } |
178 | } | 178 | } |
179 | |||
179 | void | 180 | void |
180 | radeon_link_encoder_connector(struct drm_device *dev) | 181 | radeon_link_encoder_connector(struct drm_device *dev) |
181 | { | 182 | { |
@@ -228,6 +229,27 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) | |||
228 | return NULL; | 229 | return NULL; |
229 | } | 230 | } |
230 | 231 | ||
232 | struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder) | ||
233 | { | ||
234 | struct drm_device *dev = encoder->dev; | ||
235 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
236 | struct drm_encoder *other_encoder; | ||
237 | struct radeon_encoder *other_radeon_encoder; | ||
238 | |||
239 | if (radeon_encoder->is_ext_encoder) | ||
240 | return NULL; | ||
241 | |||
242 | list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { | ||
243 | if (other_encoder == encoder) | ||
244 | continue; | ||
245 | other_radeon_encoder = to_radeon_encoder(other_encoder); | ||
246 | if (other_radeon_encoder->is_ext_encoder && | ||
247 | (radeon_encoder->devices & other_radeon_encoder->devices)) | ||
248 | return other_encoder; | ||
249 | } | ||
250 | return NULL; | ||
251 | } | ||
252 | |||
231 | void radeon_panel_mode_fixup(struct drm_encoder *encoder, | 253 | void radeon_panel_mode_fixup(struct drm_encoder *encoder, |
232 | struct drm_display_mode *adjusted_mode) | 254 | struct drm_display_mode *adjusted_mode) |
233 | { | 255 | { |
@@ -426,52 +448,49 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) | |||
426 | 448 | ||
427 | } | 449 | } |
428 | 450 | ||
429 | void | 451 | union dvo_encoder_control { |
430 | atombios_external_tmds_setup(struct drm_encoder *encoder, int action) | 452 | ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; |
431 | { | 453 | DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; |
432 | struct drm_device *dev = encoder->dev; | 454 | DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; |
433 | struct radeon_device *rdev = dev->dev_private; | 455 | }; |
434 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
435 | ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args; | ||
436 | int index = 0; | ||
437 | |||
438 | memset(&args, 0, sizeof(args)); | ||
439 | |||
440 | index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); | ||
441 | |||
442 | args.sXTmdsEncoder.ucEnable = action; | ||
443 | |||
444 | if (radeon_encoder->pixel_clock > 165000) | ||
445 | args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL; | ||
446 | |||
447 | /*if (pScrn->rgbBits == 8)*/ | ||
448 | args.sXTmdsEncoder.ucMisc |= (1 << 1); | ||
449 | |||
450 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
451 | |||
452 | } | ||
453 | 456 | ||
454 | static void | 457 | void |
455 | atombios_ddia_setup(struct drm_encoder *encoder, int action) | 458 | atombios_dvo_setup(struct drm_encoder *encoder, int action) |
456 | { | 459 | { |
457 | struct drm_device *dev = encoder->dev; | 460 | struct drm_device *dev = encoder->dev; |
458 | struct radeon_device *rdev = dev->dev_private; | 461 | struct radeon_device *rdev = dev->dev_private; |
459 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 462 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
460 | DVO_ENCODER_CONTROL_PS_ALLOCATION args; | 463 | union dvo_encoder_control args; |
461 | int index = 0; | 464 | int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); |
462 | 465 | ||
463 | memset(&args, 0, sizeof(args)); | 466 | memset(&args, 0, sizeof(args)); |
464 | 467 | ||
465 | index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); | 468 | if (ASIC_IS_DCE3(rdev)) { |
469 | /* DCE3+ */ | ||
470 | args.dvo_v3.ucAction = action; | ||
471 | args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
472 | args.dvo_v3.ucDVOConfig = 0; /* XXX */ | ||
473 | } else if (ASIC_IS_DCE2(rdev)) { | ||
474 | /* DCE2 (pre-DCE3 R6xx, RS600/690/740) */ | ||
475 | args.dvo.sDVOEncoder.ucAction = action; | ||
476 | args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
477 | /* DFP1, CRT1, TV1 depending on the type of port */ | ||
478 | args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX; | ||
479 | |||
480 | if (radeon_encoder->pixel_clock > 165000) | ||
481 | args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL; | ||
482 | } else { | ||
483 | /* R4xx, R5xx */ | ||
484 | args.ext_tmds.sXTmdsEncoder.ucEnable = action; | ||
466 | 485 | ||
467 | args.sDVOEncoder.ucAction = action; | 486 | if (radeon_encoder->pixel_clock > 165000) |
468 | args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 487 | args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
469 | 488 | ||
470 | if (radeon_encoder->pixel_clock > 165000) | 489 | /*if (pScrn->rgbBits == 8)*/ |
471 | args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL; | 490 | args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB; |
491 | } | ||
472 | 492 | ||
473 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 493 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
474 | |||
475 | } | 494 | } |
476 | 495 | ||
477 | union lvds_encoder_control { | 496 | union lvds_encoder_control { |
@@ -532,14 +551,14 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
532 | if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) | 551 | if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) |
533 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 552 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
534 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) | 553 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) |
535 | args.v1.ucMisc |= (1 << 1); | 554 | args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; |
536 | } else { | 555 | } else { |
537 | if (dig->linkb) | 556 | if (dig->linkb) |
538 | args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; | 557 | args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; |
539 | if (radeon_encoder->pixel_clock > 165000) | 558 | if (radeon_encoder->pixel_clock > 165000) |
540 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 559 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
541 | /*if (pScrn->rgbBits == 8) */ | 560 | /*if (pScrn->rgbBits == 8) */ |
542 | args.v1.ucMisc |= (1 << 1); | 561 | args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; |
543 | } | 562 | } |
544 | break; | 563 | break; |
545 | case 2: | 564 | case 2: |
@@ -595,6 +614,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
595 | int | 614 | int |
596 | atombios_get_encoder_mode(struct drm_encoder *encoder) | 615 | atombios_get_encoder_mode(struct drm_encoder *encoder) |
597 | { | 616 | { |
617 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
598 | struct drm_device *dev = encoder->dev; | 618 | struct drm_device *dev = encoder->dev; |
599 | struct radeon_device *rdev = dev->dev_private; | 619 | struct radeon_device *rdev = dev->dev_private; |
600 | struct drm_connector *connector; | 620 | struct drm_connector *connector; |
@@ -602,9 +622,20 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
602 | struct radeon_connector_atom_dig *dig_connector; | 622 | struct radeon_connector_atom_dig *dig_connector; |
603 | 623 | ||
604 | connector = radeon_get_connector_for_encoder(encoder); | 624 | connector = radeon_get_connector_for_encoder(encoder); |
605 | if (!connector) | 625 | if (!connector) { |
606 | return 0; | 626 | switch (radeon_encoder->encoder_id) { |
607 | 627 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | |
628 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
629 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
630 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
631 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
632 | return ATOM_ENCODER_MODE_DVI; | ||
633 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
634 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
635 | default: | ||
636 | return ATOM_ENCODER_MODE_CRT; | ||
637 | } | ||
638 | } | ||
608 | radeon_connector = to_radeon_connector(connector); | 639 | radeon_connector = to_radeon_connector(connector); |
609 | 640 | ||
610 | switch (connector->connector_type) { | 641 | switch (connector->connector_type) { |
@@ -834,6 +865,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
834 | memset(&args, 0, sizeof(args)); | 865 | memset(&args, 0, sizeof(args)); |
835 | 866 | ||
836 | switch (radeon_encoder->encoder_id) { | 867 | switch (radeon_encoder->encoder_id) { |
868 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
869 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); | ||
870 | break; | ||
837 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 871 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
838 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 872 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
839 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 873 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
@@ -978,6 +1012,105 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
978 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1012 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
979 | } | 1013 | } |
980 | 1014 | ||
1015 | void | ||
1016 | atombios_set_edp_panel_power(struct drm_connector *connector, int action) | ||
1017 | { | ||
1018 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1019 | struct drm_device *dev = radeon_connector->base.dev; | ||
1020 | struct radeon_device *rdev = dev->dev_private; | ||
1021 | union dig_transmitter_control args; | ||
1022 | int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); | ||
1023 | uint8_t frev, crev; | ||
1024 | |||
1025 | if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) | ||
1026 | return; | ||
1027 | |||
1028 | if (!ASIC_IS_DCE4(rdev)) | ||
1029 | return; | ||
1030 | |||
1031 | if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && | ||
1032 | (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) | ||
1033 | return; | ||
1034 | |||
1035 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
1036 | return; | ||
1037 | |||
1038 | memset(&args, 0, sizeof(args)); | ||
1039 | |||
1040 | args.v1.ucAction = action; | ||
1041 | |||
1042 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1043 | } | ||
1044 | |||
1045 | union external_encoder_control { | ||
1046 | EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; | ||
1047 | }; | ||
1048 | |||
1049 | static void | ||
1050 | atombios_external_encoder_setup(struct drm_encoder *encoder, | ||
1051 | struct drm_encoder *ext_encoder, | ||
1052 | int action) | ||
1053 | { | ||
1054 | struct drm_device *dev = encoder->dev; | ||
1055 | struct radeon_device *rdev = dev->dev_private; | ||
1056 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1057 | union external_encoder_control args; | ||
1058 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1059 | int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); | ||
1060 | u8 frev, crev; | ||
1061 | int dp_clock = 0; | ||
1062 | int dp_lane_count = 0; | ||
1063 | int connector_object_id = 0; | ||
1064 | |||
1065 | if (connector) { | ||
1066 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1067 | struct radeon_connector_atom_dig *dig_connector = | ||
1068 | radeon_connector->con_priv; | ||
1069 | |||
1070 | dp_clock = dig_connector->dp_clock; | ||
1071 | dp_lane_count = dig_connector->dp_lane_count; | ||
1072 | connector_object_id = | ||
1073 | (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
1074 | } | ||
1075 | |||
1076 | memset(&args, 0, sizeof(args)); | ||
1077 | |||
1078 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
1079 | return; | ||
1080 | |||
1081 | switch (frev) { | ||
1082 | case 1: | ||
1083 | /* no params on frev 1 */ | ||
1084 | break; | ||
1085 | case 2: | ||
1086 | switch (crev) { | ||
1087 | case 1: | ||
1088 | case 2: | ||
1089 | args.v1.sDigEncoder.ucAction = action; | ||
1090 | args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
1091 | args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); | ||
1092 | |||
1093 | if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) { | ||
1094 | if (dp_clock == 270000) | ||
1095 | args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | ||
1096 | args.v1.sDigEncoder.ucLaneNum = dp_lane_count; | ||
1097 | } else if (radeon_encoder->pixel_clock > 165000) | ||
1098 | args.v1.sDigEncoder.ucLaneNum = 8; | ||
1099 | else | ||
1100 | args.v1.sDigEncoder.ucLaneNum = 4; | ||
1101 | break; | ||
1102 | default: | ||
1103 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | ||
1104 | return; | ||
1105 | } | ||
1106 | break; | ||
1107 | default: | ||
1108 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | ||
1109 | return; | ||
1110 | } | ||
1111 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1112 | } | ||
1113 | |||
981 | static void | 1114 | static void |
982 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) | 1115 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) |
983 | { | 1116 | { |
@@ -1021,6 +1154,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1021 | struct drm_device *dev = encoder->dev; | 1154 | struct drm_device *dev = encoder->dev; |
1022 | struct radeon_device *rdev = dev->dev_private; | 1155 | struct radeon_device *rdev = dev->dev_private; |
1023 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1156 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1157 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | ||
1024 | DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; | 1158 | DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; |
1025 | int index = 0; | 1159 | int index = 0; |
1026 | bool is_dig = false; | 1160 | bool is_dig = false; |
@@ -1043,9 +1177,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1043 | break; | 1177 | break; |
1044 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 1178 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
1045 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 1179 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
1046 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
1047 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); | 1180 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); |
1048 | break; | 1181 | break; |
1182 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
1183 | if (ASIC_IS_DCE3(rdev)) | ||
1184 | is_dig = true; | ||
1185 | else | ||
1186 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); | ||
1187 | break; | ||
1049 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 1188 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
1050 | index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); | 1189 | index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); |
1051 | break; | 1190 | break; |
@@ -1082,34 +1221,85 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1082 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1221 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
1083 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 1222 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
1084 | 1223 | ||
1224 | if (connector && | ||
1225 | (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { | ||
1226 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1227 | struct radeon_connector_atom_dig *radeon_dig_connector = | ||
1228 | radeon_connector->con_priv; | ||
1229 | atombios_set_edp_panel_power(connector, | ||
1230 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
1231 | radeon_dig_connector->edp_on = true; | ||
1232 | } | ||
1085 | dp_link_train(encoder, connector); | 1233 | dp_link_train(encoder, connector); |
1086 | if (ASIC_IS_DCE4(rdev)) | 1234 | if (ASIC_IS_DCE4(rdev)) |
1087 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); | 1235 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); |
1088 | } | 1236 | } |
1237 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
1238 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); | ||
1089 | break; | 1239 | break; |
1090 | case DRM_MODE_DPMS_STANDBY: | 1240 | case DRM_MODE_DPMS_STANDBY: |
1091 | case DRM_MODE_DPMS_SUSPEND: | 1241 | case DRM_MODE_DPMS_SUSPEND: |
1092 | case DRM_MODE_DPMS_OFF: | 1242 | case DRM_MODE_DPMS_OFF: |
1093 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); | 1243 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); |
1094 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1244 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
1245 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1246 | |||
1095 | if (ASIC_IS_DCE4(rdev)) | 1247 | if (ASIC_IS_DCE4(rdev)) |
1096 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); | 1248 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); |
1249 | if (connector && | ||
1250 | (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { | ||
1251 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1252 | struct radeon_connector_atom_dig *radeon_dig_connector = | ||
1253 | radeon_connector->con_priv; | ||
1254 | atombios_set_edp_panel_power(connector, | ||
1255 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | ||
1256 | radeon_dig_connector->edp_on = false; | ||
1257 | } | ||
1097 | } | 1258 | } |
1259 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
1260 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); | ||
1098 | break; | 1261 | break; |
1099 | } | 1262 | } |
1100 | } else { | 1263 | } else { |
1101 | switch (mode) { | 1264 | switch (mode) { |
1102 | case DRM_MODE_DPMS_ON: | 1265 | case DRM_MODE_DPMS_ON: |
1103 | args.ucAction = ATOM_ENABLE; | 1266 | args.ucAction = ATOM_ENABLE; |
1267 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1268 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
1269 | args.ucAction = ATOM_LCD_BLON; | ||
1270 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1271 | } | ||
1104 | break; | 1272 | break; |
1105 | case DRM_MODE_DPMS_STANDBY: | 1273 | case DRM_MODE_DPMS_STANDBY: |
1106 | case DRM_MODE_DPMS_SUSPEND: | 1274 | case DRM_MODE_DPMS_SUSPEND: |
1107 | case DRM_MODE_DPMS_OFF: | 1275 | case DRM_MODE_DPMS_OFF: |
1108 | args.ucAction = ATOM_DISABLE; | 1276 | args.ucAction = ATOM_DISABLE; |
1277 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1278 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
1279 | args.ucAction = ATOM_LCD_BLOFF; | ||
1280 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1281 | } | ||
1109 | break; | 1282 | break; |
1110 | } | 1283 | } |
1111 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1112 | } | 1284 | } |
1285 | |||
1286 | if (ext_encoder) { | ||
1287 | int action; | ||
1288 | |||
1289 | switch (mode) { | ||
1290 | case DRM_MODE_DPMS_ON: | ||
1291 | default: | ||
1292 | action = ATOM_ENABLE; | ||
1293 | break; | ||
1294 | case DRM_MODE_DPMS_STANDBY: | ||
1295 | case DRM_MODE_DPMS_SUSPEND: | ||
1296 | case DRM_MODE_DPMS_OFF: | ||
1297 | action = ATOM_DISABLE; | ||
1298 | break; | ||
1299 | } | ||
1300 | atombios_external_encoder_setup(encoder, ext_encoder, action); | ||
1301 | } | ||
1302 | |||
1113 | radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); | 1303 | radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); |
1114 | 1304 | ||
1115 | } | 1305 | } |
@@ -1242,7 +1432,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | |||
1242 | break; | 1432 | break; |
1243 | default: | 1433 | default: |
1244 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | 1434 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); |
1245 | break; | 1435 | return; |
1246 | } | 1436 | } |
1247 | 1437 | ||
1248 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1438 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
@@ -1357,6 +1547,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1357 | struct drm_device *dev = encoder->dev; | 1547 | struct drm_device *dev = encoder->dev; |
1358 | struct radeon_device *rdev = dev->dev_private; | 1548 | struct radeon_device *rdev = dev->dev_private; |
1359 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1549 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1550 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | ||
1360 | 1551 | ||
1361 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1552 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
1362 | 1553 | ||
@@ -1400,11 +1591,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1400 | } | 1591 | } |
1401 | break; | 1592 | break; |
1402 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 1593 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
1403 | atombios_ddia_setup(encoder, ATOM_ENABLE); | ||
1404 | break; | ||
1405 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 1594 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
1406 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | 1595 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: |
1407 | atombios_external_tmds_setup(encoder, ATOM_ENABLE); | 1596 | atombios_dvo_setup(encoder, ATOM_ENABLE); |
1408 | break; | 1597 | break; |
1409 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | 1598 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
1410 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | 1599 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
@@ -1419,6 +1608,11 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1419 | } | 1608 | } |
1420 | break; | 1609 | break; |
1421 | } | 1610 | } |
1611 | |||
1612 | if (ext_encoder) { | ||
1613 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); | ||
1614 | } | ||
1615 | |||
1422 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 1616 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
1423 | 1617 | ||
1424 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { | 1618 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { |
@@ -1595,11 +1789,9 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1595 | } | 1789 | } |
1596 | break; | 1790 | break; |
1597 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 1791 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
1598 | atombios_ddia_setup(encoder, ATOM_DISABLE); | ||
1599 | break; | ||
1600 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 1792 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
1601 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | 1793 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: |
1602 | atombios_external_tmds_setup(encoder, ATOM_DISABLE); | 1794 | atombios_dvo_setup(encoder, ATOM_DISABLE); |
1603 | break; | 1795 | break; |
1604 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | 1796 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
1605 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | 1797 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
@@ -1621,6 +1813,53 @@ disable_done: | |||
1621 | radeon_encoder->active_device = 0; | 1813 | radeon_encoder->active_device = 0; |
1622 | } | 1814 | } |
1623 | 1815 | ||
1816 | /* these are handled by the primary encoders */ | ||
1817 | static void radeon_atom_ext_prepare(struct drm_encoder *encoder) | ||
1818 | { | ||
1819 | |||
1820 | } | ||
1821 | |||
1822 | static void radeon_atom_ext_commit(struct drm_encoder *encoder) | ||
1823 | { | ||
1824 | |||
1825 | } | ||
1826 | |||
1827 | static void | ||
1828 | radeon_atom_ext_mode_set(struct drm_encoder *encoder, | ||
1829 | struct drm_display_mode *mode, | ||
1830 | struct drm_display_mode *adjusted_mode) | ||
1831 | { | ||
1832 | |||
1833 | } | ||
1834 | |||
1835 | static void radeon_atom_ext_disable(struct drm_encoder *encoder) | ||
1836 | { | ||
1837 | |||
1838 | } | ||
1839 | |||
1840 | static void | ||
1841 | radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode) | ||
1842 | { | ||
1843 | |||
1844 | } | ||
1845 | |||
1846 | static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder, | ||
1847 | struct drm_display_mode *mode, | ||
1848 | struct drm_display_mode *adjusted_mode) | ||
1849 | { | ||
1850 | return true; | ||
1851 | } | ||
1852 | |||
1853 | static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = { | ||
1854 | .dpms = radeon_atom_ext_dpms, | ||
1855 | .mode_fixup = radeon_atom_ext_mode_fixup, | ||
1856 | .prepare = radeon_atom_ext_prepare, | ||
1857 | .mode_set = radeon_atom_ext_mode_set, | ||
1858 | .commit = radeon_atom_ext_commit, | ||
1859 | .disable = radeon_atom_ext_disable, | ||
1860 | /* no detect for TMDS/LVDS yet */ | ||
1861 | }; | ||
1862 | |||
1624 | static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { | 1863 | static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { |
1625 | .dpms = radeon_atom_encoder_dpms, | 1864 | .dpms = radeon_atom_encoder_dpms, |
1626 | .mode_fixup = radeon_atom_mode_fixup, | 1865 | .mode_fixup = radeon_atom_mode_fixup, |
@@ -1730,6 +1969,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t | |||
1730 | radeon_encoder->devices = supported_device; | 1969 | radeon_encoder->devices = supported_device; |
1731 | radeon_encoder->rmx_type = RMX_OFF; | 1970 | radeon_encoder->rmx_type = RMX_OFF; |
1732 | radeon_encoder->underscan_type = UNDERSCAN_OFF; | 1971 | radeon_encoder->underscan_type = UNDERSCAN_OFF; |
1972 | radeon_encoder->is_ext_encoder = false; | ||
1733 | 1973 | ||
1734 | switch (radeon_encoder->encoder_id) { | 1974 | switch (radeon_encoder->encoder_id) { |
1735 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 1975 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
@@ -1771,6 +2011,9 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t | |||
1771 | radeon_encoder->rmx_type = RMX_FULL; | 2011 | radeon_encoder->rmx_type = RMX_FULL; |
1772 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); | 2012 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); |
1773 | radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); | 2013 | radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); |
2014 | } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { | ||
2015 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); | ||
2016 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); | ||
1774 | } else { | 2017 | } else { |
1775 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); | 2018 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); |
1776 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); | 2019 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); |
@@ -1779,5 +2022,22 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t | |||
1779 | } | 2022 | } |
1780 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); | 2023 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); |
1781 | break; | 2024 | break; |
2025 | case ENCODER_OBJECT_ID_SI170B: | ||
2026 | case ENCODER_OBJECT_ID_CH7303: | ||
2027 | case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: | ||
2028 | case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: | ||
2029 | case ENCODER_OBJECT_ID_TITFP513: | ||
2030 | case ENCODER_OBJECT_ID_VT1623: | ||
2031 | case ENCODER_OBJECT_ID_HDMI_SI1930: | ||
2032 | /* these are handled by the primary encoders */ | ||
2033 | radeon_encoder->is_ext_encoder = true; | ||
2034 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
2035 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); | ||
2036 | else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) | ||
2037 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); | ||
2038 | else | ||
2039 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); | ||
2040 | drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs); | ||
2041 | break; | ||
1782 | } | 2042 | } |
1783 | } | 2043 | } |
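The DPMS hunk above now also drives any external encoder attached to the pipe, collapsing the four DRM power states down to the two actions the ATOM tables understand (plus the LCD backlight on/off calls for panel devices). A standalone sketch of that mapping; the constants here are placeholders, the real values come from the DRM and ATOM headers:

#include <stdio.h>

/* Placeholder constants, for illustration only. */
enum { ATOM_DISABLE = 0, ATOM_ENABLE = 1 };
enum { DPMS_ON = 0, DPMS_STANDBY = 1, DPMS_SUSPEND = 2, DPMS_OFF = 3 };

/* External encoders only know "on" and "off", so every low-power DPMS
 * state collapses to ATOM_DISABLE, as in the patch above. */
static int dpms_to_action(int mode)
{
    switch (mode) {
    case DPMS_ON:
    default:
        return ATOM_ENABLE;
    case DPMS_STANDBY:
    case DPMS_SUSPEND:
    case DPMS_OFF:
        return ATOM_DISABLE;
    }
}

int main(void)
{
    printf("%d %d\n", dpms_to_action(DPMS_ON), dpms_to_action(DPMS_SUSPEND)); /* 1 0 */
    return 0;
}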
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index e65b90317fab..65016117d95f 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -79,8 +79,8 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) | |||
79 | 79 | ||
80 | if (rdev->gart.table.vram.robj == NULL) { | 80 | if (rdev->gart.table.vram.robj == NULL) { |
81 | r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, | 81 | r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, |
82 | true, RADEON_GEM_DOMAIN_VRAM, | 82 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
83 | &rdev->gart.table.vram.robj); | 83 | &rdev->gart.table.vram.robj); |
84 | if (r) { | 84 | if (r) { |
85 | return r; | 85 | return r; |
86 | } | 86 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d1e595d91723..df95eb83dac6 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -67,7 +67,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
67 | if (alignment < PAGE_SIZE) { | 67 | if (alignment < PAGE_SIZE) { |
68 | alignment = PAGE_SIZE; | 68 | alignment = PAGE_SIZE; |
69 | } | 69 | } |
70 | r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj); | 70 | r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj); |
71 | if (r) { | 71 | if (r) { |
72 | if (r != -ERESTARTSYS) | 72 | if (r != -ERESTARTSYS) |
73 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", | 73 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 0cfbba02c4d0..ded2a45bc95c 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -896,7 +896,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
896 | ((rdev->family <= CHIP_RS480) || | 896 | ((rdev->family <= CHIP_RS480) || |
897 | ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { | 897 | ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { |
898 | /* set the radeon hw i2c adapter */ | 898 | /* set the radeon hw i2c adapter */ |
899 | sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name); | 899 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), |
900 | "Radeon i2c hw bus %s", name); | ||
900 | i2c->adapter.algo = &radeon_i2c_algo; | 901 | i2c->adapter.algo = &radeon_i2c_algo; |
901 | ret = i2c_add_adapter(&i2c->adapter); | 902 | ret = i2c_add_adapter(&i2c->adapter); |
902 | if (ret) { | 903 | if (ret) { |
@@ -905,7 +906,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
905 | } | 906 | } |
906 | } else { | 907 | } else { |
907 | /* set the radeon bit adapter */ | 908 | /* set the radeon bit adapter */ |
908 | sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name); | 909 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), |
910 | "Radeon i2c bit bus %s", name); | ||
909 | i2c->adapter.algo_data = &i2c->algo.bit; | 911 | i2c->adapter.algo_data = &i2c->algo.bit; |
910 | i2c->algo.bit.pre_xfer = pre_xfer; | 912 | i2c->algo.bit.pre_xfer = pre_xfer; |
911 | i2c->algo.bit.post_xfer = post_xfer; | 913 | i2c->algo.bit.post_xfer = post_xfer; |
@@ -946,6 +948,8 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, | |||
946 | i2c->rec = *rec; | 948 | i2c->rec = *rec; |
947 | i2c->adapter.owner = THIS_MODULE; | 949 | i2c->adapter.owner = THIS_MODULE; |
948 | i2c->dev = dev; | 950 | i2c->dev = dev; |
951 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), | ||
952 | "Radeon aux bus %s", name); | ||
949 | i2c_set_adapdata(&i2c->adapter, i2c); | 953 | i2c_set_adapdata(&i2c->adapter, i2c); |
950 | i2c->adapter.algo_data = &i2c->algo.dp; | 954 | i2c->adapter.algo_data = &i2c->algo.dp; |
951 | i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; | 955 | i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; |
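The sprintf-to-snprintf conversion above bounds the formatted adapter name to the size of the destination array, so a long bus name is truncated instead of overrunning the buffer; the same bounded formatting is also added for the DP aux adapter. A minimal userspace sketch of the idiom (the struct and its size are illustrative, not the real struct i2c_adapter):

#include <stdio.h>

struct adapter { char name[48]; };   /* illustrative size only */

static void set_adapter_name(struct adapter *a, const char *bus)
{
    /* Never writes more than sizeof(a->name) bytes, including the NUL;
     * longer names are silently truncated. */
    snprintf(a->name, sizeof(a->name), "Radeon i2c hw bus %s", bus);
}

int main(void)
{
    struct adapter a;
    set_adapter_name(&a, "DDC1");
    printf("%s\n", a.name);          /* "Radeon i2c hw bus DDC1" */
    return 0;
}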
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index 2f349a300195..465746bd51b7 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c | |||
@@ -76,7 +76,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc) | |||
76 | default: | 76 | default: |
77 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", | 77 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", |
78 | crtc); | 78 | crtc); |
79 | return EINVAL; | 79 | return -EINVAL; |
80 | } | 80 | } |
81 | } else { | 81 | } else { |
82 | switch (crtc) { | 82 | switch (crtc) { |
@@ -89,7 +89,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc) | |||
89 | default: | 89 | default: |
90 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", | 90 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", |
91 | crtc); | 91 | crtc); |
92 | return EINVAL; | 92 | return -EINVAL; |
93 | } | 93 | } |
94 | } | 94 | } |
95 | 95 | ||
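The radeon_irq.c fix turns a positive EINVAL into -EINVAL: kernel code reports failure as a negative errno value, and callers that only test for ret < 0 would have treated the old positive return as success. A tiny sketch of the convention, using a hypothetical helper and userspace headers:

#include <errno.h>
#include <stdio.h>

/* Hypothetical helper following the kernel convention:
 * 0 on success, negative errno on failure. */
static int enable_vblank(int crtc)
{
    if (crtc != 0 && crtc != 1)
        return -EINVAL;              /* note the minus sign */
    return 0;
}

int main(void)
{
    int ret = enable_vblank(7);
    if (ret < 0)
        printf("failed: %d\n", ret); /* -22, caught by "ret < 0" checks */
    return 0;
}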
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 0b8397000f4c..59f834ba283d 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -670,7 +670,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
670 | 670 | ||
671 | if (rdev->is_atom_bios) { | 671 | if (rdev->is_atom_bios) { |
672 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 672 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
673 | atombios_external_tmds_setup(encoder, ATOM_ENABLE); | 673 | atombios_dvo_setup(encoder, ATOM_ENABLE); |
674 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); | 674 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
675 | } else { | 675 | } else { |
676 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); | 676 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 680f57644e86..e301c6f9e059 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -375,6 +375,7 @@ struct radeon_encoder { | |||
375 | int hdmi_config_offset; | 375 | int hdmi_config_offset; |
376 | int hdmi_audio_workaround; | 376 | int hdmi_audio_workaround; |
377 | int hdmi_buffer_status; | 377 | int hdmi_buffer_status; |
378 | bool is_ext_encoder; | ||
378 | }; | 379 | }; |
379 | 380 | ||
380 | struct radeon_connector_atom_dig { | 381 | struct radeon_connector_atom_dig { |
@@ -385,6 +386,7 @@ struct radeon_connector_atom_dig { | |||
385 | u8 dp_sink_type; | 386 | u8 dp_sink_type; |
386 | int dp_clock; | 387 | int dp_clock; |
387 | int dp_lane_count; | 388 | int dp_lane_count; |
389 | bool edp_on; | ||
388 | }; | 390 | }; |
389 | 391 | ||
390 | struct radeon_gpio_rec { | 392 | struct radeon_gpio_rec { |
@@ -523,9 +525,10 @@ struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev | |||
523 | struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); | 525 | struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); |
524 | struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); | 526 | struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); |
525 | struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); | 527 | struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); |
526 | extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); | 528 | extern void atombios_dvo_setup(struct drm_encoder *encoder, int action); |
527 | extern void atombios_digital_setup(struct drm_encoder *encoder, int action); | 529 | extern void atombios_digital_setup(struct drm_encoder *encoder, int action); |
528 | extern int atombios_get_encoder_mode(struct drm_encoder *encoder); | 530 | extern int atombios_get_encoder_mode(struct drm_encoder *encoder); |
531 | extern void atombios_set_edp_panel_power(struct drm_connector *connector, int action); | ||
529 | extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); | 532 | extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); |
530 | 533 | ||
531 | extern void radeon_crtc_load_lut(struct drm_crtc *crtc); | 534 | extern void radeon_crtc_load_lut(struct drm_crtc *crtc); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 8eb183466015..1d067743fee0 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -86,11 +86,12 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
86 | } | 86 | } |
87 | 87 | ||
88 | int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | 88 | int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, |
89 | unsigned long size, bool kernel, u32 domain, | 89 | unsigned long size, int byte_align, bool kernel, u32 domain, |
90 | struct radeon_bo **bo_ptr) | 90 | struct radeon_bo **bo_ptr) |
91 | { | 91 | { |
92 | struct radeon_bo *bo; | 92 | struct radeon_bo *bo; |
93 | enum ttm_bo_type type; | 93 | enum ttm_bo_type type; |
94 | int page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; | ||
94 | int r; | 95 | int r; |
95 | 96 | ||
96 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { | 97 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { |
@@ -115,7 +116,7 @@ retry: | |||
115 | /* Kernel allocation are uninterruptible */ | 116 | /* Kernel allocation are uninterruptible */ |
116 | mutex_lock(&rdev->vram_mutex); | 117 | mutex_lock(&rdev->vram_mutex); |
117 | r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, | 118 | r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, |
118 | &bo->placement, 0, 0, !kernel, NULL, size, | 119 | &bo->placement, page_align, 0, !kernel, NULL, size, |
119 | &radeon_ttm_bo_destroy); | 120 | &radeon_ttm_bo_destroy); |
120 | mutex_unlock(&rdev->vram_mutex); | 121 | mutex_unlock(&rdev->vram_mutex); |
121 | if (unlikely(r != 0)) { | 122 | if (unlikely(r != 0)) { |
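radeon_bo_create() now takes an explicit byte_align and converts it into the page count that ttm_bo_init() expects via roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; most callers in this series simply pass PAGE_SIZE, while radeon_gem forwards the user-supplied alignment. A small worked example of that arithmetic, assuming the usual 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)                     /* 4096 */
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
    unsigned long byte_align[] = { 1, 4096, 4097, 65536 };
    for (int i = 0; i < 4; i++) {
        unsigned long pages = roundup(byte_align[i], PAGE_SIZE) >> PAGE_SHIFT;
        /* 1 -> 1 page, 4096 -> 1 page, 4097 -> 2 pages, 65536 -> 16 pages */
        printf("%lu byte(s) of alignment -> %lu page(s)\n", byte_align[i], pages);
    }
    return 0;
}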
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 3481bc7f6f58..d143702b244a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
@@ -137,9 +137,10 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, | |||
137 | } | 137 | } |
138 | 138 | ||
139 | extern int radeon_bo_create(struct radeon_device *rdev, | 139 | extern int radeon_bo_create(struct radeon_device *rdev, |
140 | struct drm_gem_object *gobj, unsigned long size, | 140 | struct drm_gem_object *gobj, unsigned long size, |
141 | bool kernel, u32 domain, | 141 | int byte_align, |
142 | struct radeon_bo **bo_ptr); | 142 | bool kernel, u32 domain, |
143 | struct radeon_bo **bo_ptr); | ||
143 | extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); | 144 | extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); |
144 | extern void radeon_bo_kunmap(struct radeon_bo *bo); | 145 | extern void radeon_bo_kunmap(struct radeon_bo *bo); |
145 | extern void radeon_bo_unref(struct radeon_bo **bo); | 146 | extern void radeon_bo_unref(struct radeon_bo **bo); |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 6ea798ce8218..06e79822a2bf 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -176,8 +176,8 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
176 | INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); | 176 | INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); |
177 | /* Allocate 1M object buffer */ | 177 | /* Allocate 1M object buffer */ |
178 | r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, | 178 | r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, |
179 | true, RADEON_GEM_DOMAIN_GTT, | 179 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, |
180 | &rdev->ib_pool.robj); | 180 | &rdev->ib_pool.robj); |
181 | if (r) { | 181 | if (r) { |
182 | DRM_ERROR("radeon: failed to ib pool (%d).\n", r); | 182 | DRM_ERROR("radeon: failed to ib pool (%d).\n", r); |
183 | return r; | 183 | return r; |
@@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
332 | rdev->cp.ring_size = ring_size; | 332 | rdev->cp.ring_size = ring_size; |
333 | /* Allocate ring buffer */ | 333 | /* Allocate ring buffer */ |
334 | if (rdev->cp.ring_obj == NULL) { | 334 | if (rdev->cp.ring_obj == NULL) { |
335 | r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true, | 335 | r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true, |
336 | RADEON_GEM_DOMAIN_GTT, | 336 | RADEON_GEM_DOMAIN_GTT, |
337 | &rdev->cp.ring_obj); | 337 | &rdev->cp.ring_obj); |
338 | if (r) { | 338 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 313c96bc09da..5b44f652145c 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
52 | goto out_cleanup; | 52 | goto out_cleanup; |
53 | } | 53 | } |
54 | 54 | ||
55 | r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, | 55 | r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
56 | &vram_obj); | 56 | &vram_obj); |
57 | if (r) { | 57 | if (r) { |
58 | DRM_ERROR("Failed to create VRAM object\n"); | 58 | DRM_ERROR("Failed to create VRAM object\n"); |
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
71 | void **gtt_start, **gtt_end; | 71 | void **gtt_start, **gtt_end; |
72 | void **vram_start, **vram_end; | 72 | void **vram_start, **vram_end; |
73 | 73 | ||
74 | r = radeon_bo_create(rdev, NULL, size, true, | 74 | r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, |
75 | RADEON_GEM_DOMAIN_GTT, gtt_obj + i); | 75 | RADEON_GEM_DOMAIN_GTT, gtt_obj + i); |
76 | if (r) { | 76 | if (r) { |
77 | DRM_ERROR("Failed to create GTT object %d\n", i); | 77 | DRM_ERROR("Failed to create GTT object %d\n", i); |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 01c2c736a1da..1272e4b6a1d4 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -529,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
529 | DRM_ERROR("Failed initializing VRAM heap.\n"); | 529 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
530 | return r; | 530 | return r; |
531 | } | 531 | } |
532 | r = radeon_bo_create(rdev, NULL, 256 * 1024, true, | 532 | r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true, |
533 | RADEON_GEM_DOMAIN_VRAM, | 533 | RADEON_GEM_DOMAIN_VRAM, |
534 | &rdev->stollen_vga_memory); | 534 | &rdev->stollen_vga_memory); |
535 | if (r) { | 535 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 245374e2b778..4dfead8cee33 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -915,8 +915,8 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev) | |||
915 | 915 | ||
916 | if (rdev->vram_scratch.robj == NULL) { | 916 | if (rdev->vram_scratch.robj == NULL) { |
917 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, | 917 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, |
918 | true, RADEON_GEM_DOMAIN_VRAM, | 918 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
919 | &rdev->vram_scratch.robj); | 919 | &rdev->vram_scratch.robj); |
920 | if (r) { | 920 | if (r) { |
921 | return r; | 921 | return r; |
922 | } | 922 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 3ca77dc03915..148a322d8f5d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -224,6 +224,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, | |||
224 | int ret; | 224 | int ret; |
225 | 225 | ||
226 | while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { | 226 | while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { |
227 | /** | ||
228 | * Deadlock avoidance for multi-bo reserving. | ||
229 | */ | ||
227 | if (use_sequence && bo->seq_valid && | 230 | if (use_sequence && bo->seq_valid && |
228 | (sequence - bo->val_seq < (1 << 31))) { | 231 | (sequence - bo->val_seq < (1 << 31))) { |
229 | return -EAGAIN; | 232 | return -EAGAIN; |
@@ -241,6 +244,14 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, | |||
241 | } | 244 | } |
242 | 245 | ||
243 | if (use_sequence) { | 246 | if (use_sequence) { |
247 | /** | ||
248 | * Wake up waiters that may need to recheck for deadlock, | ||
249 | * if we decreased the sequence number. | ||
250 | */ | ||
251 | if (unlikely((bo->val_seq - sequence < (1 << 31)) | ||
252 | || !bo->seq_valid)) | ||
253 | wake_up_all(&bo->event_queue); | ||
254 | |||
244 | bo->val_seq = sequence; | 255 | bo->val_seq = sequence; |
245 | bo->seq_valid = true; | 256 | bo->seq_valid = true; |
246 | } else { | 257 | } else { |
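Both new comments in ttm_bo_reserve_locked() hinge on the same trick: with unsigned 32-bit sequence numbers, (a - b) < (1 << 31) means "a is not older than b" even across counter wraparound, which is what the deadlock-avoidance check and the new wake-up condition test. A standalone sketch of that comparison (not the TTM code itself):

#include <stdint.h>
#include <stdio.h>

/* True if sequence a is not older than b, tolerant of 32-bit wraparound. */
static int seq_after_eq(uint32_t a, uint32_t b)
{
    return (uint32_t)(a - b) < (1u << 31);
}

int main(void)
{
    printf("%d\n", seq_after_eq(10, 5));            /* 1: 10 is newer     */
    printf("%d\n", seq_after_eq(5, 10));            /* 0: 5 is older      */
    /* 3 came "after" 0xfffffffe even though it is numerically smaller */
    printf("%d\n", seq_after_eq(3, 0xfffffffeu));   /* 1 */
    return 0;
}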
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 36e129f0023f..5408b1b7996f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -862,7 +862,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
862 | &vmw_vram_sys_placement, true, | 862 | &vmw_vram_sys_placement, true, |
863 | &vmw_user_dmabuf_destroy); | 863 | &vmw_user_dmabuf_destroy); |
864 | if (unlikely(ret != 0)) | 864 | if (unlikely(ret != 0)) |
865 | return ret; | 865 | goto out_no_dmabuf; |
866 | 866 | ||
867 | tmp = ttm_bo_reference(&vmw_user_bo->dma.base); | 867 | tmp = ttm_bo_reference(&vmw_user_bo->dma.base); |
868 | ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, | 868 | ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, |
@@ -870,19 +870,21 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
870 | false, | 870 | false, |
871 | ttm_buffer_type, | 871 | ttm_buffer_type, |
872 | &vmw_user_dmabuf_release, NULL); | 872 | &vmw_user_dmabuf_release, NULL); |
873 | if (unlikely(ret != 0)) { | 873 | if (unlikely(ret != 0)) |
874 | ttm_bo_unref(&tmp); | 874 | goto out_no_base_object; |
875 | } else { | 875 | else { |
876 | rep->handle = vmw_user_bo->base.hash.key; | 876 | rep->handle = vmw_user_bo->base.hash.key; |
877 | rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; | 877 | rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; |
878 | rep->cur_gmr_id = vmw_user_bo->base.hash.key; | 878 | rep->cur_gmr_id = vmw_user_bo->base.hash.key; |
879 | rep->cur_gmr_offset = 0; | 879 | rep->cur_gmr_offset = 0; |
880 | } | 880 | } |
881 | ttm_bo_unref(&tmp); | ||
882 | 881 | ||
882 | out_no_base_object: | ||
883 | ttm_bo_unref(&tmp); | ||
884 | out_no_dmabuf: | ||
883 | ttm_read_unlock(&vmaster->lock); | 885 | ttm_read_unlock(&vmaster->lock); |
884 | 886 | ||
885 | return 0; | 887 | return ret; |
886 | } | 888 | } |
887 | 889 | ||
888 | int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | 890 | int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, |
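The vmw_dmabuf_alloc_ioctl() rework replaces the early returns with labelled goto unwinding, so ttm_read_unlock() runs on every exit path and the real error code is propagated instead of a hard-coded 0. A minimal sketch of the idiom under hypothetical helper names:

#include <stdio.h>

/* Hypothetical lock/alloc helpers, for illustration only. */
static int take_lock(void)      { return 0; }
static void drop_lock(void)     { }
static int alloc_buffer(void)   { return -1; }   /* pretend this fails */
static int init_object(void)    { return 0; }

static int alloc_ioctl(void)
{
    int ret = take_lock();
    if (ret != 0)
        return ret;

    ret = alloc_buffer();
    if (ret != 0)
        goto out_no_dmabuf;              /* still unlock before returning */

    ret = init_object();
    if (ret != 0)
        goto out_no_base_object;

out_no_base_object:
    /* per-object cleanup would go here */
out_no_dmabuf:
    drop_lock();                         /* runs on success and on every failure */
    return ret;                          /* the original error, not a blanket 0 */
}

int main(void)
{
    printf("ret = %d\n", alloc_ioctl()); /* -1 from alloc_buffer() */
    return 0;
}

Note that the success path deliberately falls through both labels, so the cleanup and unlock run exactly once whatever happens.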
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 8a4b32dca9f7..e1f07483691f 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <linux/hid.h> | 32 | #include <linux/hid.h> |
33 | #include <linux/mutex.h> | 33 | #include <linux/mutex.h> |
34 | #include <linux/sched.h> | 34 | #include <linux/sched.h> |
35 | #include <linux/smp_lock.h> | ||
36 | 35 | ||
37 | #include <linux/hidraw.h> | 36 | #include <linux/hidraw.h> |
38 | 37 | ||
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index fedd88df9a18..984feb351a5a 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/smp_lock.h> | ||
33 | #include <linux/input.h> | 32 | #include <linux/input.h> |
34 | #include <linux/usb.h> | 33 | #include <linux/usb.h> |
35 | #include <linux/hid.h> | 34 | #include <linux/hid.h> |
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index fa9708c2d723..4033974d1bb3 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c | |||
@@ -4,7 +4,7 @@ | |||
4 | Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si> | 4 | Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si> |
5 | 5 | ||
6 | Based on max6650.c: | 6 | Based on max6650.c: |
7 | Copyright (C) 2007 Hans J. Koch <hjk@linutronix.de> | 7 | Copyright (C) 2007 Hans J. Koch <hjk@hansjkoch.de> |
8 | 8 | ||
9 | This program is free software; you can redistribute it and/or modify | 9 | This program is free software; you can redistribute it and/or modify |
10 | it under the terms of the GNU General Public License as published by | 10 | it under the terms of the GNU General Public License as published by |
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c index 937983407e2a..c4c40be0edbf 100644 --- a/drivers/hwmon/i5k_amb.c +++ b/drivers/hwmon/i5k_amb.c | |||
@@ -497,12 +497,14 @@ static unsigned long chipset_ids[] = { | |||
497 | 0 | 497 | 0 |
498 | }; | 498 | }; |
499 | 499 | ||
500 | #ifdef MODULE | ||
500 | static struct pci_device_id i5k_amb_ids[] __devinitdata = { | 501 | static struct pci_device_id i5k_amb_ids[] __devinitdata = { |
501 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, | 502 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, |
502 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) }, | 503 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) }, |
503 | { 0, } | 504 | { 0, } |
504 | }; | 505 | }; |
505 | MODULE_DEVICE_TABLE(pci, i5k_amb_ids); | 506 | MODULE_DEVICE_TABLE(pci, i5k_amb_ids); |
507 | #endif | ||
506 | 508 | ||
507 | static int __devinit i5k_amb_probe(struct platform_device *pdev) | 509 | static int __devinit i5k_amb_probe(struct platform_device *pdev) |
508 | { | 510 | { |
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c index 9f4bae07f719..8853afce85ce 100644 --- a/drivers/hwmon/lis3lv02d_i2c.c +++ b/drivers/hwmon/lis3lv02d_i2c.c | |||
@@ -186,7 +186,7 @@ static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client) | |||
186 | return 0; | 186 | return 0; |
187 | } | 187 | } |
188 | 188 | ||
189 | #ifdef CONFIG_PM | 189 | #ifdef CONFIG_PM_SLEEP |
190 | static int lis3lv02d_i2c_suspend(struct device *dev) | 190 | static int lis3lv02d_i2c_suspend(struct device *dev) |
191 | { | 191 | { |
192 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 192 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); |
@@ -213,12 +213,9 @@ static int lis3lv02d_i2c_resume(struct device *dev) | |||
213 | 213 | ||
214 | return 0; | 214 | return 0; |
215 | } | 215 | } |
216 | #else | 216 | #endif /* CONFIG_PM_SLEEP */ |
217 | #define lis3lv02d_i2c_suspend NULL | ||
218 | #define lis3lv02d_i2c_resume NULL | ||
219 | #define lis3lv02d_i2c_shutdown NULL | ||
220 | #endif | ||
221 | 217 | ||
218 | #ifdef CONFIG_PM_RUNTIME | ||
222 | static int lis3_i2c_runtime_suspend(struct device *dev) | 219 | static int lis3_i2c_runtime_suspend(struct device *dev) |
223 | { | 220 | { |
224 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 221 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); |
@@ -236,6 +233,7 @@ static int lis3_i2c_runtime_resume(struct device *dev) | |||
236 | lis3lv02d_poweron(lis3); | 233 | lis3lv02d_poweron(lis3); |
237 | return 0; | 234 | return 0; |
238 | } | 235 | } |
236 | #endif /* CONFIG_PM_RUNTIME */ | ||
239 | 237 | ||
240 | static const struct i2c_device_id lis3lv02d_id[] = { | 238 | static const struct i2c_device_id lis3lv02d_id[] = { |
241 | {"lis3lv02d", 0 }, | 239 | {"lis3lv02d", 0 }, |
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c index 6669255aadcf..c9ed14eba5a6 100644 --- a/drivers/hwmon/lm93.c +++ b/drivers/hwmon/lm93.c | |||
@@ -20,7 +20,7 @@ | |||
20 | Adapted to 2.6.20 by Carsten Emde <cbe@osadl.org> | 20 | Adapted to 2.6.20 by Carsten Emde <cbe@osadl.org> |
21 | Copyright (c) 2006 Carsten Emde, Open Source Automation Development Lab | 21 | Copyright (c) 2006 Carsten Emde, Open Source Automation Development Lab |
22 | 22 | ||
23 | Modified for mainline integration by Hans J. Koch <hjk@linutronix.de> | 23 | Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de> |
24 | Copyright (c) 2007 Hans J. Koch, Linutronix GmbH | 24 | Copyright (c) 2007 Hans J. Koch, Linutronix GmbH |
25 | 25 | ||
26 | This program is free software; you can redistribute it and/or modify | 26 | This program is free software; you can redistribute it and/or modify |
@@ -2629,7 +2629,7 @@ static void __exit lm93_exit(void) | |||
2629 | } | 2629 | } |
2630 | 2630 | ||
2631 | MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>, " | 2631 | MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>, " |
2632 | "Hans J. Koch <hjk@linutronix.de"); | 2632 | "Hans J. Koch <hjk@hansjkoch.de>"); |
2633 | MODULE_DESCRIPTION("LM93 driver"); | 2633 | MODULE_DESCRIPTION("LM93 driver"); |
2634 | MODULE_LICENSE("GPL"); | 2634 | MODULE_LICENSE("GPL"); |
2635 | 2635 | ||
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c index 464340f25496..4546d82f024a 100644 --- a/drivers/hwmon/lm95241.c +++ b/drivers/hwmon/lm95241.c | |||
@@ -128,9 +128,12 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr, | |||
128 | { | 128 | { |
129 | struct i2c_client *client = to_i2c_client(dev); | 129 | struct i2c_client *client = to_i2c_client(dev); |
130 | struct lm95241_data *data = i2c_get_clientdata(client); | 130 | struct lm95241_data *data = i2c_get_clientdata(client); |
131 | unsigned long val; | ||
131 | 132 | ||
132 | strict_strtol(buf, 10, &data->interval); | 133 | if (strict_strtoul(buf, 10, &val) < 0) |
133 | data->interval = data->interval * HZ / 1000; | 134 | return -EINVAL; |
135 | |||
136 | data->interval = val * HZ / 1000; | ||
134 | 137 | ||
135 | return count; | 138 | return count; |
136 | } | 139 | } |
@@ -188,7 +191,9 @@ static ssize_t set_type##flag(struct device *dev, \ | |||
188 | struct lm95241_data *data = i2c_get_clientdata(client); \ | 191 | struct lm95241_data *data = i2c_get_clientdata(client); \ |
189 | \ | 192 | \ |
190 | long val; \ | 193 | long val; \ |
191 | strict_strtol(buf, 10, &val); \ | 194 | \ |
195 | if (strict_strtol(buf, 10, &val) < 0) \ | ||
196 | return -EINVAL; \ | ||
192 | \ | 197 | \ |
193 | if ((val == 1) || (val == 2)) { \ | 198 | if ((val == 1) || (val == 2)) { \ |
194 | \ | 199 | \ |
@@ -227,7 +232,9 @@ static ssize_t set_min##flag(struct device *dev, \ | |||
227 | struct lm95241_data *data = i2c_get_clientdata(client); \ | 232 | struct lm95241_data *data = i2c_get_clientdata(client); \ |
228 | \ | 233 | \ |
229 | long val; \ | 234 | long val; \ |
230 | strict_strtol(buf, 10, &val); \ | 235 | \ |
236 | if (strict_strtol(buf, 10, &val) < 0) \ | ||
237 | return -EINVAL;\ | ||
231 | \ | 238 | \ |
232 | mutex_lock(&data->update_lock); \ | 239 | mutex_lock(&data->update_lock); \ |
233 | \ | 240 | \ |
@@ -256,7 +263,9 @@ static ssize_t set_max##flag(struct device *dev, \ | |||
256 | struct lm95241_data *data = i2c_get_clientdata(client); \ | 263 | struct lm95241_data *data = i2c_get_clientdata(client); \ |
257 | \ | 264 | \ |
258 | long val; \ | 265 | long val; \ |
259 | strict_strtol(buf, 10, &val); \ | 266 | \ |
267 | if (strict_strtol(buf, 10, &val) < 0) \ | ||
268 | return -EINVAL; \ | ||
260 | \ | 269 | \ |
261 | mutex_lock(&data->update_lock); \ | 270 | mutex_lock(&data->update_lock); \ |
262 | \ | 271 | \ |
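The lm95241 hunks stop ignoring the return value of strict_strtol()/strict_strtoul(): a sysfs store callback should reject unparsable input with -EINVAL rather than writing whatever happened to be left in the variable. A userspace-flavoured sketch of the same validation, with strtol standing in for the kernel helpers and an illustrative HZ:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define HZ 250                               /* illustrative tick rate */

/* Parse a sysfs-style write: reject junk instead of ignoring errors. */
static int set_interval(const char *buf, long *interval_jiffies)
{
    char *end;
    errno = 0;
    long val = strtol(buf, &end, 10);
    if (errno || end == buf || val < 0)
        return -EINVAL;                      /* what the fix adds */
    *interval_jiffies = val * HZ / 1000;     /* milliseconds -> jiffies */
    return 0;
}

int main(void)
{
    long j;
    printf("%d\n", set_interval("2000", &j));   /* 0, j == 500   */
    printf("%d\n", set_interval("bogus", &j));  /* -EINVAL (-22) */
    return 0;
}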
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c index a0160ee5caef..9a11532ecae8 100644 --- a/drivers/hwmon/max6650.c +++ b/drivers/hwmon/max6650.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * max6650.c - Part of lm_sensors, Linux kernel modules for hardware | 2 | * max6650.c - Part of lm_sensors, Linux kernel modules for hardware |
3 | * monitoring. | 3 | * monitoring. |
4 | * | 4 | * |
5 | * (C) 2007 by Hans J. Koch <hjk@linutronix.de> | 5 | * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> |
6 | * | 6 | * |
7 | * based on code written by John Morris <john.morris@spirentcom.com> | 7 | * based on code written by John Morris <john.morris@spirentcom.com> |
8 | * Copyright (c) 2003 Spirent Communications | 8 | * Copyright (c) 2003 Spirent Communications |
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c index 1d840aa83782..cdbc7448491e 100644 --- a/drivers/hwmon/w83795.c +++ b/drivers/hwmon/w83795.c | |||
@@ -165,10 +165,14 @@ static const u8 IN_LSB_SHIFT_IDX[][2] = { | |||
165 | 165 | ||
166 | #define W83795_REG_VID_CTRL 0x6A | 166 | #define W83795_REG_VID_CTRL 0x6A |
167 | 167 | ||
168 | #define W83795_REG_ALARM_CTRL 0x40 | ||
169 | #define ALARM_CTRL_RTSACS (1 << 7) | ||
168 | #define W83795_REG_ALARM(index) (0x41 + (index)) | 170 | #define W83795_REG_ALARM(index) (0x41 + (index)) |
171 | #define W83795_REG_CLR_CHASSIS 0x4D | ||
169 | #define W83795_REG_BEEP(index) (0x50 + (index)) | 172 | #define W83795_REG_BEEP(index) (0x50 + (index)) |
170 | 173 | ||
171 | #define W83795_REG_CLR_CHASSIS 0x4D | 174 | #define W83795_REG_OVT_CFG 0x58 |
175 | #define OVT_CFG_SEL (1 << 7) | ||
172 | 176 | ||
173 | 177 | ||
174 | #define W83795_REG_FCMS1 0x201 | 178 | #define W83795_REG_FCMS1 0x201 |
@@ -178,6 +182,14 @@ static const u8 IN_LSB_SHIFT_IDX[][2] = { | |||
178 | 182 | ||
179 | #define W83795_REG_TSS(index) (0x209 + (index)) | 183 | #define W83795_REG_TSS(index) (0x209 + (index)) |
180 | 184 | ||
185 | #define TSS_MAP_RESERVED 0xff | ||
186 | static const u8 tss_map[4][6] = { | ||
187 | { 0, 1, 2, 3, 4, 5}, | ||
188 | { 6, 7, 8, 9, 0, 1}, | ||
189 | {10, 11, 12, 13, 2, 3}, | ||
190 | { 4, 5, 4, 5, TSS_MAP_RESERVED, TSS_MAP_RESERVED}, | ||
191 | }; | ||
192 | |||
181 | #define PWM_OUTPUT 0 | 193 | #define PWM_OUTPUT 0 |
182 | #define PWM_FREQ 1 | 194 | #define PWM_FREQ 1 |
183 | #define PWM_START 2 | 195 | #define PWM_START 2 |
@@ -369,6 +381,7 @@ struct w83795_data { | |||
369 | u8 setup_pwm[3]; /* Register value */ | 381 | u8 setup_pwm[3]; /* Register value */ |
370 | 382 | ||
371 | u8 alarms[6]; /* Register value */ | 383 | u8 alarms[6]; /* Register value */ |
384 | u8 enable_beep; | ||
372 | u8 beeps[6]; /* Register value */ | 385 | u8 beeps[6]; /* Register value */ |
373 | 386 | ||
374 | char valid; | 387 | char valid; |
@@ -499,8 +512,11 @@ static void w83795_update_limits(struct i2c_client *client) | |||
499 | } | 512 | } |
500 | 513 | ||
501 | /* Read beep settings */ | 514 | /* Read beep settings */ |
502 | for (i = 0; i < ARRAY_SIZE(data->beeps); i++) | 515 | if (data->enable_beep) { |
503 | data->beeps[i] = w83795_read(client, W83795_REG_BEEP(i)); | 516 | for (i = 0; i < ARRAY_SIZE(data->beeps); i++) |
517 | data->beeps[i] = | ||
518 | w83795_read(client, W83795_REG_BEEP(i)); | ||
519 | } | ||
504 | 520 | ||
505 | data->valid_limits = 1; | 521 | data->valid_limits = 1; |
506 | } | 522 | } |
@@ -577,6 +593,7 @@ static struct w83795_data *w83795_update_device(struct device *dev) | |||
577 | struct i2c_client *client = to_i2c_client(dev); | 593 | struct i2c_client *client = to_i2c_client(dev); |
578 | struct w83795_data *data = i2c_get_clientdata(client); | 594 | struct w83795_data *data = i2c_get_clientdata(client); |
579 | u16 tmp; | 595 | u16 tmp; |
596 | u8 intrusion; | ||
580 | int i; | 597 | int i; |
581 | 598 | ||
582 | mutex_lock(&data->update_lock); | 599 | mutex_lock(&data->update_lock); |
@@ -648,9 +665,24 @@ static struct w83795_data *w83795_update_device(struct device *dev) | |||
648 | w83795_read(client, W83795_REG_PWM(i, PWM_OUTPUT)); | 665 | w83795_read(client, W83795_REG_PWM(i, PWM_OUTPUT)); |
649 | } | 666 | } |
650 | 667 | ||
651 | /* update alarm */ | 668 | /* Update intrusion and alarms |
669 | * It is important to read intrusion first, because reading from | ||
670 | * register SMI STS6 clears the interrupt status temporarily. */ | ||
671 | tmp = w83795_read(client, W83795_REG_ALARM_CTRL); | ||
672 | /* Switch to interrupt status for intrusion if needed */ | ||
673 | if (tmp & ALARM_CTRL_RTSACS) | ||
674 | w83795_write(client, W83795_REG_ALARM_CTRL, | ||
675 | tmp & ~ALARM_CTRL_RTSACS); | ||
676 | intrusion = w83795_read(client, W83795_REG_ALARM(5)) & (1 << 6); | ||
677 | /* Switch to real-time alarms */ | ||
678 | w83795_write(client, W83795_REG_ALARM_CTRL, tmp | ALARM_CTRL_RTSACS); | ||
652 | for (i = 0; i < ARRAY_SIZE(data->alarms); i++) | 679 | for (i = 0; i < ARRAY_SIZE(data->alarms); i++) |
653 | data->alarms[i] = w83795_read(client, W83795_REG_ALARM(i)); | 680 | data->alarms[i] = w83795_read(client, W83795_REG_ALARM(i)); |
681 | data->alarms[5] |= intrusion; | ||
682 | /* Restore original configuration if needed */ | ||
683 | if (!(tmp & ALARM_CTRL_RTSACS)) | ||
684 | w83795_write(client, W83795_REG_ALARM_CTRL, | ||
685 | tmp & ~ALARM_CTRL_RTSACS); | ||
654 | 686 | ||
655 | data->last_updated = jiffies; | 687 | data->last_updated = jiffies; |
656 | data->valid = 1; | 688 | data->valid = 1; |
@@ -730,6 +762,10 @@ store_chassis_clear(struct device *dev, | |||
730 | val = w83795_read(client, W83795_REG_CLR_CHASSIS); | 762 | val = w83795_read(client, W83795_REG_CLR_CHASSIS); |
731 | val |= 0x80; | 763 | val |= 0x80; |
732 | w83795_write(client, W83795_REG_CLR_CHASSIS, val); | 764 | w83795_write(client, W83795_REG_CLR_CHASSIS, val); |
765 | |||
766 | /* Clear status and force cache refresh */ | ||
767 | w83795_read(client, W83795_REG_ALARM(5)); | ||
768 | data->valid = 0; | ||
733 | mutex_unlock(&data->update_lock); | 769 | mutex_unlock(&data->update_lock); |
734 | return count; | 770 | return count; |
735 | } | 771 | } |
@@ -857,20 +893,20 @@ show_pwm_enable(struct device *dev, struct device_attribute *attr, char *buf) | |||
857 | int index = sensor_attr->index; | 893 | int index = sensor_attr->index; |
858 | u8 tmp; | 894 | u8 tmp; |
859 | 895 | ||
860 | if (1 == (data->pwm_fcms[0] & (1 << index))) { | 896 | /* Speed cruise mode */ |
897 | if (data->pwm_fcms[0] & (1 << index)) { | ||
861 | tmp = 2; | 898 | tmp = 2; |
862 | goto out; | 899 | goto out; |
863 | } | 900 | } |
901 | /* Thermal cruise or SmartFan IV mode */ | ||
864 | for (tmp = 0; tmp < 6; tmp++) { | 902 | for (tmp = 0; tmp < 6; tmp++) { |
865 | if (data->pwm_tfmr[tmp] & (1 << index)) { | 903 | if (data->pwm_tfmr[tmp] & (1 << index)) { |
866 | tmp = 3; | 904 | tmp = 3; |
867 | goto out; | 905 | goto out; |
868 | } | 906 | } |
869 | } | 907 | } |
870 | if (data->pwm_fomc & (1 << index)) | 908 | /* Manual mode */ |
871 | tmp = 0; | 909 | tmp = 1; |
872 | else | ||
873 | tmp = 1; | ||
874 | 910 | ||
875 | out: | 911 | out: |
876 | return sprintf(buf, "%u\n", tmp); | 912 | return sprintf(buf, "%u\n", tmp); |
@@ -890,23 +926,21 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr, | |||
890 | 926 | ||
891 | if (strict_strtoul(buf, 10, &val) < 0) | 927 | if (strict_strtoul(buf, 10, &val) < 0) |
892 | return -EINVAL; | 928 | return -EINVAL; |
893 | if (val > 2) | 929 | if (val < 1 || val > 2) |
894 | return -EINVAL; | 930 | return -EINVAL; |
895 | 931 | ||
896 | mutex_lock(&data->update_lock); | 932 | mutex_lock(&data->update_lock); |
897 | switch (val) { | 933 | switch (val) { |
898 | case 0: | ||
899 | case 1: | 934 | case 1: |
935 | /* Clear speed cruise mode bits */ | ||
900 | data->pwm_fcms[0] &= ~(1 << index); | 936 | data->pwm_fcms[0] &= ~(1 << index); |
901 | w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]); | 937 | w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]); |
938 | /* Clear thermal cruise mode bits */ | ||
902 | for (i = 0; i < 6; i++) { | 939 | for (i = 0; i < 6; i++) { |
903 | data->pwm_tfmr[i] &= ~(1 << index); | 940 | data->pwm_tfmr[i] &= ~(1 << index); |
904 | w83795_write(client, W83795_REG_TFMR(i), | 941 | w83795_write(client, W83795_REG_TFMR(i), |
905 | data->pwm_tfmr[i]); | 942 | data->pwm_tfmr[i]); |
906 | } | 943 | } |
907 | data->pwm_fomc |= 1 << index; | ||
908 | data->pwm_fomc ^= val << index; | ||
909 | w83795_write(client, W83795_REG_FOMC, data->pwm_fomc); | ||
910 | break; | 944 | break; |
911 | case 2: | 945 | case 2: |
912 | data->pwm_fcms[0] |= (1 << index); | 946 | data->pwm_fcms[0] |= (1 << index); |
@@ -918,23 +952,60 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr, | |||
918 | } | 952 | } |
919 | 953 | ||
920 | static ssize_t | 954 | static ssize_t |
955 | show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf) | ||
956 | { | ||
957 | struct w83795_data *data = w83795_update_pwm_config(dev); | ||
958 | int index = to_sensor_dev_attr_2(attr)->index; | ||
959 | unsigned int mode; | ||
960 | |||
961 | if (data->pwm_fomc & (1 << index)) | ||
962 | mode = 0; /* DC */ | ||
963 | else | ||
964 | mode = 1; /* PWM */ | ||
965 | |||
966 | return sprintf(buf, "%u\n", mode); | ||
967 | } | ||
968 | |||
969 | /* | ||
970 | * Check whether a given temperature source can ever be useful. | ||
971 | * Returns the number of selectable temperature channels which are | ||
972 | * enabled. | ||
973 | */ | ||
974 | static int w83795_tss_useful(const struct w83795_data *data, int tsrc) | ||
975 | { | ||
976 | int useful = 0, i; | ||
977 | |||
978 | for (i = 0; i < 4; i++) { | ||
979 | if (tss_map[i][tsrc] == TSS_MAP_RESERVED) | ||
980 | continue; | ||
981 | if (tss_map[i][tsrc] < 6) /* Analog */ | ||
982 | useful += (data->has_temp >> tss_map[i][tsrc]) & 1; | ||
983 | else /* Digital */ | ||
984 | useful += (data->has_dts >> (tss_map[i][tsrc] - 6)) & 1; | ||
985 | } | ||
986 | |||
987 | return useful; | ||
988 | } | ||
989 | |||
990 | static ssize_t | ||
921 | show_temp_src(struct device *dev, struct device_attribute *attr, char *buf) | 991 | show_temp_src(struct device *dev, struct device_attribute *attr, char *buf) |
922 | { | 992 | { |
923 | struct sensor_device_attribute_2 *sensor_attr = | 993 | struct sensor_device_attribute_2 *sensor_attr = |
924 | to_sensor_dev_attr_2(attr); | 994 | to_sensor_dev_attr_2(attr); |
925 | struct w83795_data *data = w83795_update_pwm_config(dev); | 995 | struct w83795_data *data = w83795_update_pwm_config(dev); |
926 | int index = sensor_attr->index; | 996 | int index = sensor_attr->index; |
927 | u8 val = index / 2; | 997 | u8 tmp = data->temp_src[index / 2]; |
928 | u8 tmp = data->temp_src[val]; | ||
929 | 998 | ||
930 | if (index & 1) | 999 | if (index & 1) |
931 | val = 4; | 1000 | tmp >>= 4; /* Pick high nibble */ |
932 | else | 1001 | else |
933 | val = 0; | 1002 | tmp &= 0x0f; /* Pick low nibble */ |
934 | tmp >>= val; | ||
935 | tmp &= 0x0f; | ||
936 | 1003 | ||
937 | return sprintf(buf, "%u\n", tmp); | 1004 | /* Look-up the actual temperature channel number */ |
1005 | if (tmp >= 4 || tss_map[tmp][index] == TSS_MAP_RESERVED) | ||
1006 | return -EINVAL; /* Shouldn't happen */ | ||
1007 | |||
1008 | return sprintf(buf, "%u\n", (unsigned int)tss_map[tmp][index] + 1); | ||
938 | } | 1009 | } |
939 | 1010 | ||
940 | static ssize_t | 1011 | static ssize_t |
@@ -946,12 +1017,21 @@ store_temp_src(struct device *dev, struct device_attribute *attr, | |||
946 | struct sensor_device_attribute_2 *sensor_attr = | 1017 | struct sensor_device_attribute_2 *sensor_attr = |
947 | to_sensor_dev_attr_2(attr); | 1018 | to_sensor_dev_attr_2(attr); |
948 | int index = sensor_attr->index; | 1019 | int index = sensor_attr->index; |
949 | unsigned long tmp; | 1020 | int tmp; |
1021 | unsigned long channel; | ||
950 | u8 val = index / 2; | 1022 | u8 val = index / 2; |
951 | 1023 | ||
952 | if (strict_strtoul(buf, 10, &tmp) < 0) | 1024 | if (strict_strtoul(buf, 10, &channel) < 0 || |
1025 | channel < 1 || channel > 14) | ||
1026 | return -EINVAL; | ||
1027 | |||
1028 | /* Check if request can be fulfilled */ | ||
1029 | for (tmp = 0; tmp < 4; tmp++) { | ||
1030 | if (tss_map[tmp][index] == channel - 1) | ||
1031 | break; | ||
1032 | } | ||
1033 | if (tmp == 4) /* No match */ | ||
953 | return -EINVAL; | 1034 | return -EINVAL; |
954 | tmp = SENSORS_LIMIT(tmp, 0, 15); | ||
955 | 1035 | ||
956 | mutex_lock(&data->update_lock); | 1036 | mutex_lock(&data->update_lock); |
957 | if (index & 1) { | 1037 | if (index & 1) { |
@@ -1515,7 +1595,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, | |||
1515 | 1595 | ||
1516 | #define NOT_USED -1 | 1596 | #define NOT_USED -1 |
1517 | 1597 | ||
1518 | /* Don't change the attribute order, _max and _min are accessed by index | 1598 | /* Don't change the attribute order, _max, _min and _beep are accessed by index |
1519 | * somewhere else in the code */ | 1599 | * somewhere else in the code */ |
1520 | #define SENSOR_ATTR_IN(index) { \ | 1600 | #define SENSOR_ATTR_IN(index) { \ |
1521 | SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \ | 1601 | SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \ |
@@ -1530,6 +1610,8 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, | |||
1530 | show_alarm_beep, store_beep, BEEP_ENABLE, \ | 1610 | show_alarm_beep, store_beep, BEEP_ENABLE, \ |
1531 | index + ((index > 14) ? 1 : 0)) } | 1611 | index + ((index > 14) ? 1 : 0)) } |
1532 | 1612 | ||
1613 | /* Don't change the attribute order, _beep is accessed by index | ||
1614 | * somewhere else in the code */ | ||
1533 | #define SENSOR_ATTR_FAN(index) { \ | 1615 | #define SENSOR_ATTR_FAN(index) { \ |
1534 | SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \ | 1616 | SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \ |
1535 | NULL, FAN_INPUT, index - 1), \ | 1617 | NULL, FAN_INPUT, index - 1), \ |
@@ -1553,9 +1635,13 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, | |||
1553 | show_pwm, store_pwm, PWM_FREQ, index - 1), \ | 1635 | show_pwm, store_pwm, PWM_FREQ, index - 1), \ |
1554 | SENSOR_ATTR_2(pwm##index##_enable, S_IWUSR | S_IRUGO, \ | 1636 | SENSOR_ATTR_2(pwm##index##_enable, S_IWUSR | S_IRUGO, \ |
1555 | show_pwm_enable, store_pwm_enable, NOT_USED, index - 1), \ | 1637 | show_pwm_enable, store_pwm_enable, NOT_USED, index - 1), \ |
1638 | SENSOR_ATTR_2(pwm##index##_mode, S_IRUGO, \ | ||
1639 | show_pwm_mode, NULL, NOT_USED, index - 1), \ | ||
1556 | SENSOR_ATTR_2(fan##index##_target, S_IWUSR | S_IRUGO, \ | 1640 | SENSOR_ATTR_2(fan##index##_target, S_IWUSR | S_IRUGO, \ |
1557 | show_fanin, store_fanin, FANIN_TARGET, index - 1) } | 1641 | show_fanin, store_fanin, FANIN_TARGET, index - 1) } |
1558 | 1642 | ||
1643 | /* Don't change the attribute order, _beep is accessed by index | ||
1644 | * somewhere else in the code */ | ||
1559 | #define SENSOR_ATTR_DTS(index) { \ | 1645 | #define SENSOR_ATTR_DTS(index) { \ |
1560 | SENSOR_ATTR_2(temp##index##_type, S_IRUGO , \ | 1646 | SENSOR_ATTR_2(temp##index##_type, S_IRUGO , \ |
1561 | show_dts_mode, NULL, NOT_USED, index - 7), \ | 1647 | show_dts_mode, NULL, NOT_USED, index - 7), \ |
@@ -1574,6 +1660,8 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, | |||
1574 | SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ | 1660 | SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ |
1575 | show_alarm_beep, store_beep, BEEP_ENABLE, index + 17) } | 1661 | show_alarm_beep, store_beep, BEEP_ENABLE, index + 17) } |
1576 | 1662 | ||
1663 | /* Don't change the attribute order, _beep is accessed by index | ||
1664 | * somewhere else in the code */ | ||
1577 | #define SENSOR_ATTR_TEMP(index) { \ | 1665 | #define SENSOR_ATTR_TEMP(index) { \ |
1578 | SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \ | 1666 | SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \ |
1579 | show_temp_mode, store_temp_mode, NOT_USED, index - 1), \ | 1667 | show_temp_mode, store_temp_mode, NOT_USED, index - 1), \ |
@@ -1593,8 +1681,6 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, | |||
1593 | SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ | 1681 | SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ |
1594 | show_alarm_beep, store_beep, BEEP_ENABLE, \ | 1682 | show_alarm_beep, store_beep, BEEP_ENABLE, \ |
1595 | index + (index > 4 ? 11 : 17)), \ | 1683 | index + (index > 4 ? 11 : 17)), \ |
1596 | SENSOR_ATTR_2(temp##index##_source_sel, S_IWUSR | S_IRUGO, \ | ||
1597 | show_temp_src, store_temp_src, NOT_USED, index - 1), \ | ||
1598 | SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \ | 1684 | SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \ |
1599 | show_temp_pwm_enable, store_temp_pwm_enable, \ | 1685 | show_temp_pwm_enable, store_temp_pwm_enable, \ |
1600 | TEMP_PWM_ENABLE, index - 1), \ | 1686 | TEMP_PWM_ENABLE, index - 1), \ |
@@ -1680,7 +1766,7 @@ static const struct sensor_device_attribute_2 w83795_fan[][4] = { | |||
1680 | SENSOR_ATTR_FAN(14), | 1766 | SENSOR_ATTR_FAN(14), |
1681 | }; | 1767 | }; |
1682 | 1768 | ||
1683 | static const struct sensor_device_attribute_2 w83795_temp[][29] = { | 1769 | static const struct sensor_device_attribute_2 w83795_temp[][28] = { |
1684 | SENSOR_ATTR_TEMP(1), | 1770 | SENSOR_ATTR_TEMP(1), |
1685 | SENSOR_ATTR_TEMP(2), | 1771 | SENSOR_ATTR_TEMP(2), |
1686 | SENSOR_ATTR_TEMP(3), | 1772 | SENSOR_ATTR_TEMP(3), |
@@ -1700,7 +1786,7 @@ static const struct sensor_device_attribute_2 w83795_dts[][8] = { | |||
1700 | SENSOR_ATTR_DTS(14), | 1786 | SENSOR_ATTR_DTS(14), |
1701 | }; | 1787 | }; |
1702 | 1788 | ||
1703 | static const struct sensor_device_attribute_2 w83795_pwm[][7] = { | 1789 | static const struct sensor_device_attribute_2 w83795_pwm[][8] = { |
1704 | SENSOR_ATTR_PWM(1), | 1790 | SENSOR_ATTR_PWM(1), |
1705 | SENSOR_ATTR_PWM(2), | 1791 | SENSOR_ATTR_PWM(2), |
1706 | SENSOR_ATTR_PWM(3), | 1792 | SENSOR_ATTR_PWM(3), |
@@ -1711,13 +1797,24 @@ static const struct sensor_device_attribute_2 w83795_pwm[][7] = { | |||
1711 | SENSOR_ATTR_PWM(8), | 1797 | SENSOR_ATTR_PWM(8), |
1712 | }; | 1798 | }; |
1713 | 1799 | ||
1800 | static const struct sensor_device_attribute_2 w83795_tss[6] = { | ||
1801 | SENSOR_ATTR_2(temp1_source_sel, S_IWUSR | S_IRUGO, | ||
1802 | show_temp_src, store_temp_src, NOT_USED, 0), | ||
1803 | SENSOR_ATTR_2(temp2_source_sel, S_IWUSR | S_IRUGO, | ||
1804 | show_temp_src, store_temp_src, NOT_USED, 1), | ||
1805 | SENSOR_ATTR_2(temp3_source_sel, S_IWUSR | S_IRUGO, | ||
1806 | show_temp_src, store_temp_src, NOT_USED, 2), | ||
1807 | SENSOR_ATTR_2(temp4_source_sel, S_IWUSR | S_IRUGO, | ||
1808 | show_temp_src, store_temp_src, NOT_USED, 3), | ||
1809 | SENSOR_ATTR_2(temp5_source_sel, S_IWUSR | S_IRUGO, | ||
1810 | show_temp_src, store_temp_src, NOT_USED, 4), | ||
1811 | SENSOR_ATTR_2(temp6_source_sel, S_IWUSR | S_IRUGO, | ||
1812 | show_temp_src, store_temp_src, NOT_USED, 5), | ||
1813 | }; | ||
1814 | |||
1714 | static const struct sensor_device_attribute_2 sda_single_files[] = { | 1815 | static const struct sensor_device_attribute_2 sda_single_files[] = { |
1715 | SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep, | 1816 | SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep, |
1716 | store_chassis_clear, ALARM_STATUS, 46), | 1817 | store_chassis_clear, ALARM_STATUS, 46), |
1717 | SENSOR_ATTR_2(intrusion0_beep, S_IWUSR | S_IRUGO, show_alarm_beep, | ||
1718 | store_beep, BEEP_ENABLE, 46), | ||
1719 | SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_alarm_beep, | ||
1720 | store_beep, BEEP_ENABLE, 47), | ||
1721 | #ifdef CONFIG_SENSORS_W83795_FANCTRL | 1818 | #ifdef CONFIG_SENSORS_W83795_FANCTRL |
1722 | SENSOR_ATTR_2(speed_cruise_tolerance, S_IWUSR | S_IRUGO, show_fanin, | 1819 | SENSOR_ATTR_2(speed_cruise_tolerance, S_IWUSR | S_IRUGO, show_fanin, |
1723 | store_fanin, FANIN_TOL, NOT_USED), | 1820 | store_fanin, FANIN_TOL, NOT_USED), |
@@ -1730,6 +1827,13 @@ static const struct sensor_device_attribute_2 sda_single_files[] = { | |||
1730 | #endif | 1827 | #endif |
1731 | }; | 1828 | }; |
1732 | 1829 | ||
1830 | static const struct sensor_device_attribute_2 sda_beep_files[] = { | ||
1831 | SENSOR_ATTR_2(intrusion0_beep, S_IWUSR | S_IRUGO, show_alarm_beep, | ||
1832 | store_beep, BEEP_ENABLE, 46), | ||
1833 | SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_alarm_beep, | ||
1834 | store_beep, BEEP_ENABLE, 47), | ||
1835 | }; | ||
1836 | |||
1733 | /* | 1837 | /* |
1734 | * Driver interface | 1838 | * Driver interface |
1735 | */ | 1839 | */ |
@@ -1859,6 +1963,8 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, | |||
1859 | if (!(data->has_in & (1 << i))) | 1963 | if (!(data->has_in & (1 << i))) |
1860 | continue; | 1964 | continue; |
1861 | for (j = 0; j < ARRAY_SIZE(w83795_in[0]); j++) { | 1965 | for (j = 0; j < ARRAY_SIZE(w83795_in[0]); j++) { |
1966 | if (j == 4 && !data->enable_beep) | ||
1967 | continue; | ||
1862 | err = fn(dev, &w83795_in[i][j].dev_attr); | 1968 | err = fn(dev, &w83795_in[i][j].dev_attr); |
1863 | if (err) | 1969 | if (err) |
1864 | return err; | 1970 | return err; |
@@ -1869,18 +1975,37 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, | |||
1869 | if (!(data->has_fan & (1 << i))) | 1975 | if (!(data->has_fan & (1 << i))) |
1870 | continue; | 1976 | continue; |
1871 | for (j = 0; j < ARRAY_SIZE(w83795_fan[0]); j++) { | 1977 | for (j = 0; j < ARRAY_SIZE(w83795_fan[0]); j++) { |
1978 | if (j == 3 && !data->enable_beep) | ||
1979 | continue; | ||
1872 | err = fn(dev, &w83795_fan[i][j].dev_attr); | 1980 | err = fn(dev, &w83795_fan[i][j].dev_attr); |
1873 | if (err) | 1981 | if (err) |
1874 | return err; | 1982 | return err; |
1875 | } | 1983 | } |
1876 | } | 1984 | } |
1877 | 1985 | ||
1986 | for (i = 0; i < ARRAY_SIZE(w83795_tss); i++) { | ||
1987 | j = w83795_tss_useful(data, i); | ||
1988 | if (!j) | ||
1989 | continue; | ||
1990 | err = fn(dev, &w83795_tss[i].dev_attr); | ||
1991 | if (err) | ||
1992 | return err; | ||
1993 | } | ||
1994 | |||
1878 | for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) { | 1995 | for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) { |
1879 | err = fn(dev, &sda_single_files[i].dev_attr); | 1996 | err = fn(dev, &sda_single_files[i].dev_attr); |
1880 | if (err) | 1997 | if (err) |
1881 | return err; | 1998 | return err; |
1882 | } | 1999 | } |
1883 | 2000 | ||
2001 | if (data->enable_beep) { | ||
2002 | for (i = 0; i < ARRAY_SIZE(sda_beep_files); i++) { | ||
2003 | err = fn(dev, &sda_beep_files[i].dev_attr); | ||
2004 | if (err) | ||
2005 | return err; | ||
2006 | } | ||
2007 | } | ||
2008 | |||
1884 | #ifdef CONFIG_SENSORS_W83795_FANCTRL | 2009 | #ifdef CONFIG_SENSORS_W83795_FANCTRL |
1885 | for (i = 0; i < data->has_pwm; i++) { | 2010 | for (i = 0; i < data->has_pwm; i++) { |
1886 | for (j = 0; j < ARRAY_SIZE(w83795_pwm[0]); j++) { | 2011 | for (j = 0; j < ARRAY_SIZE(w83795_pwm[0]); j++) { |
@@ -1899,6 +2024,8 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, | |||
1899 | #else | 2024 | #else |
1900 | for (j = 0; j < 8; j++) { | 2025 | for (j = 0; j < 8; j++) { |
1901 | #endif | 2026 | #endif |
2027 | if (j == 7 && !data->enable_beep) | ||
2028 | continue; | ||
1902 | err = fn(dev, &w83795_temp[i][j].dev_attr); | 2029 | err = fn(dev, &w83795_temp[i][j].dev_attr); |
1903 | if (err) | 2030 | if (err) |
1904 | return err; | 2031 | return err; |
@@ -1910,6 +2037,8 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, | |||
1910 | if (!(data->has_dts & (1 << i))) | 2037 | if (!(data->has_dts & (1 << i))) |
1911 | continue; | 2038 | continue; |
1912 | for (j = 0; j < ARRAY_SIZE(w83795_dts[0]); j++) { | 2039 | for (j = 0; j < ARRAY_SIZE(w83795_dts[0]); j++) { |
2040 | if (j == 7 && !data->enable_beep) | ||
2041 | continue; | ||
1913 | err = fn(dev, &w83795_dts[i][j].dev_attr); | 2042 | err = fn(dev, &w83795_dts[i][j].dev_attr); |
1914 | if (err) | 2043 | if (err) |
1915 | return err; | 2044 | return err; |
@@ -2049,6 +2178,18 @@ static int w83795_probe(struct i2c_client *client, | |||
2049 | else | 2178 | else |
2050 | data->has_pwm = 2; | 2179 | data->has_pwm = 2; |
2051 | 2180 | ||
2181 | /* Check if BEEP pin is available */ | ||
2182 | if (data->chip_type == w83795g) { | ||
2183 | /* The W83795G has a dedicated BEEP pin */ | ||
2184 | data->enable_beep = 1; | ||
2185 | } else { | ||
2186 | /* The W83795ADG has a shared pin for OVT# and BEEP, so you | ||
2187 | * can't have both */ | ||
2188 | tmp = w83795_read(client, W83795_REG_OVT_CFG); | ||
2189 | if ((tmp & OVT_CFG_SEL) == 0) | ||
2190 | data->enable_beep = 1; | ||
2191 | } | ||
2192 | |||
2052 | err = w83795_handle_files(dev, device_create_file); | 2193 | err = w83795_handle_files(dev, device_create_file); |
2053 | if (err) | 2194 | if (err) |
2054 | goto exit_remove; | 2195 | goto exit_remove; |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index d231f683f576..6b4cc567645b 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -848,6 +848,18 @@ static int i2c_register_adapter(struct i2c_adapter *adap) | |||
848 | goto out_list; | 848 | goto out_list; |
849 | } | 849 | } |
850 | 850 | ||
851 | /* Sanity checks */ | ||
852 | if (unlikely(adap->name[0] == '\0')) { | ||
853 | pr_err("i2c-core: Attempt to register an adapter with " | ||
854 | "no name!\n"); | ||
855 | return -EINVAL; | ||
856 | } | ||
857 | if (unlikely(!adap->algo)) { | ||
858 | pr_err("i2c-core: Attempt to register adapter '%s' with " | ||
859 | "no algo!\n", adap->name); | ||
860 | return -EINVAL; | ||
861 | } | ||
862 | |||
851 | rt_mutex_init(&adap->bus_lock); | 863 | rt_mutex_init(&adap->bus_lock); |
852 | mutex_init(&adap->userspace_clients_lock); | 864 | mutex_init(&adap->userspace_clients_lock); |
853 | INIT_LIST_HEAD(&adap->userspace_clients); | 865 | INIT_LIST_HEAD(&adap->userspace_clients); |
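The two checks added above make i2c_register_adapter() fail cleanly with -EINVAL instead of crashing later when a driver registers a half-initialised adapter. As a rough sketch only (nothing below is from this patch; all names are invented), a minimal adapter that satisfies both checks looks like this:

#include <linux/module.h>
#include <linux/i2c.h>

static int demo_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	return num;			/* pretend every message succeeded */
}

static u32 demo_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

static const struct i2c_algorithm demo_algo = {
	.master_xfer	= demo_xfer,
	.functionality	= demo_func,
};

static struct i2c_adapter demo_adap = {
	.owner	= THIS_MODULE,
	.algo	= &demo_algo,		/* a NULL .algo is now rejected */
	.name	= "demo adapter",	/* an empty .name is now rejected */
};

/* i2c_add_adapter(&demo_adap) then registers the bus as before. */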
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index d32a4843fc3a..d7a4833be416 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c | |||
@@ -120,7 +120,6 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, | |||
120 | snprintf(priv->adap.name, sizeof(priv->adap.name), | 120 | snprintf(priv->adap.name, sizeof(priv->adap.name), |
121 | "i2c-%d-mux (chan_id %d)", i2c_adapter_id(parent), chan_id); | 121 | "i2c-%d-mux (chan_id %d)", i2c_adapter_id(parent), chan_id); |
122 | priv->adap.owner = THIS_MODULE; | 122 | priv->adap.owner = THIS_MODULE; |
123 | priv->adap.id = parent->id; | ||
124 | priv->adap.algo = &priv->algo; | 123 | priv->adap.algo = &priv->algo; |
125 | priv->adap.algo_data = priv; | 124 | priv->adap.algo_data = priv; |
126 | priv->adap.dev.parent = &parent->dev; | 125 | priv->adap.dev.parent = &parent->dev; |
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c index 6078992da3f0..9292a15ad7c4 100644 --- a/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c | |||
@@ -40,7 +40,6 @@ | |||
40 | #include <linux/highmem.h> | 40 | #include <linux/highmem.h> |
41 | #include <linux/io.h> | 41 | #include <linux/io.h> |
42 | #include <linux/jiffies.h> | 42 | #include <linux/jiffies.h> |
43 | #include <linux/smp_lock.h> | ||
44 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
45 | 44 | ||
46 | #include "ipath_kernel.h" | 45 | #include "ipath_kernel.h" |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index cfc1d65c4577..1e1e347a7715 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -1123,7 +1123,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) | |||
1123 | } | 1123 | } |
1124 | } | 1124 | } |
1125 | 1125 | ||
1126 | static int srp_queuecommand(struct scsi_cmnd *scmnd, | 1126 | static int srp_queuecommand_lck(struct scsi_cmnd *scmnd, |
1127 | void (*done)(struct scsi_cmnd *)) | 1127 | void (*done)(struct scsi_cmnd *)) |
1128 | { | 1128 | { |
1129 | struct srp_target_port *target = host_to_target(scmnd->device->host); | 1129 | struct srp_target_port *target = host_to_target(scmnd->device->host); |
@@ -1196,6 +1196,8 @@ err: | |||
1196 | return SCSI_MLQUEUE_HOST_BUSY; | 1196 | return SCSI_MLQUEUE_HOST_BUSY; |
1197 | } | 1197 | } |
1198 | 1198 | ||
1199 | static DEF_SCSI_QCMD(srp_queuecommand) | ||
1200 | |||
1199 | static int srp_alloc_iu_bufs(struct srp_target_port *target) | 1201 | static int srp_alloc_iu_bufs(struct srp_target_port *target) |
1200 | { | 1202 | { |
1201 | int i; | 1203 | int i; |
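This rename-plus-wrapper pattern recurs throughout the series: the existing handler keeps the old (cmnd, done) prototype under a _lck suffix, and DEF_SCSI_QCMD() generates the new entry point with the (Scsi_Host, cmnd) prototype that takes the host lock and calls it. A minimal sketch of the same conversion on an invented driver (none of these names come from the patch):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Old-style handler, still written against the (cmnd, done) interface. */
static int foo_queuecommand_lck(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	cmd->result = DID_OK << 16;	/* complete immediately for the demo */
	done(cmd);
	return 0;
}

/* Emits foo_queuecommand(struct Scsi_Host *, struct scsi_cmnd *), which
 * grabs the host lock and then calls foo_queuecommand_lck(). */
static DEF_SCSI_QCMD(foo_queuecommand)

static struct scsi_host_template foo_template = {
	.name		= "foo",
	.queuecommand	= foo_queuecommand,	/* the generated wrapper */
};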
diff --git a/drivers/input/input.c b/drivers/input/input.c index 7f26ca6ecf75..db409d6bd5d2 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
25 | #include <linux/mutex.h> | 25 | #include <linux/mutex.h> |
26 | #include <linux/rcupdate.h> | 26 | #include <linux/rcupdate.h> |
27 | #include <linux/smp_lock.h> | ||
28 | #include "input-compat.h" | 27 | #include "input-compat.h" |
29 | 28 | ||
30 | MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); | 29 | MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); |
@@ -753,7 +752,7 @@ static int input_default_setkeycode(struct input_dev *dev, | |||
753 | if (index >= dev->keycodemax) | 752 | if (index >= dev->keycodemax) |
754 | return -EINVAL; | 753 | return -EINVAL; |
755 | 754 | ||
756 | if (dev->keycodesize < sizeof(dev->keycode) && | 755 | if (dev->keycodesize < sizeof(ke->keycode) && |
757 | (ke->keycode >> (dev->keycodesize * 8))) | 756 | (ke->keycode >> (dev->keycodesize * 8))) |
758 | return -EINVAL; | 757 | return -EINVAL; |
759 | 758 | ||
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c index cd82bb125915..b7ba4597f7f0 100644 --- a/drivers/input/serio/serio_raw.c +++ b/drivers/input/serio/serio_raw.c | |||
@@ -11,7 +11,6 @@ | |||
11 | 11 | ||
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/smp_lock.h> | ||
15 | #include <linux/poll.h> | 14 | #include <linux/poll.h> |
16 | #include <linux/module.h> | 15 | #include <linux/module.h> |
17 | #include <linux/serio.h> | 16 | #include <linux/serio.h> |
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index 57b25b84d1fc..0a619c558bfb 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c | |||
@@ -1097,7 +1097,7 @@ store_tabletPointerMode(struct device *dev, struct device_attribute *attr, const | |||
1097 | } | 1097 | } |
1098 | 1098 | ||
1099 | static DEVICE_ATTR(pointer_mode, | 1099 | static DEVICE_ATTR(pointer_mode, |
1100 | S_IRUGO | S_IWUGO, | 1100 | S_IRUGO | S_IWUSR, |
1101 | show_tabletPointerMode, store_tabletPointerMode); | 1101 | show_tabletPointerMode, store_tabletPointerMode); |
1102 | 1102 | ||
1103 | /*********************************************************************** | 1103 | /*********************************************************************** |
@@ -1134,7 +1134,7 @@ store_tabletCoordinateMode(struct device *dev, struct device_attribute *attr, co | |||
1134 | } | 1134 | } |
1135 | 1135 | ||
1136 | static DEVICE_ATTR(coordinate_mode, | 1136 | static DEVICE_ATTR(coordinate_mode, |
1137 | S_IRUGO | S_IWUGO, | 1137 | S_IRUGO | S_IWUSR, |
1138 | show_tabletCoordinateMode, store_tabletCoordinateMode); | 1138 | show_tabletCoordinateMode, store_tabletCoordinateMode); |
1139 | 1139 | ||
1140 | /*********************************************************************** | 1140 | /*********************************************************************** |
@@ -1176,7 +1176,7 @@ store_tabletToolMode(struct device *dev, struct device_attribute *attr, const ch | |||
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | static DEVICE_ATTR(tool_mode, | 1178 | static DEVICE_ATTR(tool_mode, |
1179 | S_IRUGO | S_IWUGO, | 1179 | S_IRUGO | S_IWUSR, |
1180 | show_tabletToolMode, store_tabletToolMode); | 1180 | show_tabletToolMode, store_tabletToolMode); |
1181 | 1181 | ||
1182 | /*********************************************************************** | 1182 | /*********************************************************************** |
@@ -1219,7 +1219,7 @@ store_tabletXtilt(struct device *dev, struct device_attribute *attr, const char | |||
1219 | } | 1219 | } |
1220 | 1220 | ||
1221 | static DEVICE_ATTR(xtilt, | 1221 | static DEVICE_ATTR(xtilt, |
1222 | S_IRUGO | S_IWUGO, show_tabletXtilt, store_tabletXtilt); | 1222 | S_IRUGO | S_IWUSR, show_tabletXtilt, store_tabletXtilt); |
1223 | 1223 | ||
1224 | /*********************************************************************** | 1224 | /*********************************************************************** |
1225 | * support routines for the 'ytilt' file. Note that this file | 1225 | * support routines for the 'ytilt' file. Note that this file |
@@ -1261,7 +1261,7 @@ store_tabletYtilt(struct device *dev, struct device_attribute *attr, const char | |||
1261 | } | 1261 | } |
1262 | 1262 | ||
1263 | static DEVICE_ATTR(ytilt, | 1263 | static DEVICE_ATTR(ytilt, |
1264 | S_IRUGO | S_IWUGO, show_tabletYtilt, store_tabletYtilt); | 1264 | S_IRUGO | S_IWUSR, show_tabletYtilt, store_tabletYtilt); |
1265 | 1265 | ||
1266 | /*********************************************************************** | 1266 | /*********************************************************************** |
1267 | * support routines for the 'jitter' file. Note that this file | 1267 | * support routines for the 'jitter' file. Note that this file |
@@ -1288,7 +1288,7 @@ store_tabletJitterDelay(struct device *dev, struct device_attribute *attr, const | |||
1288 | } | 1288 | } |
1289 | 1289 | ||
1290 | static DEVICE_ATTR(jitter, | 1290 | static DEVICE_ATTR(jitter, |
1291 | S_IRUGO | S_IWUGO, | 1291 | S_IRUGO | S_IWUSR, |
1292 | show_tabletJitterDelay, store_tabletJitterDelay); | 1292 | show_tabletJitterDelay, store_tabletJitterDelay); |
1293 | 1293 | ||
1294 | /*********************************************************************** | 1294 | /*********************************************************************** |
@@ -1317,7 +1317,7 @@ store_tabletProgrammableDelay(struct device *dev, struct device_attribute *attr, | |||
1317 | } | 1317 | } |
1318 | 1318 | ||
1319 | static DEVICE_ATTR(delay, | 1319 | static DEVICE_ATTR(delay, |
1320 | S_IRUGO | S_IWUGO, | 1320 | S_IRUGO | S_IWUSR, |
1321 | show_tabletProgrammableDelay, store_tabletProgrammableDelay); | 1321 | show_tabletProgrammableDelay, store_tabletProgrammableDelay); |
1322 | 1322 | ||
1323 | /*********************************************************************** | 1323 | /*********************************************************************** |
@@ -1406,7 +1406,7 @@ store_tabletStylusUpper(struct device *dev, struct device_attribute *attr, const | |||
1406 | } | 1406 | } |
1407 | 1407 | ||
1408 | static DEVICE_ATTR(stylus_upper, | 1408 | static DEVICE_ATTR(stylus_upper, |
1409 | S_IRUGO | S_IWUGO, | 1409 | S_IRUGO | S_IWUSR, |
1410 | show_tabletStylusUpper, store_tabletStylusUpper); | 1410 | show_tabletStylusUpper, store_tabletStylusUpper); |
1411 | 1411 | ||
1412 | /*********************************************************************** | 1412 | /*********************************************************************** |
@@ -1437,7 +1437,7 @@ store_tabletStylusLower(struct device *dev, struct device_attribute *attr, const | |||
1437 | } | 1437 | } |
1438 | 1438 | ||
1439 | static DEVICE_ATTR(stylus_lower, | 1439 | static DEVICE_ATTR(stylus_lower, |
1440 | S_IRUGO | S_IWUGO, | 1440 | S_IRUGO | S_IWUSR, |
1441 | show_tabletStylusLower, store_tabletStylusLower); | 1441 | show_tabletStylusLower, store_tabletStylusLower); |
1442 | 1442 | ||
1443 | /*********************************************************************** | 1443 | /*********************************************************************** |
@@ -1475,7 +1475,7 @@ store_tabletMouseLeft(struct device *dev, struct device_attribute *attr, const c | |||
1475 | } | 1475 | } |
1476 | 1476 | ||
1477 | static DEVICE_ATTR(mouse_left, | 1477 | static DEVICE_ATTR(mouse_left, |
1478 | S_IRUGO | S_IWUGO, | 1478 | S_IRUGO | S_IWUSR, |
1479 | show_tabletMouseLeft, store_tabletMouseLeft); | 1479 | show_tabletMouseLeft, store_tabletMouseLeft); |
1480 | 1480 | ||
1481 | /*********************************************************************** | 1481 | /*********************************************************************** |
@@ -1505,7 +1505,7 @@ store_tabletMouseMiddle(struct device *dev, struct device_attribute *attr, const | |||
1505 | } | 1505 | } |
1506 | 1506 | ||
1507 | static DEVICE_ATTR(mouse_middle, | 1507 | static DEVICE_ATTR(mouse_middle, |
1508 | S_IRUGO | S_IWUGO, | 1508 | S_IRUGO | S_IWUSR, |
1509 | show_tabletMouseMiddle, store_tabletMouseMiddle); | 1509 | show_tabletMouseMiddle, store_tabletMouseMiddle); |
1510 | 1510 | ||
1511 | /*********************************************************************** | 1511 | /*********************************************************************** |
@@ -1535,7 +1535,7 @@ store_tabletMouseRight(struct device *dev, struct device_attribute *attr, const | |||
1535 | } | 1535 | } |
1536 | 1536 | ||
1537 | static DEVICE_ATTR(mouse_right, | 1537 | static DEVICE_ATTR(mouse_right, |
1538 | S_IRUGO | S_IWUGO, | 1538 | S_IRUGO | S_IWUSR, |
1539 | show_tabletMouseRight, store_tabletMouseRight); | 1539 | show_tabletMouseRight, store_tabletMouseRight); |
1540 | 1540 | ||
1541 | /*********************************************************************** | 1541 | /*********************************************************************** |
@@ -1567,7 +1567,7 @@ store_tabletWheel(struct device *dev, struct device_attribute *attr, const char | |||
1567 | } | 1567 | } |
1568 | 1568 | ||
1569 | static DEVICE_ATTR(wheel, | 1569 | static DEVICE_ATTR(wheel, |
1570 | S_IRUGO | S_IWUGO, show_tabletWheel, store_tabletWheel); | 1570 | S_IRUGO | S_IWUSR, show_tabletWheel, store_tabletWheel); |
1571 | 1571 | ||
1572 | /*********************************************************************** | 1572 | /*********************************************************************** |
1573 | * support routines for the 'execute' file. Note that this file | 1573 | * support routines for the 'execute' file. Note that this file |
@@ -1600,7 +1600,7 @@ store_tabletExecute(struct device *dev, struct device_attribute *attr, const cha | |||
1600 | } | 1600 | } |
1601 | 1601 | ||
1602 | static DEVICE_ATTR(execute, | 1602 | static DEVICE_ATTR(execute, |
1603 | S_IRUGO | S_IWUGO, show_tabletExecute, store_tabletExecute); | 1603 | S_IRUGO | S_IWUSR, show_tabletExecute, store_tabletExecute); |
1604 | 1604 | ||
1605 | /*********************************************************************** | 1605 | /*********************************************************************** |
1606 | * support routines for the 'odm_code' file. Note that this file | 1606 | * support routines for the 'odm_code' file. Note that this file |
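For context on the S_IWUGO to S_IWUSR changes in this file (these are the standard VFS mode bits, not something the patch introduces):

/*
 *   S_IWUGO = S_IWUSR | S_IWGRP | S_IWOTH = 0222   (any user may write)
 *   S_IWUSR                               = 0200   (owner, i.e. root, only)
 *
 * so each attribute moves from 0666 (S_IRUGO | S_IWUGO) to the usual
 * 0644 (S_IRUGO | S_IWUSR) sysfs permissions.
 */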
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 3782f31f06d2..33facd0c45d1 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c | |||
@@ -125,11 +125,22 @@ struct lp5521_chip { | |||
125 | u8 num_leds; | 125 | u8 num_leds; |
126 | }; | 126 | }; |
127 | 127 | ||
128 | #define cdev_to_led(c) container_of(c, struct lp5521_led, cdev) | 128 | static inline struct lp5521_led *cdev_to_led(struct led_classdev *cdev) |
129 | #define engine_to_lp5521(eng) container_of((eng), struct lp5521_chip, \ | 129 | { |
130 | engines[(eng)->id - 1]) | 130 | return container_of(cdev, struct lp5521_led, cdev); |
131 | #define led_to_lp5521(led) container_of((led), struct lp5521_chip, \ | 131 | } |
132 | leds[(led)->id]) | 132 | |
133 | static inline struct lp5521_chip *engine_to_lp5521(struct lp5521_engine *engine) | ||
134 | { | ||
135 | return container_of(engine, struct lp5521_chip, | ||
136 | engines[engine->id - 1]); | ||
137 | } | ||
138 | |||
139 | static inline struct lp5521_chip *led_to_lp5521(struct lp5521_led *led) | ||
140 | { | ||
141 | return container_of(led, struct lp5521_chip, | ||
142 | leds[led->id]); | ||
143 | } | ||
133 | 144 | ||
134 | static void lp5521_led_brightness_work(struct work_struct *work); | 145 | static void lp5521_led_brightness_work(struct work_struct *work); |
135 | 146 | ||
@@ -185,14 +196,17 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern) | |||
185 | 196 | ||
186 | /* move current engine to direct mode and remember the state */ | 197 | /* move current engine to direct mode and remember the state */ |
187 | ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT); | 198 | ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT); |
188 | usleep_range(1000, 10000); | 199 | /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ |
200 | usleep_range(1000, 2000); | ||
189 | ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode); | 201 | ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode); |
190 | 202 | ||
191 | /* For loading, all the engines to load mode */ | 203 | /* For loading, all the engines to load mode */ |
192 | lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); | 204 | lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); |
193 | usleep_range(1000, 10000); | 205 | /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ |
206 | usleep_range(1000, 2000); | ||
194 | lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD); | 207 | lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD); |
195 | usleep_range(1000, 10000); | 208 | /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ |
209 | usleep_range(1000, 2000); | ||
196 | 210 | ||
197 | addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE; | 211 | addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE; |
198 | i2c_smbus_write_i2c_block_data(client, | 212 | i2c_smbus_write_i2c_block_data(client, |
@@ -231,10 +245,6 @@ static int lp5521_configure(struct i2c_client *client, | |||
231 | 245 | ||
232 | lp5521_init_engine(chip, attr_group); | 246 | lp5521_init_engine(chip, attr_group); |
233 | 247 | ||
234 | lp5521_write(client, LP5521_REG_RESET, 0xff); | ||
235 | |||
236 | usleep_range(10000, 20000); | ||
237 | |||
238 | /* Set all PWMs to direct control mode */ | 248 | /* Set all PWMs to direct control mode */ |
239 | ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F); | 249 | ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F); |
240 | 250 | ||
@@ -251,8 +261,8 @@ static int lp5521_configure(struct i2c_client *client, | |||
251 | ret |= lp5521_write(client, LP5521_REG_ENABLE, | 261 | ret |= lp5521_write(client, LP5521_REG_ENABLE, |
252 | LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM | | 262 | LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM | |
253 | LP5521_EXEC_RUN); | 263 | LP5521_EXEC_RUN); |
254 | /* enable takes 500us */ | 264 | /* enable takes 500us. 1 - 2 ms leaves some margin */ |
255 | usleep_range(500, 20000); | 265 | usleep_range(1000, 2000); |
256 | 266 | ||
257 | return ret; | 267 | return ret; |
258 | } | 268 | } |
@@ -305,7 +315,8 @@ static int lp5521_detect(struct i2c_client *client) | |||
305 | LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM); | 315 | LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM); |
306 | if (ret) | 316 | if (ret) |
307 | return ret; | 317 | return ret; |
308 | usleep_range(1000, 10000); | 318 | /* enable takes 500us. 1 - 2 ms leaves some margin */ |
319 | usleep_range(1000, 2000); | ||
309 | ret = lp5521_read(client, LP5521_REG_ENABLE, &buf); | 320 | ret = lp5521_read(client, LP5521_REG_ENABLE, &buf); |
310 | if (ret) | 321 | if (ret) |
311 | return ret; | 322 | return ret; |
@@ -693,11 +704,16 @@ static int lp5521_probe(struct i2c_client *client, | |||
693 | 704 | ||
694 | if (pdata->enable) { | 705 | if (pdata->enable) { |
695 | pdata->enable(0); | 706 | pdata->enable(0); |
696 | usleep_range(1000, 10000); | 707 | usleep_range(1000, 2000); /* Keep enable down at least 1ms */ |
697 | pdata->enable(1); | 708 | pdata->enable(1); |
698 | usleep_range(1000, 10000); /* Spec says min 500us */ | 709 | usleep_range(1000, 2000); /* 500us abs min. */ |
699 | } | 710 | } |
700 | 711 | ||
712 | lp5521_write(client, LP5521_REG_RESET, 0xff); | ||
713 | usleep_range(10000, 20000); /* | ||
714 | * Exact value is not available. 10 - 20ms | ||
715 | * appears to be enough for reset. | ||
716 | */ | ||
701 | ret = lp5521_detect(client); | 717 | ret = lp5521_detect(client); |
702 | 718 | ||
703 | if (ret) { | 719 | if (ret) { |
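The macro-to-inline conversion at the top of this file (and the matching one in leds-lp5523.c below) is mechanical; the inline form reads like an ordinary function and type-checks its argument at the call site. A generic sketch of the pattern, with invented struct names:

#include <linux/kernel.h>
#include <linux/leds.h>

struct demo_chip {
	struct led_classdev	cdev;
	int			id;
};

/* Same pointer arithmetic as the old #define, wrapped in a typed helper. */
static inline struct demo_chip *cdev_to_demo(struct led_classdev *cdev)
{
	return container_of(cdev, struct demo_chip, cdev);
}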
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index 1e11fcc08b28..0cc4ead2fd8b 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c | |||
@@ -134,15 +134,18 @@ struct lp5523_chip { | |||
134 | u8 num_leds; | 134 | u8 num_leds; |
135 | }; | 135 | }; |
136 | 136 | ||
137 | #define cdev_to_led(c) container_of(c, struct lp5523_led, cdev) | 137 | static inline struct lp5523_led *cdev_to_led(struct led_classdev *cdev) |
138 | { | ||
139 | return container_of(cdev, struct lp5523_led, cdev); | ||
140 | } | ||
138 | 141 | ||
139 | static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine) | 142 | static inline struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine) |
140 | { | 143 | { |
141 | return container_of(engine, struct lp5523_chip, | 144 | return container_of(engine, struct lp5523_chip, |
142 | engines[engine->id - 1]); | 145 | engines[engine->id - 1]); |
143 | } | 146 | } |
144 | 147 | ||
145 | static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led) | 148 | static inline struct lp5523_chip *led_to_lp5523(struct lp5523_led *led) |
146 | { | 149 | { |
147 | return container_of(led, struct lp5523_chip, | 150 | return container_of(led, struct lp5523_chip, |
148 | leds[led->id]); | 151 | leds[led->id]); |
@@ -200,13 +203,9 @@ static int lp5523_configure(struct i2c_client *client) | |||
200 | { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0}, | 203 | { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0}, |
201 | }; | 204 | }; |
202 | 205 | ||
203 | lp5523_write(client, LP5523_REG_RESET, 0xff); | ||
204 | |||
205 | usleep_range(10000, 100000); | ||
206 | |||
207 | ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE); | 206 | ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE); |
208 | /* Chip startup time after reset is 500 us */ | 207 | /* Chip startup time is 500 us, 1 - 2 ms gives some margin */ |
209 | usleep_range(1000, 10000); | 208 | usleep_range(1000, 2000); |
210 | 209 | ||
211 | ret |= lp5523_write(client, LP5523_REG_CONFIG, | 210 | ret |= lp5523_write(client, LP5523_REG_CONFIG, |
212 | LP5523_AUTO_INC | LP5523_PWR_SAVE | | 211 | LP5523_AUTO_INC | LP5523_PWR_SAVE | |
@@ -243,8 +242,8 @@ static int lp5523_configure(struct i2c_client *client) | |||
243 | return -1; | 242 | return -1; |
244 | } | 243 | } |
245 | 244 | ||
246 | /* Wait 3ms and check the engine status */ | 245 | /* Let the programs run for couple of ms and check the engine status */ |
247 | usleep_range(3000, 20000); | 246 | usleep_range(3000, 6000); |
248 | lp5523_read(client, LP5523_REG_STATUS, &status); | 247 | lp5523_read(client, LP5523_REG_STATUS, &status); |
249 | status &= LP5523_ENG_STATUS_MASK; | 248 | status &= LP5523_ENG_STATUS_MASK; |
250 | 249 | ||
@@ -449,10 +448,10 @@ static ssize_t lp5523_selftest(struct device *dev, | |||
449 | /* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */ | 448 | /* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */ |
450 | lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL, | 449 | lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL, |
451 | LP5523_EN_LEDTEST | 16); | 450 | LP5523_EN_LEDTEST | 16); |
452 | usleep_range(3000, 10000); | 451 | usleep_range(3000, 6000); /* ADC conversion time is typically 2.7 ms */ |
453 | ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); | 452 | ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); |
454 | if (!(status & LP5523_LEDTEST_DONE)) | 453 | if (!(status & LP5523_LEDTEST_DONE)) |
455 | usleep_range(3000, 10000); | 454 | usleep_range(3000, 6000); /* Was not ready. Wait little bit */ |
456 | 455 | ||
457 | ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd); | 456 | ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd); |
458 | vdd--; /* There may be some fluctuation in measurement */ | 457 | vdd--; /* There may be some fluctuation in measurement */ |
@@ -468,16 +467,16 @@ static ssize_t lp5523_selftest(struct device *dev, | |||
468 | chip->pdata->led_config[i].led_current); | 467 | chip->pdata->led_config[i].led_current); |
469 | 468 | ||
470 | lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff); | 469 | lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff); |
471 | /* let current stabilize 2ms before measurements start */ | 470 | /* let current stabilize 2 - 4ms before measurements start */ |
472 | usleep_range(2000, 10000); | 471 | usleep_range(2000, 4000); |
473 | lp5523_write(chip->client, | 472 | lp5523_write(chip->client, |
474 | LP5523_REG_LED_TEST_CTRL, | 473 | LP5523_REG_LED_TEST_CTRL, |
475 | LP5523_EN_LEDTEST | i); | 474 | LP5523_EN_LEDTEST | i); |
476 | /* ledtest takes 2.7ms */ | 475 | /* ADC conversion time is 2.7 ms typically */ |
477 | usleep_range(3000, 10000); | 476 | usleep_range(3000, 6000); |
478 | ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); | 477 | ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); |
479 | if (!(status & LP5523_LEDTEST_DONE)) | 478 | if (!(status & LP5523_LEDTEST_DONE)) |
480 | usleep_range(3000, 10000); | 479 | usleep_range(3000, 6000);/* Was not ready. Wait. */ |
481 | ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc); | 480 | ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc); |
482 | 481 | ||
483 | if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM) | 482 | if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM) |
@@ -930,11 +929,16 @@ static int lp5523_probe(struct i2c_client *client, | |||
930 | 929 | ||
931 | if (pdata->enable) { | 930 | if (pdata->enable) { |
932 | pdata->enable(0); | 931 | pdata->enable(0); |
933 | usleep_range(1000, 10000); | 932 | usleep_range(1000, 2000); /* Keep enable down at least 1ms */ |
934 | pdata->enable(1); | 933 | pdata->enable(1); |
935 | usleep_range(1000, 10000); /* Spec says min 500us */ | 934 | usleep_range(1000, 2000); /* 500us abs min. */ |
936 | } | 935 | } |
937 | 936 | ||
937 | lp5523_write(client, LP5523_REG_RESET, 0xff); | ||
938 | usleep_range(10000, 20000); /* | ||
939 | * Exact value is not available. 10 - 20ms | ||
940 | * appears to be enough for reset. | ||
941 | */ | ||
938 | ret = lp5523_detect(client); | 942 | ret = lp5523_detect(client); |
939 | if (ret) | 943 | if (ret) |
940 | goto fail2; | 944 | goto fail2; |
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index a688293abd0b..614ebebaaa28 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c | |||
@@ -102,6 +102,7 @@ static struct dmi_system_id __initdata nas_led_whitelist[] = { | |||
102 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00") | 102 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00") |
103 | } | 103 | } |
104 | }, | 104 | }, |
105 | {} | ||
105 | }; | 106 | }; |
106 | 107 | ||
107 | /* | 108 | /* |
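The one-line fix above adds the missing sentinel to the DMI whitelist: dmi_check_system() walks the table until it finds an entry with a NULL ident, so a table without the empty terminator runs off the end of the array. A made-up example of a correctly terminated table (illustration only):

#include <linux/dmi.h>

static struct dmi_system_id __initdata demo_whitelist[] = {
	{
		.ident = "Demo NAS box",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Demo Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "NAS-1000"),
		},
	},
	{ }	/* terminator: stops the dmi_check_system() walk */
};

/* dmi_check_system(demo_whitelist) returns the number of matching entries. */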
diff --git a/drivers/md/md.c b/drivers/md/md.c index 324a3663fcda..84c46a161927 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -1337,7 +1337,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) | |||
1337 | md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, | 1337 | md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, |
1338 | rdev->sb_page); | 1338 | rdev->sb_page); |
1339 | md_super_wait(rdev->mddev); | 1339 | md_super_wait(rdev->mddev); |
1340 | return num_sectors / 2; /* kB for sysfs */ | 1340 | return num_sectors; |
1341 | } | 1341 | } |
1342 | 1342 | ||
1343 | 1343 | ||
@@ -1704,7 +1704,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) | |||
1704 | md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, | 1704 | md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, |
1705 | rdev->sb_page); | 1705 | rdev->sb_page); |
1706 | md_super_wait(rdev->mddev); | 1706 | md_super_wait(rdev->mddev); |
1707 | return num_sectors / 2; /* kB for sysfs */ | 1707 | return num_sectors; |
1708 | } | 1708 | } |
1709 | 1709 | ||
1710 | static struct super_type super_types[] = { | 1710 | static struct super_type super_types[] = { |
@@ -4338,6 +4338,8 @@ static int md_alloc(dev_t dev, char *name) | |||
4338 | if (mddev->kobj.sd && | 4338 | if (mddev->kobj.sd && |
4339 | sysfs_create_group(&mddev->kobj, &md_bitmap_group)) | 4339 | sysfs_create_group(&mddev->kobj, &md_bitmap_group)) |
4340 | printk(KERN_DEBUG "pointless warning\n"); | 4340 | printk(KERN_DEBUG "pointless warning\n"); |
4341 | |||
4342 | blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); | ||
4341 | abort: | 4343 | abort: |
4342 | mutex_unlock(&disks_mutex); | 4344 | mutex_unlock(&disks_mutex); |
4343 | if (!error && mddev->kobj.sd) { | 4345 | if (!error && mddev->kobj.sd) { |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 45f8324196ec..845cf95b612c 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1161,6 +1161,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number) | |||
1161 | * is not possible. | 1161 | * is not possible. |
1162 | */ | 1162 | */ |
1163 | if (!test_bit(Faulty, &rdev->flags) && | 1163 | if (!test_bit(Faulty, &rdev->flags) && |
1164 | !mddev->recovery_disabled && | ||
1164 | mddev->degraded < conf->raid_disks) { | 1165 | mddev->degraded < conf->raid_disks) { |
1165 | err = -EBUSY; | 1166 | err = -EBUSY; |
1166 | goto abort; | 1167 | goto abort; |
diff --git a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c index 3d88542612ea..74ee172b5bc9 100644 --- a/drivers/media/common/saa7146_i2c.c +++ b/drivers/media/common/saa7146_i2c.c | |||
@@ -391,7 +391,6 @@ static int saa7146_i2c_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, in | |||
391 | 391 | ||
392 | /*****************************************************************************/ | 392 | /*****************************************************************************/ |
393 | /* i2c-adapter helper functions */ | 393 | /* i2c-adapter helper functions */ |
394 | #include <linux/i2c-id.h> | ||
395 | 394 | ||
396 | /* exported algorithm data */ | 395 | /* exported algorithm data */ |
397 | static struct i2c_algorithm saa7146_algo = { | 396 | static struct i2c_algorithm saa7146_algo = { |
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c index 4d0646da6087..7ea517b7e186 100644 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
38 | #include <linux/sched.h> | 38 | #include <linux/sched.h> |
39 | #include <linux/smp_lock.h> | ||
40 | #include <linux/kthread.h> | 39 | #include <linux/kthread.h> |
41 | 40 | ||
42 | #include "dvb_ca_en50221.h" | 41 | #include "dvb_ca_en50221.h" |
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c index 1589d5a5cb46..cad6634610ea 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb/dvb-core/dvb_frontend.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <linux/list.h> | 36 | #include <linux/list.h> |
37 | #include <linux/freezer.h> | 37 | #include <linux/freezer.h> |
38 | #include <linux/jiffies.h> | 38 | #include <linux/jiffies.h> |
39 | #include <linux/smp_lock.h> | ||
40 | #include <linux/kthread.h> | 39 | #include <linux/kthread.h> |
41 | #include <asm/processor.h> | 40 | #include <asm/processor.h> |
42 | 41 | ||
diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c index 4caeb163a666..3a7ef71087be 100644 --- a/drivers/media/dvb/ngene/ngene-core.c +++ b/drivers/media/dvb/ngene/ngene-core.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/io.h> | 34 | #include <linux/io.h> |
35 | #include <asm/div64.h> | 35 | #include <asm/div64.h> |
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
37 | #include <linux/smp_lock.h> | ||
38 | #include <linux/timer.h> | 37 | #include <linux/timer.h> |
39 | #include <linux/byteorder/generic.h> | 38 | #include <linux/byteorder/generic.h> |
40 | #include <linux/firmware.h> | 39 | #include <linux/firmware.h> |
diff --git a/drivers/media/dvb/ngene/ngene-dvb.c b/drivers/media/dvb/ngene/ngene-dvb.c index 48f980b21d66..3832e5983c19 100644 --- a/drivers/media/dvb/ngene/ngene-dvb.c +++ b/drivers/media/dvb/ngene/ngene-dvb.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/io.h> | 35 | #include <linux/io.h> |
36 | #include <asm/div64.h> | 36 | #include <asm/div64.h> |
37 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
38 | #include <linux/smp_lock.h> | ||
39 | #include <linux/timer.h> | 38 | #include <linux/timer.h> |
40 | #include <linux/byteorder/generic.h> | 39 | #include <linux/byteorder/generic.h> |
41 | #include <linux/firmware.h> | 40 | #include <linux/firmware.h> |
diff --git a/drivers/media/dvb/ngene/ngene-i2c.c b/drivers/media/dvb/ngene/ngene-i2c.c index c3ae956714e7..d28554f8ce99 100644 --- a/drivers/media/dvb/ngene/ngene-i2c.c +++ b/drivers/media/dvb/ngene/ngene-i2c.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <asm/div64.h> | 37 | #include <asm/div64.h> |
38 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
39 | #include <linux/pci_ids.h> | 39 | #include <linux/pci_ids.h> |
40 | #include <linux/smp_lock.h> | ||
41 | #include <linux/timer.h> | 40 | #include <linux/timer.h> |
42 | #include <linux/byteorder/generic.h> | 41 | #include <linux/byteorder/generic.h> |
43 | #include <linux/firmware.h> | 42 | #include <linux/firmware.h> |
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c index b540e8072e92..e6b2d085a449 100644 --- a/drivers/media/radio/radio-mr800.c +++ b/drivers/media/radio/radio-mr800.c | |||
@@ -58,7 +58,6 @@ | |||
58 | #include <linux/module.h> | 58 | #include <linux/module.h> |
59 | #include <linux/init.h> | 59 | #include <linux/init.h> |
60 | #include <linux/slab.h> | 60 | #include <linux/slab.h> |
61 | #include <linux/smp_lock.h> | ||
62 | #include <linux/input.h> | 61 | #include <linux/input.h> |
63 | #include <linux/videodev2.h> | 62 | #include <linux/videodev2.h> |
64 | #include <media/v4l2-device.h> | 63 | #include <media/v4l2-device.h> |
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h index ea12782359a0..b9914d7a0c9f 100644 --- a/drivers/media/radio/si470x/radio-si470x.h +++ b/drivers/media/radio/si470x/radio-si470x.h | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/smp_lock.h> | ||
35 | #include <linux/input.h> | 34 | #include <linux/input.h> |
36 | #include <linux/version.h> | 35 | #include <linux/version.h> |
37 | #include <linux/videodev2.h> | 36 | #include <linux/videodev2.h> |
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c index 3da6e80e1041..a529619e51f6 100644 --- a/drivers/media/video/bt8xx/bttv-driver.c +++ b/drivers/media/video/bt8xx/bttv-driver.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include <linux/fs.h> | 42 | #include <linux/fs.h> |
43 | #include <linux/kernel.h> | 43 | #include <linux/kernel.h> |
44 | #include <linux/sched.h> | 44 | #include <linux/sched.h> |
45 | #include <linux/smp_lock.h> | ||
46 | #include <linux/interrupt.h> | 45 | #include <linux/interrupt.h> |
47 | #include <linux/kdev_t.h> | 46 | #include <linux/kdev_t.h> |
48 | #include "bttvp.h" | 47 | #include "bttvp.h" |
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c index 417d1d5c73c4..d7c94848249e 100644 --- a/drivers/media/video/cx88/cx88-blackbird.c +++ b/drivers/media/video/cx88/cx88-blackbird.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
34 | #include <linux/device.h> | 34 | #include <linux/device.h> |
35 | #include <linux/firmware.h> | 35 | #include <linux/firmware.h> |
36 | #include <linux/smp_lock.h> | ||
37 | #include <media/v4l2-common.h> | 36 | #include <media/v4l2-common.h> |
38 | #include <media/v4l2-ioctl.h> | 37 | #include <media/v4l2-ioctl.h> |
39 | #include <media/cx2341x.h> | 38 | #include <media/cx2341x.h> |
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c index d2f159daa8b5..88b51194f917 100644 --- a/drivers/media/video/cx88/cx88-video.c +++ b/drivers/media/video/cx88/cx88-video.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/kmod.h> | 31 | #include <linux/kmod.h> |
32 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/smp_lock.h> | ||
35 | #include <linux/interrupt.h> | 34 | #include <linux/interrupt.h> |
36 | #include <linux/dma-mapping.h> | 35 | #include <linux/dma-mapping.h> |
37 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
diff --git a/drivers/media/video/imx074.c b/drivers/media/video/imx074.c index 380e459f899d..27b5dfdfbb93 100644 --- a/drivers/media/video/imx074.c +++ b/drivers/media/video/imx074.c | |||
@@ -451,7 +451,6 @@ static int imx074_probe(struct i2c_client *client, | |||
451 | ret = imx074_video_probe(icd, client); | 451 | ret = imx074_video_probe(icd, client); |
452 | if (ret < 0) { | 452 | if (ret < 0) { |
453 | icd->ops = NULL; | 453 | icd->ops = NULL; |
454 | i2c_set_clientdata(client, NULL); | ||
455 | kfree(priv); | 454 | kfree(priv); |
456 | return ret; | 455 | return ret; |
457 | } | 456 | } |
@@ -468,7 +467,6 @@ static int imx074_remove(struct i2c_client *client) | |||
468 | icd->ops = NULL; | 467 | icd->ops = NULL; |
469 | if (icl->free_bus) | 468 | if (icl->free_bus) |
470 | icl->free_bus(icl); | 469 | icl->free_bus(icl); |
471 | i2c_set_clientdata(client, NULL); | ||
472 | client->driver = NULL; | 470 | client->driver = NULL; |
473 | kfree(priv); | 471 | kfree(priv); |
474 | 472 | ||
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c index 5a000c65ae98..ce4a75375909 100644 --- a/drivers/media/video/ir-kbd-i2c.c +++ b/drivers/media/video/ir-kbd-i2c.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <linux/errno.h> | 44 | #include <linux/errno.h> |
45 | #include <linux/slab.h> | 45 | #include <linux/slab.h> |
46 | #include <linux/i2c.h> | 46 | #include <linux/i2c.h> |
47 | #include <linux/i2c-id.h> | ||
48 | #include <linux/workqueue.h> | 47 | #include <linux/workqueue.h> |
49 | 48 | ||
50 | #include <media/ir-core.h> | 49 | #include <media/ir-core.h> |
diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c index 31f19373bbae..cf93de988068 100644 --- a/drivers/media/video/ov6650.c +++ b/drivers/media/video/ov6650.c | |||
@@ -1174,7 +1174,6 @@ static int ov6650_probe(struct i2c_client *client, | |||
1174 | 1174 | ||
1175 | if (ret) { | 1175 | if (ret) { |
1176 | icd->ops = NULL; | 1176 | icd->ops = NULL; |
1177 | i2c_set_clientdata(client, NULL); | ||
1178 | kfree(priv); | 1177 | kfree(priv); |
1179 | } | 1178 | } |
1180 | 1179 | ||
@@ -1185,7 +1184,6 @@ static int ov6650_remove(struct i2c_client *client) | |||
1185 | { | 1184 | { |
1186 | struct ov6650 *priv = to_ov6650(client); | 1185 | struct ov6650 *priv = to_ov6650(client); |
1187 | 1186 | ||
1188 | i2c_set_clientdata(client, NULL); | ||
1189 | kfree(priv); | 1187 | kfree(priv); |
1190 | return 0; | 1188 | return 0; |
1191 | } | 1189 | } |
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c index e62beb4efdb4..f3dc89da4c4e 100644 --- a/drivers/media/video/pwc/pwc-if.c +++ b/drivers/media/video/pwc/pwc-if.c | |||
@@ -62,7 +62,6 @@ | |||
62 | #include <linux/module.h> | 62 | #include <linux/module.h> |
63 | #include <linux/poll.h> | 63 | #include <linux/poll.h> |
64 | #include <linux/slab.h> | 64 | #include <linux/slab.h> |
65 | #include <linux/smp_lock.h> | ||
66 | #ifdef CONFIG_USB_PWC_INPUT_EVDEV | 65 | #ifdef CONFIG_USB_PWC_INPUT_EVDEV |
67 | #include <linux/usb/input.h> | 66 | #include <linux/usb/input.h> |
68 | #endif | 67 | #endif |
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c index f5a46c458717..a845753665c1 100644 --- a/drivers/media/video/s2255drv.c +++ b/drivers/media/video/s2255drv.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include <linux/videodev2.h> | 49 | #include <linux/videodev2.h> |
50 | #include <linux/version.h> | 50 | #include <linux/version.h> |
51 | #include <linux/mm.h> | 51 | #include <linux/mm.h> |
52 | #include <linux/smp_lock.h> | ||
53 | #include <media/videobuf-vmalloc.h> | 52 | #include <media/videobuf-vmalloc.h> |
54 | #include <media/v4l2-common.h> | 53 | #include <media/v4l2-common.h> |
55 | #include <media/v4l2-device.h> | 54 | #include <media/v4l2-device.h> |
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c index 1467a30a434f..b890aafe7d64 100644 --- a/drivers/media/video/saa7134/saa7134-empress.c +++ b/drivers/media/video/saa7134/saa7134-empress.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/list.h> | 21 | #include <linux/list.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/smp_lock.h> | ||
25 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
26 | 25 | ||
27 | #include "saa7134-reg.h" | 26 | #include "saa7134-reg.h" |
diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h index 1d9c5cbbbc52..041ae8e20f68 100644 --- a/drivers/media/video/saa7164/saa7164.h +++ b/drivers/media/video/saa7164/saa7164.h | |||
@@ -58,7 +58,6 @@ | |||
58 | #include <media/tveeprom.h> | 58 | #include <media/tveeprom.h> |
59 | #include <media/videobuf-dma-sg.h> | 59 | #include <media/videobuf-dma-sg.h> |
60 | #include <media/videobuf-dvb.h> | 60 | #include <media/videobuf-dvb.h> |
61 | #include <linux/smp_lock.h> | ||
62 | #include <dvb_demux.h> | 61 | #include <dvb_demux.h> |
63 | #include <dvb_frontend.h> | 62 | #include <dvb_frontend.h> |
64 | #include <dvb_net.h> | 63 | #include <dvb_net.h> |
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c index db6b828594f5..011c0c386995 100644 --- a/drivers/media/video/usbvision/usbvision-video.c +++ b/drivers/media/video/usbvision/usbvision-video.c | |||
@@ -50,7 +50,6 @@ | |||
50 | #include <linux/list.h> | 50 | #include <linux/list.h> |
51 | #include <linux/timer.h> | 51 | #include <linux/timer.h> |
52 | #include <linux/slab.h> | 52 | #include <linux/slab.h> |
53 | #include <linux/smp_lock.h> | ||
54 | #include <linux/mm.h> | 53 | #include <linux/mm.h> |
55 | #include <linux/highmem.h> | 54 | #include <linux/highmem.h> |
56 | #include <linux/vmalloc.h> | 55 | #include <linux/vmalloc.h> |
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c index 86294ed35c9b..e30e8dfb6205 100644 --- a/drivers/media/video/v4l2-compat-ioctl32.c +++ b/drivers/media/video/v4l2-compat-ioctl32.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/videodev.h> | 18 | #include <linux/videodev.h> |
19 | #include <linux/videodev2.h> | 19 | #include <linux/videodev2.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/smp_lock.h> | ||
22 | #include <media/v4l2-ioctl.h> | 21 | #include <media/v4l2-ioctl.h> |
23 | 22 | ||
24 | #ifdef CONFIG_COMPAT | 23 | #ifdef CONFIG_COMPAT |
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index e15220ff52fc..d784c36707c0 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c | |||
@@ -97,8 +97,7 @@ static u8 mptfcInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; | |||
97 | 97 | ||
98 | static int mptfc_target_alloc(struct scsi_target *starget); | 98 | static int mptfc_target_alloc(struct scsi_target *starget); |
99 | static int mptfc_slave_alloc(struct scsi_device *sdev); | 99 | static int mptfc_slave_alloc(struct scsi_device *sdev); |
100 | static int mptfc_qcmd(struct scsi_cmnd *SCpnt, | 100 | static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt); |
101 | void (*done)(struct scsi_cmnd *)); | ||
102 | static void mptfc_target_destroy(struct scsi_target *starget); | 101 | static void mptfc_target_destroy(struct scsi_target *starget); |
103 | static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout); | 102 | static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout); |
104 | static void __devexit mptfc_remove(struct pci_dev *pdev); | 103 | static void __devexit mptfc_remove(struct pci_dev *pdev); |
@@ -650,7 +649,7 @@ mptfc_slave_alloc(struct scsi_device *sdev) | |||
650 | } | 649 | } |
651 | 650 | ||
652 | static int | 651 | static int |
653 | mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 652 | mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
654 | { | 653 | { |
655 | struct mptfc_rport_info *ri; | 654 | struct mptfc_rport_info *ri; |
656 | struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device)); | 655 | struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device)); |
@@ -681,6 +680,8 @@ mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
681 | return mptscsih_qcmd(SCpnt,done); | 680 | return mptscsih_qcmd(SCpnt,done); |
682 | } | 681 | } |
683 | 682 | ||
683 | static DEF_SCSI_QCMD(mptfc_qcmd) | ||
684 | |||
684 | /* | 685 | /* |
685 | * mptfc_display_port_link_speed - displaying link speed | 686 | * mptfc_display_port_link_speed - displaying link speed |
686 | * @ioc: Pointer to MPT_ADAPTER structure | 687 | * @ioc: Pointer to MPT_ADAPTER structure |
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 83a5115f0251..d48c2c6058e1 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -1889,7 +1889,7 @@ mptsas_slave_alloc(struct scsi_device *sdev) | |||
1889 | } | 1889 | } |
1890 | 1890 | ||
1891 | static int | 1891 | static int |
1892 | mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 1892 | mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
1893 | { | 1893 | { |
1894 | MPT_SCSI_HOST *hd; | 1894 | MPT_SCSI_HOST *hd; |
1895 | MPT_ADAPTER *ioc; | 1895 | MPT_ADAPTER *ioc; |
@@ -1913,6 +1913,8 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
1913 | return mptscsih_qcmd(SCpnt,done); | 1913 | return mptscsih_qcmd(SCpnt,done); |
1914 | } | 1914 | } |
1915 | 1915 | ||
1916 | static DEF_SCSI_QCMD(mptsas_qcmd) | ||
1917 | |||
1916 | /** | 1918 | /** |
1917 | * mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout | 1919 | * mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout |
1918 | * if the device under question is currently in the | 1920 | * if the device under question is currently in the |
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index 0e2803155ae2..6d9568d2ec59 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c | |||
@@ -780,7 +780,7 @@ static int mptspi_slave_configure(struct scsi_device *sdev) | |||
780 | } | 780 | } |
781 | 781 | ||
782 | static int | 782 | static int |
783 | mptspi_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 783 | mptspi_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
784 | { | 784 | { |
785 | struct _MPT_SCSI_HOST *hd = shost_priv(SCpnt->device->host); | 785 | struct _MPT_SCSI_HOST *hd = shost_priv(SCpnt->device->host); |
786 | VirtDevice *vdevice = SCpnt->device->hostdata; | 786 | VirtDevice *vdevice = SCpnt->device->hostdata; |
@@ -805,6 +805,8 @@ mptspi_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
805 | return mptscsih_qcmd(SCpnt,done); | 805 | return mptscsih_qcmd(SCpnt,done); |
806 | } | 806 | } |
807 | 807 | ||
808 | static DEF_SCSI_QCMD(mptspi_qcmd) | ||
809 | |||
808 | static void mptspi_slave_destroy(struct scsi_device *sdev) | 810 | static void mptspi_slave_destroy(struct scsi_device *sdev) |
809 | { | 811 | { |
810 | struct scsi_target *starget = scsi_target(sdev); | 812 | struct scsi_target *starget = scsi_target(sdev); |
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c index ea6b2197da8a..97bdf82ec905 100644 --- a/drivers/message/i2o/i2o_scsi.c +++ b/drivers/message/i2o/i2o_scsi.c | |||
@@ -506,7 +506,7 @@ static struct i2o_driver i2o_scsi_driver = { | |||
506 | * Locks: takes the controller lock on error path only | 506 | * Locks: takes the controller lock on error path only |
507 | */ | 507 | */ |
508 | 508 | ||
509 | static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, | 509 | static int i2o_scsi_queuecommand_lck(struct scsi_cmnd *SCpnt, |
510 | void (*done) (struct scsi_cmnd *)) | 510 | void (*done) (struct scsi_cmnd *)) |
511 | { | 511 | { |
512 | struct i2o_controller *c; | 512 | struct i2o_controller *c; |
@@ -688,7 +688,9 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, | |||
688 | 688 | ||
689 | exit: | 689 | exit: |
690 | return rc; | 690 | return rc; |
691 | }; | 691 | } |
692 | |||
693 | static DEF_SCSI_QCMD(i2o_scsi_queuecommand) | ||
692 | 694 | ||
693 | /** | 695 | /** |
694 | * i2o_scsi_abort - abort a running command | 696 | * i2o_scsi_abort - abort a running command |
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c index 0ed09358027e..644d4cd071cc 100644 --- a/drivers/misc/apds9802als.c +++ b/drivers/misc/apds9802als.c | |||
@@ -251,7 +251,6 @@ static int apds9802als_probe(struct i2c_client *client, | |||
251 | 251 | ||
252 | return res; | 252 | return res; |
253 | als_error1: | 253 | als_error1: |
254 | i2c_set_clientdata(client, NULL); | ||
255 | kfree(data); | 254 | kfree(data); |
256 | return res; | 255 | return res; |
257 | } | 256 | } |
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c index ca47e6285075..307aada5fffe 100644 --- a/drivers/misc/isl29020.c +++ b/drivers/misc/isl29020.c | |||
@@ -183,9 +183,7 @@ static int isl29020_probe(struct i2c_client *client, | |||
183 | 183 | ||
184 | static int isl29020_remove(struct i2c_client *client) | 184 | static int isl29020_remove(struct i2c_client *client) |
185 | { | 185 | { |
186 | struct als_data *data = i2c_get_clientdata(client); | ||
187 | sysfs_remove_group(&client->dev.kobj, &m_als_gr); | 186 | sysfs_remove_group(&client->dev.kobj, &m_als_gr); |
188 | kfree(data); | ||
189 | return 0; | 187 | return 0; |
190 | } | 188 | } |
191 | 189 | ||
@@ -245,6 +243,6 @@ static void __exit sensor_isl29020_exit(void) | |||
245 | module_init(sensor_isl29020_init); | 243 | module_init(sensor_isl29020_init); |
246 | module_exit(sensor_isl29020_exit); | 244 | module_exit(sensor_isl29020_exit); |
247 | 245 | ||
248 | MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com"); | 246 | MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>"); |
249 | MODULE_DESCRIPTION("Intersil isl29020 ALS Driver"); | 247 | MODULE_DESCRIPTION("Intersil isl29020 ALS Driver"); |
250 | MODULE_LICENSE("GPL v2"); | 248 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index d551f09ccb79..6956f7e7d439 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c | |||
@@ -439,18 +439,23 @@ xpc_discovery(void) | |||
439 | * nodes that can comprise an access protection grouping. The access | 439 | * nodes that can comprise an access protection grouping. The access |
440 | * protection is in regards to memory, IOI and IPI. | 440 | * protection is in regards to memory, IOI and IPI. |
441 | */ | 441 | */ |
442 | max_regions = 64; | ||
443 | region_size = xp_region_size; | 442 | region_size = xp_region_size; |
444 | 443 | ||
445 | switch (region_size) { | 444 | if (is_uv()) |
446 | case 128: | 445 | max_regions = 256; |
447 | max_regions *= 2; | 446 | else { |
448 | case 64: | 447 | max_regions = 64; |
449 | max_regions *= 2; | 448 | |
450 | case 32: | 449 | switch (region_size) { |
451 | max_regions *= 2; | 450 | case 128: |
452 | region_size = 16; | 451 | max_regions *= 2; |
453 | DBUG_ON(!is_shub2()); | 452 | case 64: |
453 | max_regions *= 2; | ||
454 | case 32: | ||
455 | max_regions *= 2; | ||
456 | region_size = 16; | ||
457 | DBUG_ON(!is_shub2()); | ||
458 | } | ||
454 | } | 459 | } |
455 | 460 | ||
456 | for (region = 0; region < max_regions; region++) { | 461 | for (region = 0; region < max_regions; region++) { |
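The switch in the non-UV branch above relies on intentional fall-through, so the doublings accumulate; spelled out, the resulting limits are:

/*
 *   region_size 128:  64 * 2 * 2 * 2 = 512
 *   region_size  64:  64 * 2 * 2     = 256
 *   region_size  32:  64 * 2         = 128  (and region_size is forced to 16)
 *   region_size  16:  64              (no case matches)
 *
 * On UV hardware the switch is bypassed entirely and max_regions is 256.
 */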
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index e1da258bbfb7..0a92436f0538 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
@@ -699,7 +699,8 @@ DEFINE_WINDOW_IO(32) | |||
699 | #define DEVICE_PCI(dev) NULL | 699 | #define DEVICE_PCI(dev) NULL |
700 | #endif | 700 | #endif |
701 | 701 | ||
702 | #define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL) | 702 | #define VORTEX_PCI(vp) \ |
703 | ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)) | ||
703 | 704 | ||
704 | #ifdef CONFIG_EISA | 705 | #ifdef CONFIG_EISA |
705 | #define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL) | 706 | #define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL) |
@@ -707,7 +708,8 @@ DEFINE_WINDOW_IO(32) | |||
707 | #define DEVICE_EISA(dev) NULL | 708 | #define DEVICE_EISA(dev) NULL |
708 | #endif | 709 | #endif |
709 | 710 | ||
710 | #define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL) | 711 | #define VORTEX_EISA(vp) \ |
712 | ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)) | ||
711 | 713 | ||
712 | /* The action to take with a media selection timer tick. | 714 | /* The action to take with a media selection timer tick. |
713 | Note that we deviate from the 3Com order by checking 10base2 before AUI. | 715 | Note that we deviate from the 3Com order by checking 10base2 before AUI. |
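The added casts give VORTEX_PCI()/VORTEX_EISA() a concrete pointer type even in configurations where DEVICE_PCI()/DEVICE_EISA() collapse to a bare NULL, so callers get real type checking on the result. A hypothetical caller, only to show the effect (vp is the driver's vortex_private pointer):

    /* Hypothetical use: the macro now always yields a struct pci_dev *. */
    struct pci_dev *pdev = VORTEX_PCI(vp);

    if (pdev)
            dev_dbg(&pdev->dev, "instance is PCI-attached\n");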
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index ac422cd332ea..dd16e83933a2 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c | |||
@@ -490,13 +490,11 @@ static inline unsigned int cp_rx_csum_ok (u32 status) | |||
490 | { | 490 | { |
491 | unsigned int protocol = (status >> 16) & 0x3; | 491 | unsigned int protocol = (status >> 16) & 0x3; |
492 | 492 | ||
493 | if (likely((protocol == RxProtoTCP) && (!(status & TCPFail)))) | 493 | if (((protocol == RxProtoTCP) && !(status & TCPFail)) || |
494 | ((protocol == RxProtoUDP) && !(status & UDPFail))) | ||
494 | return 1; | 495 | return 1; |
495 | else if ((protocol == RxProtoUDP) && (!(status & UDPFail))) | 496 | else |
496 | return 1; | 497 | return 0; |
497 | else if ((protocol == RxProtoIP) && (!(status & IPFail))) | ||
498 | return 1; | ||
499 | return 0; | ||
500 | } | 498 | } |
501 | 499 | ||
502 | static int cp_rx_poll(struct napi_struct *napi, int budget) | 500 | static int cp_rx_poll(struct napi_struct *napi, int budget) |
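cp_rx_csum_ok() now only reports success for hardware-verified TCP and UDP checksums; the RxProtoIP/IPFail case is dropped because a validated IP header checksum says nothing about the L4 payload, which is what CHECKSUM_UNNECESSARY asserts further up the stack. Roughly how the result is consumed on the receive path (status and skb come from the driver's rx loop):

    /* Sketch of the caller-side idiom for the simplified helper. */
    if (cp_rx_csum_ok(status))
            skb->ip_summed = CHECKSUM_UNNECESSARY;
    else
            skb_checksum_none_assert(skb);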
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f6668cdaac85..43db398437b7 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -2945,6 +2945,18 @@ source "drivers/s390/net/Kconfig" | |||
2945 | 2945 | ||
2946 | source "drivers/net/caif/Kconfig" | 2946 | source "drivers/net/caif/Kconfig" |
2947 | 2947 | ||
2948 | config TILE_NET | ||
2949 | tristate "Tilera GBE/XGBE network driver support" | ||
2950 | depends on TILE | ||
2951 | default y | ||
2952 | select CRC32 | ||
2953 | help | ||
2954 | This is a standard Linux network device driver for the | ||
2955 | on-chip Tilera Gigabit Ethernet and XAUI interfaces. | ||
2956 | |||
2957 | To compile this driver as a module, choose M here: the module | ||
2958 | will be called tile_net. | ||
2959 | |||
2948 | config XEN_NETDEV_FRONTEND | 2960 | config XEN_NETDEV_FRONTEND |
2949 | tristate "Xen network device frontend driver" | 2961 | tristate "Xen network device frontend driver" |
2950 | depends on XEN | 2962 | depends on XEN |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 652fc6b98039..b90738d13994 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -301,3 +301,4 @@ obj-$(CONFIG_CAIF) += caif/ | |||
301 | 301 | ||
302 | obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ | 302 | obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ |
303 | obj-$(CONFIG_PCH_GBE) += pch_gbe/ | 303 | obj-$(CONFIG_PCH_GBE) += pch_gbe/ |
304 | obj-$(CONFIG_TILE_NET) += tile/ | ||
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c index 919080b2c3a5..1bf672009948 100644 --- a/drivers/net/atl1c/atl1c_hw.c +++ b/drivers/net/atl1c/atl1c_hw.c | |||
@@ -82,7 +82,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw) | |||
82 | addr[0] = addr[1] = 0; | 82 | addr[0] = addr[1] = 0; |
83 | AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); | 83 | AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); |
84 | if (atl1c_check_eeprom_exist(hw)) { | 84 | if (atl1c_check_eeprom_exist(hw)) { |
85 | if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) { | 85 | if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { |
86 | /* Enable OTP CLK */ | 86 | /* Enable OTP CLK */ |
87 | if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { | 87 | if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { |
88 | otp_ctrl_data |= OTP_CTRL_CLK_EN; | 88 | otp_ctrl_data |= OTP_CTRL_CLK_EN; |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index c36cd2ffbadc..93354eee2cfd 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
@@ -2458,6 +2458,12 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) | |||
2458 | int status, i = 0, num_imgs = 0; | 2458 | int status, i = 0, num_imgs = 0; |
2459 | const u8 *p; | 2459 | const u8 *p; |
2460 | 2460 | ||
2461 | if (!netif_running(adapter->netdev)) { | ||
2462 | dev_err(&adapter->pdev->dev, | ||
2463 | "Firmware load not allowed (interface is down)\n"); | ||
2464 | return -EPERM; | ||
2465 | } | ||
2466 | |||
2461 | strcpy(fw_file, func); | 2467 | strcpy(fw_file, func); |
2462 | 2468 | ||
2463 | status = request_firmware(&fw, fw_file, &adapter->pdev->dev); | 2469 | status = request_firmware(&fw, fw_file, &adapter->pdev->dev); |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index e9ad16f00b56..9709b8569666 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
@@ -9064,7 +9064,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
9064 | default: | 9064 | default: |
9065 | pr_err("Unknown board_type (%ld), aborting\n", | 9065 | pr_err("Unknown board_type (%ld), aborting\n", |
9066 | ent->driver_data); | 9066 | ent->driver_data); |
9067 | return ENODEV; | 9067 | return -ENODEV; |
9068 | } | 9068 | } |
9069 | 9069 | ||
9070 | cid_count += CNIC_CONTEXT_USE; | 9070 | cid_count += CNIC_CONTEXT_USE; |
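Kernel convention is to report failure as a negative errno; a bare ENODEV is +19, which slips past callers that test for a negative return and surfaces to user space as a nonsense value. A tiny, hypothetical illustration of the convention the one-character fix restores:

    #include <linux/errno.h>

    /* Hypothetical helper: "return ENODEV;" (+19) would evade "ret < 0" checks. */
    static int check_board(int known_board)
    {
            if (!known_board)
                    return -ENODEV;
            return 0;
    }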
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index bdb68a600382..71a169740d05 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -878,8 +878,10 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev) | |||
878 | rcu_read_lock(); | 878 | rcu_read_lock(); |
879 | in_dev = __in_dev_get_rcu(dev); | 879 | in_dev = __in_dev_get_rcu(dev); |
880 | if (in_dev) { | 880 | if (in_dev) { |
881 | read_lock(&in_dev->mc_list_lock); | ||
881 | for (im = in_dev->mc_list; im; im = im->next) | 882 | for (im = in_dev->mc_list; im; im = im->next) |
882 | ip_mc_rejoin_group(im); | 883 | ip_mc_rejoin_group(im); |
884 | read_unlock(&in_dev->mc_list_lock); | ||
883 | } | 885 | } |
884 | 886 | ||
885 | rcu_read_unlock(); | 887 | rcu_read_unlock(); |
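__in_dev_get_rcu() only guarantees that the in_device itself stays valid for the duration of the RCU read section; in this kernel the multicast list hanging off it is protected by its own mc_list_lock, so walking in_dev->mc_list without it can race with concurrent joins and leaves. The complete pattern, as a sketch with the declarations spelled out:

    struct in_device *in_dev;
    struct ip_mc_list *im;

    rcu_read_lock();
    in_dev = __in_dev_get_rcu(dev);
    if (in_dev) {
            read_lock(&in_dev->mc_list_lock);       /* pin the mc list */
            for (im = in_dev->mc_list; im; im = im->next)
                    ip_mc_rejoin_group(im);
            read_unlock(&in_dev->mc_list_lock);
    }
    rcu_read_unlock();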
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 8b4cea57a6c5..20da1996d354 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c | |||
@@ -635,8 +635,8 @@ int cfspi_spi_probe(struct platform_device *pdev) | |||
635 | 635 | ||
636 | ndev = alloc_netdev(sizeof(struct cfspi), | 636 | ndev = alloc_netdev(sizeof(struct cfspi), |
637 | "cfspi%d", cfspi_setup); | 637 | "cfspi%d", cfspi_setup); |
638 | if (!dev) | 638 | if (!ndev) |
639 | return -ENODEV; | 639 | return -ENOMEM; |
640 | 640 | ||
641 | cfspi = netdev_priv(ndev); | 641 | cfspi = netdev_priv(ndev); |
642 | netif_stop_queue(ndev); | 642 | netif_stop_queue(ndev); |
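Two bugs in one error path: the NULL check tested dev (the platform device) instead of the freshly allocated ndev, and a failed allocation is -ENOMEM, not -ENODEV. The corrected idiom, as a sketch (alloc_netdev takes three arguments in this kernel, and cfspi_setup is the driver's existing setup callback):

    struct net_device *ndev;

    ndev = alloc_netdev(sizeof(struct cfspi), "cfspi%d", cfspi_setup);
    if (!ndev)
            return -ENOMEM;         /* allocation failure, not a missing device */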
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 4686c3983fc3..4d62f7bfa036 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -31,7 +31,7 @@ | |||
31 | 31 | ||
32 | char e1000_driver_name[] = "e1000"; | 32 | char e1000_driver_name[] = "e1000"; |
33 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | 33 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; |
34 | #define DRV_VERSION "7.3.21-k6-NAPI" | 34 | #define DRV_VERSION "7.3.21-k8-NAPI" |
35 | const char e1000_driver_version[] = DRV_VERSION; | 35 | const char e1000_driver_version[] = DRV_VERSION; |
36 | static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; | 36 | static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; |
37 | 37 | ||
@@ -485,9 +485,6 @@ void e1000_down(struct e1000_adapter *adapter) | |||
485 | struct net_device *netdev = adapter->netdev; | 485 | struct net_device *netdev = adapter->netdev; |
486 | u32 rctl, tctl; | 486 | u32 rctl, tctl; |
487 | 487 | ||
488 | /* signal that we're down so the interrupt handler does not | ||
489 | * reschedule our watchdog timer */ | ||
490 | set_bit(__E1000_DOWN, &adapter->flags); | ||
491 | 488 | ||
492 | /* disable receives in the hardware */ | 489 | /* disable receives in the hardware */ |
493 | rctl = er32(RCTL); | 490 | rctl = er32(RCTL); |
@@ -508,6 +505,13 @@ void e1000_down(struct e1000_adapter *adapter) | |||
508 | 505 | ||
509 | e1000_irq_disable(adapter); | 506 | e1000_irq_disable(adapter); |
510 | 507 | ||
508 | /* | ||
509 | * Setting DOWN must be after irq_disable to prevent | ||
510 | * a screaming interrupt. Setting DOWN also prevents | ||
511 | * timers and tasks from rescheduling. | ||
512 | */ | ||
513 | set_bit(__E1000_DOWN, &adapter->flags); | ||
514 | |||
511 | del_timer_sync(&adapter->tx_fifo_stall_timer); | 515 | del_timer_sync(&adapter->tx_fifo_stall_timer); |
512 | del_timer_sync(&adapter->watchdog_timer); | 516 | del_timer_sync(&adapter->watchdog_timer); |
513 | del_timer_sync(&adapter->phy_info_timer); | 517 | del_timer_sync(&adapter->phy_info_timer); |
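The __E1000_DOWN flag is no longer raised at the top of e1000_down(); per the new comment it must be set only after e1000_irq_disable(), otherwise the ISR can bail out early while the device still has an interrupt asserted (a "screaming" interrupt), yet it still has to be set before the del_timer_sync() calls so timers and tasks cannot rearm themselves. The ordering, distilled into a sketch (hardware register writes and flushes omitted):

    /* Sketch of the teardown ordering established above. */
    static void example_down(struct e1000_adapter *adapter)
    {
            /* 1. stop rx/tx in hardware and flush pending work */
            e1000_irq_disable(adapter);                 /* 2. mask interrupts     */
            set_bit(__E1000_DOWN, &adapter->flags);     /* 3. only now mark DOWN  */
            del_timer_sync(&adapter->watchdog_timer);   /* 4. timers cannot rearm */
    }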
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 49e4ce1246a7..d1bec6269173 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -577,11 +577,10 @@ static int gfar_parse_group(struct device_node *np, | |||
577 | irq_of_parse_and_map(np, 1); | 577 | irq_of_parse_and_map(np, 1); |
578 | priv->gfargrp[priv->num_grps].interruptError = | 578 | priv->gfargrp[priv->num_grps].interruptError = |
579 | irq_of_parse_and_map(np,2); | 579 | irq_of_parse_and_map(np,2); |
580 | if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 || | 580 | if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ || |
581 | priv->gfargrp[priv->num_grps].interruptReceive < 0 || | 581 | priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ || |
582 | priv->gfargrp[priv->num_grps].interruptError < 0) { | 582 | priv->gfargrp[priv->num_grps].interruptError == NO_IRQ) |
583 | return -EINVAL; | 583 | return -EINVAL; |
584 | } | ||
585 | } | 584 | } |
586 | 585 | ||
587 | priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; | 586 | priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; |
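irq_of_parse_and_map() returns an unsigned int and signals failure with NO_IRQ (0 on the PowerPC platforms gianfar lives on), so the old "< 0" comparison could never fire. A minimal sketch of the working check (np is the device_node being parsed):

    unsigned int irq = irq_of_parse_and_map(np, 0);

    if (irq == NO_IRQ)      /* "irq < 0" is always false for an unsigned value */
            return -EINVAL;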
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c index dc0198092343..aa93655c3aa7 100644 --- a/drivers/net/ipg.c +++ b/drivers/net/ipg.c | |||
@@ -88,16 +88,14 @@ static const char *ipg_brand_name[] = { | |||
88 | "IC PLUS IP1000 1000/100/10 based NIC", | 88 | "IC PLUS IP1000 1000/100/10 based NIC", |
89 | "Sundance Technology ST2021 based NIC", | 89 | "Sundance Technology ST2021 based NIC", |
90 | "Tamarack Microelectronics TC9020/9021 based NIC", | 90 | "Tamarack Microelectronics TC9020/9021 based NIC", |
91 | "Tamarack Microelectronics TC9020/9021 based NIC", | ||
92 | "D-Link NIC IP1000A" | 91 | "D-Link NIC IP1000A" |
93 | }; | 92 | }; |
94 | 93 | ||
95 | static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = { | 94 | static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = { |
96 | { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, | 95 | { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, |
97 | { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, | 96 | { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, |
98 | { PCI_VDEVICE(SUNDANCE, 0x1021), 2 }, | 97 | { PCI_VDEVICE(DLINK, 0x9021), 2 }, |
99 | { PCI_VDEVICE(DLINK, 0x9021), 3 }, | 98 | { PCI_VDEVICE(DLINK, 0x4020), 3 }, |
100 | { PCI_VDEVICE(DLINK, 0x4020), 4 }, | ||
101 | { 0, } | 99 | { 0, } |
102 | }; | 100 | }; |
103 | 101 | ||
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index 00b38bccd6d0..52a7c86af663 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c | |||
@@ -258,7 +258,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate) | |||
258 | 258 | ||
259 | /* Baud Rate Error Correction x 10000 */ | 259 | /* Baud Rate Error Correction x 10000 */ |
260 | u32 rate_err_array[] = { | 260 | u32 rate_err_array[] = { |
261 | 0000, 0625, 1250, 1875, | 261 | 0, 625, 1250, 1875, |
262 | 2500, 3125, 3750, 4375, | 262 | 2500, 3125, 3750, 4375, |
263 | 5000, 5625, 6250, 6875, | 263 | 5000, 5625, 6250, 6875, |
264 | 7500, 8125, 8750, 9375, | 264 | 7500, 8125, 8750, 9375, |
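A leading zero makes a C integer constant octal, so the old table entry 0625 was actually 405, silently skewing the baud-rate error correction (0000 happened to be harmless). For example:

    int a = 0625;   /* octal: 6*64 + 2*8 + 5 = 405 */
    int b =  625;   /* decimal 625 */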
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index f0bd1a1aba3a..e8b9c53c304b 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -30,11 +30,14 @@ | |||
30 | #include <linux/ethtool.h> | 30 | #include <linux/ethtool.h> |
31 | #include <linux/phy.h> | 31 | #include <linux/phy.h> |
32 | #include <linux/marvell_phy.h> | 32 | #include <linux/marvell_phy.h> |
33 | #include <linux/of.h> | ||
33 | 34 | ||
34 | #include <asm/io.h> | 35 | #include <asm/io.h> |
35 | #include <asm/irq.h> | 36 | #include <asm/irq.h> |
36 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
37 | 38 | ||
39 | #define MII_MARVELL_PHY_PAGE 22 | ||
40 | |||
38 | #define MII_M1011_IEVENT 0x13 | 41 | #define MII_M1011_IEVENT 0x13 |
39 | #define MII_M1011_IEVENT_CLEAR 0x0000 | 42 | #define MII_M1011_IEVENT_CLEAR 0x0000 |
40 | 43 | ||
@@ -80,7 +83,6 @@ | |||
80 | #define MII_88E1121_PHY_LED_CTRL 16 | 83 | #define MII_88E1121_PHY_LED_CTRL 16 |
81 | #define MII_88E1121_PHY_LED_PAGE 3 | 84 | #define MII_88E1121_PHY_LED_PAGE 3 |
82 | #define MII_88E1121_PHY_LED_DEF 0x0030 | 85 | #define MII_88E1121_PHY_LED_DEF 0x0030 |
83 | #define MII_88E1121_PHY_PAGE 22 | ||
84 | 86 | ||
85 | #define MII_M1011_PHY_STATUS 0x11 | 87 | #define MII_M1011_PHY_STATUS 0x11 |
86 | #define MII_M1011_PHY_STATUS_1000 0x8000 | 88 | #define MII_M1011_PHY_STATUS_1000 0x8000 |
@@ -186,13 +188,94 @@ static int marvell_config_aneg(struct phy_device *phydev) | |||
186 | return 0; | 188 | return 0; |
187 | } | 189 | } |
188 | 190 | ||
191 | #ifdef CONFIG_OF_MDIO | ||
192 | /* | ||
193 | * Set and/or override some configuration registers based on the | ||
194 | * marvell,reg-init property stored in the of_node for the phydev. | ||
195 | * | ||
196 | * marvell,reg-init = <reg-page reg mask value>,...; | ||
197 | * | ||
198 | * There may be one or more sets of <reg-page reg mask value>: | ||
199 | * | ||
200 | * reg-page: which register bank to use. | ||
201 | * reg: the register. | ||
202 | * mask: if non-zero, ANDed with existing register value. | ||
203 | * value: ORed with the masked value and written to the register. | ||
204 | * | ||
205 | */ | ||
206 | static int marvell_of_reg_init(struct phy_device *phydev) | ||
207 | { | ||
208 | const __be32 *paddr; | ||
209 | int len, i, saved_page, current_page, page_changed, ret; | ||
210 | |||
211 | if (!phydev->dev.of_node) | ||
212 | return 0; | ||
213 | |||
214 | paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len); | ||
215 | if (!paddr || len < (4 * sizeof(*paddr))) | ||
216 | return 0; | ||
217 | |||
218 | saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE); | ||
219 | if (saved_page < 0) | ||
220 | return saved_page; | ||
221 | page_changed = 0; | ||
222 | current_page = saved_page; | ||
223 | |||
224 | ret = 0; | ||
225 | len /= sizeof(*paddr); | ||
226 | for (i = 0; i < len - 3; i += 4) { | ||
227 | u16 reg_page = be32_to_cpup(paddr + i); | ||
228 | u16 reg = be32_to_cpup(paddr + i + 1); | ||
229 | u16 mask = be32_to_cpup(paddr + i + 2); | ||
230 | u16 val_bits = be32_to_cpup(paddr + i + 3); | ||
231 | int val; | ||
232 | |||
233 | if (reg_page != current_page) { | ||
234 | current_page = reg_page; | ||
235 | page_changed = 1; | ||
236 | ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page); | ||
237 | if (ret < 0) | ||
238 | goto err; | ||
239 | } | ||
240 | |||
241 | val = 0; | ||
242 | if (mask) { | ||
243 | val = phy_read(phydev, reg); | ||
244 | if (val < 0) { | ||
245 | ret = val; | ||
246 | goto err; | ||
247 | } | ||
248 | val &= mask; | ||
249 | } | ||
250 | val |= val_bits; | ||
251 | |||
252 | ret = phy_write(phydev, reg, val); | ||
253 | if (ret < 0) | ||
254 | goto err; | ||
255 | |||
256 | } | ||
257 | err: | ||
258 | if (page_changed) { | ||
259 | i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page); | ||
260 | if (ret == 0) | ||
261 | ret = i; | ||
262 | } | ||
263 | return ret; | ||
264 | } | ||
265 | #else | ||
266 | static int marvell_of_reg_init(struct phy_device *phydev) | ||
267 | { | ||
268 | return 0; | ||
269 | } | ||
270 | #endif /* CONFIG_OF_MDIO */ | ||
271 | |||
189 | static int m88e1121_config_aneg(struct phy_device *phydev) | 272 | static int m88e1121_config_aneg(struct phy_device *phydev) |
190 | { | 273 | { |
191 | int err, oldpage, mscr; | 274 | int err, oldpage, mscr; |
192 | 275 | ||
193 | oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); | 276 | oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); |
194 | 277 | ||
195 | err = phy_write(phydev, MII_88E1121_PHY_PAGE, | 278 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, |
196 | MII_88E1121_PHY_MSCR_PAGE); | 279 | MII_88E1121_PHY_MSCR_PAGE); |
197 | if (err < 0) | 280 | if (err < 0) |
198 | return err; | 281 | return err; |
@@ -218,7 +301,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev) | |||
218 | return err; | 301 | return err; |
219 | } | 302 | } |
220 | 303 | ||
221 | phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); | 304 | phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); |
222 | 305 | ||
223 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); | 306 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); |
224 | if (err < 0) | 307 | if (err < 0) |
@@ -229,11 +312,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev) | |||
229 | if (err < 0) | 312 | if (err < 0) |
230 | return err; | 313 | return err; |
231 | 314 | ||
232 | oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); | 315 | oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); |
233 | 316 | ||
234 | phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); | 317 | phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); |
235 | phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); | 318 | phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); |
236 | phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); | 319 | phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); |
237 | 320 | ||
238 | err = genphy_config_aneg(phydev); | 321 | err = genphy_config_aneg(phydev); |
239 | 322 | ||
@@ -244,9 +327,9 @@ static int m88e1318_config_aneg(struct phy_device *phydev) | |||
244 | { | 327 | { |
245 | int err, oldpage, mscr; | 328 | int err, oldpage, mscr; |
246 | 329 | ||
247 | oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); | 330 | oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); |
248 | 331 | ||
249 | err = phy_write(phydev, MII_88E1121_PHY_PAGE, | 332 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, |
250 | MII_88E1121_PHY_MSCR_PAGE); | 333 | MII_88E1121_PHY_MSCR_PAGE); |
251 | if (err < 0) | 334 | if (err < 0) |
252 | return err; | 335 | return err; |
@@ -258,7 +341,7 @@ static int m88e1318_config_aneg(struct phy_device *phydev) | |||
258 | if (err < 0) | 341 | if (err < 0) |
259 | return err; | 342 | return err; |
260 | 343 | ||
261 | err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); | 344 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); |
262 | if (err < 0) | 345 | if (err < 0) |
263 | return err; | 346 | return err; |
264 | 347 | ||
@@ -368,6 +451,9 @@ static int m88e1111_config_init(struct phy_device *phydev) | |||
368 | return err; | 451 | return err; |
369 | } | 452 | } |
370 | 453 | ||
454 | err = marvell_of_reg_init(phydev); | ||
455 | if (err < 0) | ||
456 | return err; | ||
371 | 457 | ||
372 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); | 458 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); |
373 | if (err < 0) | 459 | if (err < 0) |
@@ -398,7 +484,7 @@ static int m88e1118_config_init(struct phy_device *phydev) | |||
398 | int err; | 484 | int err; |
399 | 485 | ||
400 | /* Change address */ | 486 | /* Change address */ |
401 | err = phy_write(phydev, 0x16, 0x0002); | 487 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002); |
402 | if (err < 0) | 488 | if (err < 0) |
403 | return err; | 489 | return err; |
404 | 490 | ||
@@ -408,7 +494,7 @@ static int m88e1118_config_init(struct phy_device *phydev) | |||
408 | return err; | 494 | return err; |
409 | 495 | ||
410 | /* Change address */ | 496 | /* Change address */ |
411 | err = phy_write(phydev, 0x16, 0x0003); | 497 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003); |
412 | if (err < 0) | 498 | if (err < 0) |
413 | return err; | 499 | return err; |
414 | 500 | ||
@@ -420,8 +506,42 @@ static int m88e1118_config_init(struct phy_device *phydev) | |||
420 | if (err < 0) | 506 | if (err < 0) |
421 | return err; | 507 | return err; |
422 | 508 | ||
509 | err = marvell_of_reg_init(phydev); | ||
510 | if (err < 0) | ||
511 | return err; | ||
512 | |||
423 | /* Reset address */ | 513 | /* Reset address */ |
424 | err = phy_write(phydev, 0x16, 0x0); | 514 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0); |
515 | if (err < 0) | ||
516 | return err; | ||
517 | |||
518 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); | ||
519 | if (err < 0) | ||
520 | return err; | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | static int m88e1149_config_init(struct phy_device *phydev) | ||
526 | { | ||
527 | int err; | ||
528 | |||
529 | /* Change address */ | ||
530 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002); | ||
531 | if (err < 0) | ||
532 | return err; | ||
533 | |||
534 | /* Enable 1000 Mbit */ | ||
535 | err = phy_write(phydev, 0x15, 0x1048); | ||
536 | if (err < 0) | ||
537 | return err; | ||
538 | |||
539 | err = marvell_of_reg_init(phydev); | ||
540 | if (err < 0) | ||
541 | return err; | ||
542 | |||
543 | /* Reset address */ | ||
544 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0); | ||
425 | if (err < 0) | 545 | if (err < 0) |
426 | return err; | 546 | return err; |
427 | 547 | ||
@@ -491,6 +611,10 @@ static int m88e1145_config_init(struct phy_device *phydev) | |||
491 | } | 611 | } |
492 | } | 612 | } |
493 | 613 | ||
614 | err = marvell_of_reg_init(phydev); | ||
615 | if (err < 0) | ||
616 | return err; | ||
617 | |||
494 | return 0; | 618 | return 0; |
495 | } | 619 | } |
496 | 620 | ||
@@ -685,6 +809,19 @@ static struct phy_driver marvell_drivers[] = { | |||
685 | .driver = { .owner = THIS_MODULE }, | 809 | .driver = { .owner = THIS_MODULE }, |
686 | }, | 810 | }, |
687 | { | 811 | { |
812 | .phy_id = MARVELL_PHY_ID_88E1149R, | ||
813 | .phy_id_mask = MARVELL_PHY_ID_MASK, | ||
814 | .name = "Marvell 88E1149R", | ||
815 | .features = PHY_GBIT_FEATURES, | ||
816 | .flags = PHY_HAS_INTERRUPT, | ||
817 | .config_init = &m88e1149_config_init, | ||
818 | .config_aneg = &m88e1118_config_aneg, | ||
819 | .read_status = &genphy_read_status, | ||
820 | .ack_interrupt = &marvell_ack_interrupt, | ||
821 | .config_intr = &marvell_config_intr, | ||
822 | .driver = { .owner = THIS_MODULE }, | ||
823 | }, | ||
824 | { | ||
688 | .phy_id = MARVELL_PHY_ID_88E1240, | 825 | .phy_id = MARVELL_PHY_ID_88E1240, |
689 | .phy_id_mask = MARVELL_PHY_ID_MASK, | 826 | .phy_id_mask = MARVELL_PHY_ID_MASK, |
690 | .name = "Marvell 88E1240", | 827 | .name = "Marvell 88E1240", |
@@ -735,6 +872,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { | |||
735 | { 0x01410e10, 0xfffffff0 }, | 872 | { 0x01410e10, 0xfffffff0 }, |
736 | { 0x01410cb0, 0xfffffff0 }, | 873 | { 0x01410cb0, 0xfffffff0 }, |
737 | { 0x01410cd0, 0xfffffff0 }, | 874 | { 0x01410cd0, 0xfffffff0 }, |
875 | { 0x01410e50, 0xfffffff0 }, | ||
738 | { 0x01410e30, 0xfffffff0 }, | 876 | { 0x01410e30, 0xfffffff0 }, |
739 | { 0x01410e90, 0xfffffff0 }, | 877 | { 0x01410e90, 0xfffffff0 }, |
740 | { } | 878 | { } |
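Each 4-cell group in the new marvell,reg-init property is applied as "select page, optional read-modify-write, restore page". A hypothetical entry <3 0x10 0xff00 0x001a> (page, register, mask and value chosen only for illustration) would expand to roughly the following PHY accesses inside marvell_of_reg_init(), error handling omitted:

    int oldpage, val;

    oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);  /* remember current page */
    phy_write(phydev, MII_MARVELL_PHY_PAGE, 3);        /* select reg-page 3     */
    val = phy_read(phydev, 0x10) & 0xff00;             /* apply the mask        */
    phy_write(phydev, 0x10, val | 0x001a);             /* OR in the value bits  */
    phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);  /* restore the page      */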
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index c30e0fe55a31..528eaef5308f 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c | |||
@@ -62,15 +62,15 @@ static const u32 default_msg = | |||
62 | /* NETIF_MSG_PKTDATA | */ | 62 | /* NETIF_MSG_PKTDATA | */ |
63 | NETIF_MSG_HW | NETIF_MSG_WOL | 0; | 63 | NETIF_MSG_HW | NETIF_MSG_WOL | 0; |
64 | 64 | ||
65 | static int debug = 0x00007fff; /* defaults above */ | 65 | static int debug = -1; /* defaults above */ |
66 | module_param(debug, int, 0); | 66 | module_param(debug, int, 0664); |
67 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | 67 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); |
68 | 68 | ||
69 | #define MSIX_IRQ 0 | 69 | #define MSIX_IRQ 0 |
70 | #define MSI_IRQ 1 | 70 | #define MSI_IRQ 1 |
71 | #define LEG_IRQ 2 | 71 | #define LEG_IRQ 2 |
72 | static int qlge_irq_type = MSIX_IRQ; | 72 | static int qlge_irq_type = MSIX_IRQ; |
73 | module_param(qlge_irq_type, int, MSIX_IRQ); | 73 | module_param(qlge_irq_type, int, 0664); |
74 | MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); | 74 | MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); |
75 | 75 | ||
76 | static int qlge_mpi_coredump; | 76 | static int qlge_mpi_coredump; |
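The third argument of module_param() is the sysfs permission mode for /sys/module/qlge/parameters/<name>, not a default value; passing MSIX_IRQ (0) there merely hid qlge_irq_type from sysfs. With debug defaulting to -1 the message mask presumably falls back to the driver's default_msg via the usual netif_msg_init() handling. The corrected declarations, for reference:

    static int debug = -1;                  /* -1 = use the driver's default mask */
    module_param(debug, int, 0664);         /* rw for owner/group, ro for others  */
    MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");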
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 4c4d16905efb..7d33ef4bcb4a 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -4440,8 +4440,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) | |||
4440 | u32 status = opts1 & RxProtoMask; | 4440 | u32 status = opts1 & RxProtoMask; |
4441 | 4441 | ||
4442 | if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || | 4442 | if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || |
4443 | ((status == RxProtoUDP) && !(opts1 & UDPFail)) || | 4443 | ((status == RxProtoUDP) && !(opts1 & UDPFail))) |
4444 | ((status == RxProtoIP) && !(opts1 & IPFail))) | ||
4445 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 4444 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
4446 | else | 4445 | else |
4447 | skb_checksum_none_assert(skb); | 4446 | skb_checksum_none_assert(skb); |
diff --git a/drivers/net/tile/Makefile b/drivers/net/tile/Makefile new file mode 100644 index 000000000000..f634f142cab4 --- /dev/null +++ b/drivers/net/tile/Makefile | |||
@@ -0,0 +1,10 @@ | |||
1 | # | ||
2 | # Makefile for the TILE on-chip networking support. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_TILE_NET) += tile_net.o | ||
6 | ifdef CONFIG_TILEGX | ||
7 | tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o | ||
8 | else | ||
9 | tile_net-objs := tilepro.o | ||
10 | endif | ||
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c new file mode 100644 index 000000000000..0e6bac5ec65b --- /dev/null +++ b/drivers/net/tile/tilepro.c | |||
@@ -0,0 +1,2406 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/moduleparam.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> /* printk() */ | ||
20 | #include <linux/slab.h> /* kmalloc() */ | ||
21 | #include <linux/errno.h> /* error codes */ | ||
22 | #include <linux/types.h> /* size_t */ | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/in.h> | ||
25 | #include <linux/netdevice.h> /* struct device, and other headers */ | ||
26 | #include <linux/etherdevice.h> /* eth_type_trans */ | ||
27 | #include <linux/skbuff.h> | ||
28 | #include <linux/ioctl.h> | ||
29 | #include <linux/cdev.h> | ||
30 | #include <linux/hugetlb.h> | ||
31 | #include <linux/in6.h> | ||
32 | #include <linux/timer.h> | ||
33 | #include <linux/io.h> | ||
34 | #include <asm/checksum.h> | ||
35 | #include <asm/homecache.h> | ||
36 | |||
37 | #include <hv/drv_xgbe_intf.h> | ||
38 | #include <hv/drv_xgbe_impl.h> | ||
39 | #include <hv/hypervisor.h> | ||
40 | #include <hv/netio_intf.h> | ||
41 | |||
42 | /* For TSO */ | ||
43 | #include <linux/ip.h> | ||
44 | #include <linux/tcp.h> | ||
45 | |||
46 | |||
47 | /* There is no singlethread_cpu, so schedule work on the current cpu. */ | ||
48 | #define singlethread_cpu -1 | ||
49 | |||
50 | |||
51 | /* | ||
52 | * First, "tile_net_init_module()" initializes all four "devices" which | ||
53 | * can be used by linux. | ||
54 | * | ||
55 | * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes | ||
56 | * the network cpus, then uses "tile_net_open_aux()" to initialize | ||
57 | * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all | ||
58 | * the tiles, provide buffers to LIPP, allow ingress to start, and | ||
59 | * turn on hypervisor interrupt handling (and NAPI) on all tiles. | ||
60 | * | ||
61 | * If registration fails due to the link being down, then "retry_work" | ||
62 | * is used to keep calling "tile_net_open_inner()" until it succeeds. | ||
63 | * | ||
64 | * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to | ||
65 | * stop egress, drain the LIPP buffers, unregister all the tiles, stop | ||
66 | * LIPP/LEPP, and wipe the LEPP queue. | ||
67 | * | ||
68 | * We start out with the ingress interrupt enabled on each CPU. When | ||
69 | * this interrupt fires, we disable it, and call "napi_schedule()". | ||
70 | * This will cause "tile_net_poll()" to be called, which will pull | ||
71 | * packets from the netio queue, filtering them out, or passing them | ||
72 | * to "netif_receive_skb()". If our budget is exhausted, we will | ||
73 | * return, knowing we will be called again later. Otherwise, we | ||
74 | * reenable the ingress interrupt, and call "napi_complete()". | ||
75 | * | ||
76 | * | ||
77 | * NOTE: The use of "native_driver" ensures that EPP exists, and that | ||
78 | * "epp_sendv" is legal, and that "LIPP" is being used. | ||
79 | * | ||
80 | * NOTE: Failing to free completions for an arbitrarily long time | ||
81 | * (which is defined to be illegal) does in fact cause bizarre | ||
82 | * problems. The "egress_timer" helps prevent this from happening. | ||
83 | * | ||
84 | * NOTE: The egress code can be interrupted by the interrupt handler. | ||
85 | */ | ||
86 | |||
87 | |||
88 | /* HACK: Allow use of "jumbo" packets. */ | ||
89 | /* This should be 1500 if "jumbo" is not set in LIPP. */ | ||
90 | /* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */ | ||
91 | /* ISSUE: This has not been thoroughly tested (except at 1500). */ | ||
92 | #define TILE_NET_MTU 1500 | ||
93 | |||
94 | /* HACK: Define to support GSO. */ | ||
95 | /* ISSUE: This may actually hurt performance of the TCP blaster. */ | ||
96 | /* #define TILE_NET_GSO */ | ||
97 | |||
98 | /* Define this to collapse "duplicate" acks. */ | ||
99 | /* #define IGNORE_DUP_ACKS */ | ||
100 | |||
101 | /* HACK: Define this to verify incoming packets. */ | ||
102 | /* #define TILE_NET_VERIFY_INGRESS */ | ||
103 | |||
104 | /* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */ | ||
105 | #define TILE_NET_TX_QUEUE_LEN 0 | ||
106 | |||
107 | /* Define to dump packets (prints out the whole packet on tx and rx). */ | ||
108 | /* #define TILE_NET_DUMP_PACKETS */ | ||
109 | |||
110 | /* Define to enable debug spew (all PDEBUG's are enabled). */ | ||
111 | /* #define TILE_NET_DEBUG */ | ||
112 | |||
113 | |||
114 | /* Define to activate paranoia checks. */ | ||
115 | /* #define TILE_NET_PARANOIA */ | ||
116 | |||
117 | /* Default transmit lockup timeout period, in jiffies. */ | ||
118 | #define TILE_NET_TIMEOUT (5 * HZ) | ||
119 | |||
120 | /* Default retry interval for bringing up the NetIO interface, in jiffies. */ | ||
121 | #define TILE_NET_RETRY_INTERVAL (5 * HZ) | ||
122 | |||
123 | /* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */ | ||
124 | #define TILE_NET_DEVS 4 | ||
125 | |||
126 | |||
127 | |||
128 | /* Paranoia. */ | ||
129 | #if NET_IP_ALIGN != LIPP_PACKET_PADDING | ||
130 | #error "NET_IP_ALIGN must match LIPP_PACKET_PADDING." | ||
131 | #endif | ||
132 | |||
133 | |||
134 | /* Debug print. */ | ||
135 | #ifdef TILE_NET_DEBUG | ||
136 | #define PDEBUG(fmt, args...) net_printk(fmt, ## args) | ||
137 | #else | ||
138 | #define PDEBUG(fmt, args...) | ||
139 | #endif | ||
140 | |||
141 | |||
142 | MODULE_AUTHOR("Tilera"); | ||
143 | MODULE_LICENSE("GPL"); | ||
144 | |||
145 | |||
146 | #define IS_MULTICAST(mac_addr) \ | ||
147 | (((u8 *)(mac_addr))[0] & 0x01) | ||
148 | |||
149 | #define IS_BROADCAST(mac_addr) \ | ||
150 | (((u16 *)(mac_addr))[0] == 0xffff) | ||
151 | |||
152 | |||
153 | /* | ||
154 | * Queue of incoming packets for a specific cpu and device. | ||
155 | * | ||
156 | * Includes a pointer to the "system" data, and the actual "user" data. | ||
157 | */ | ||
158 | struct tile_netio_queue { | ||
159 | netio_queue_impl_t *__system_part; | ||
160 | netio_queue_user_impl_t __user_part; | ||
161 | |||
162 | }; | ||
163 | |||
164 | |||
165 | /* | ||
166 | * Statistics counters for a specific cpu and device. | ||
167 | */ | ||
168 | struct tile_net_stats_t { | ||
169 | u32 rx_packets; | ||
170 | u32 rx_bytes; | ||
171 | u32 tx_packets; | ||
172 | u32 tx_bytes; | ||
173 | }; | ||
174 | |||
175 | |||
176 | /* | ||
177 | * Info for a specific cpu and device. | ||
178 | * | ||
179 | * ISSUE: There is a "dev" pointer in "napi" as well. | ||
180 | */ | ||
181 | struct tile_net_cpu { | ||
182 | /* The NAPI struct. */ | ||
183 | struct napi_struct napi; | ||
184 | /* Packet queue. */ | ||
185 | struct tile_netio_queue queue; | ||
186 | /* Statistics. */ | ||
187 | struct tile_net_stats_t stats; | ||
188 | /* ISSUE: Is this needed? */ | ||
189 | bool napi_enabled; | ||
190 | /* True if this tile has successfully registered with the IPP. */ | ||
191 | bool registered; | ||
192 | /* True if the link was down last time we tried to register. */ | ||
193 | bool link_down; | ||
194 | /* True if "egress_timer" is scheduled. */ | ||
195 | bool egress_timer_scheduled; | ||
196 | /* Number of small sk_buffs which must still be provided. */ | ||
197 | unsigned int num_needed_small_buffers; | ||
198 | /* Number of large sk_buffs which must still be provided. */ | ||
199 | unsigned int num_needed_large_buffers; | ||
200 | /* A timer for handling egress completions. */ | ||
201 | struct timer_list egress_timer; | ||
202 | }; | ||
203 | |||
204 | |||
205 | /* | ||
206 | * Info for a specific device. | ||
207 | */ | ||
208 | struct tile_net_priv { | ||
209 | /* Our network device. */ | ||
210 | struct net_device *dev; | ||
211 | /* The actual egress queue. */ | ||
212 | lepp_queue_t *epp_queue; | ||
213 | /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */ | ||
214 | spinlock_t cmd_lock; | ||
215 | /* Protects "epp_queue->comp_head". */ | ||
216 | spinlock_t comp_lock; | ||
217 | /* The hypervisor handle for this interface. */ | ||
218 | int hv_devhdl; | ||
219 | /* The intr bit mask that IDs this device. */ | ||
220 | u32 intr_id; | ||
221 | /* True iff "tile_net_open_aux()" has succeeded. */ | ||
222 | int partly_opened; | ||
223 | /* True iff "tile_net_open_inner()" has succeeded. */ | ||
224 | int fully_opened; | ||
225 | /* Effective network cpus. */ | ||
226 | struct cpumask network_cpus_map; | ||
227 | /* Number of network cpus. */ | ||
228 | int network_cpus_count; | ||
229 | /* Credits per network cpu. */ | ||
230 | int network_cpus_credits; | ||
231 | /* Network stats. */ | ||
232 | struct net_device_stats stats; | ||
233 | /* For NetIO bringup retries. */ | ||
234 | struct delayed_work retry_work; | ||
235 | /* Quick access to per cpu data. */ | ||
236 | struct tile_net_cpu *cpu[NR_CPUS]; | ||
237 | }; | ||
238 | |||
239 | |||
240 | /* | ||
241 | * The actual devices (xgbe0, xgbe1, gbe0, gbe1). | ||
242 | */ | ||
243 | static struct net_device *tile_net_devs[TILE_NET_DEVS]; | ||
244 | |||
245 | /* | ||
246 | * The "tile_net_cpu" structures for each device. | ||
247 | */ | ||
248 | static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0); | ||
249 | static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1); | ||
250 | static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0); | ||
251 | static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1); | ||
252 | |||
253 | |||
254 | /* | ||
255 | * True if "network_cpus" was specified. | ||
256 | */ | ||
257 | static bool network_cpus_used; | ||
258 | |||
259 | /* | ||
260 | * The actual cpus in "network_cpus". | ||
261 | */ | ||
262 | static struct cpumask network_cpus_map; | ||
263 | |||
264 | |||
265 | |||
266 | #ifdef TILE_NET_DEBUG | ||
267 | /* | ||
268 | * printk with extra stuff. | ||
269 | * | ||
270 | * We print the CPU we're running in brackets. | ||
271 | */ | ||
272 | static void net_printk(char *fmt, ...) | ||
273 | { | ||
274 | int i; | ||
275 | int len; | ||
276 | va_list args; | ||
277 | static char buf[256]; | ||
278 | |||
279 | len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id()); | ||
280 | va_start(args, fmt); | ||
281 | i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args); | ||
282 | va_end(args); | ||
283 | buf[255] = '\0'; | ||
284 | pr_notice(buf); | ||
285 | } | ||
286 | #endif | ||
287 | |||
288 | |||
289 | #ifdef TILE_NET_DUMP_PACKETS | ||
290 | /* | ||
291 | * Dump a packet. | ||
292 | */ | ||
293 | static void dump_packet(unsigned char *data, unsigned long length, char *s) | ||
294 | { | ||
295 | unsigned long i; | ||
296 | static unsigned int count; | ||
297 | |||
298 | pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n", | ||
299 | data, length, s, count++); | ||
300 | |||
301 | pr_info("\n"); | ||
302 | |||
303 | for (i = 0; i < length; i++) { | ||
304 | if ((i & 0xf) == 0) | ||
305 | sprintf(buf, "%8.8lx:", i); | ||
306 | sprintf(buf + strlen(buf), " %2.2x", data[i]); | ||
307 | if ((i & 0xf) == 0xf || i == length - 1) | ||
308 | pr_info("%s\n", buf); | ||
309 | } | ||
310 | } | ||
311 | #endif | ||
312 | |||
313 | |||
314 | /* | ||
315 | * Provide support for the __netio_fastio1() swint | ||
316 | * (see <hv/drv_xgbe_intf.h> for how it is used). | ||
317 | * | ||
318 | * The fastio swint2 call may clobber all the caller-saved registers. | ||
319 | * It rarely clobbers memory, but we allow for the possibility in | ||
320 | * the signature just to be on the safe side. | ||
321 | * | ||
322 | * Also, gcc doesn't seem to allow an input operand to be | ||
323 | * clobbered, so we fake it with dummy outputs. | ||
324 | * | ||
325 | * This function can't be static because of the way it is declared | ||
326 | * in the netio header. | ||
327 | */ | ||
328 | inline int __netio_fastio1(u32 fastio_index, u32 arg0) | ||
329 | { | ||
330 | long result, clobber_r1, clobber_r10; | ||
331 | asm volatile("swint2" | ||
332 | : "=R00" (result), | ||
333 | "=R01" (clobber_r1), "=R10" (clobber_r10) | ||
334 | : "R10" (fastio_index), "R01" (arg0) | ||
335 | : "memory", "r2", "r3", "r4", | ||
336 | "r5", "r6", "r7", "r8", "r9", | ||
337 | "r11", "r12", "r13", "r14", | ||
338 | "r15", "r16", "r17", "r18", "r19", | ||
339 | "r20", "r21", "r22", "r23", "r24", | ||
340 | "r25", "r26", "r27", "r28", "r29"); | ||
341 | return result; | ||
342 | } | ||
343 | |||
344 | |||
345 | /* | ||
346 | * Provide a linux buffer to LIPP. | ||
347 | */ | ||
348 | static void tile_net_provide_linux_buffer(struct tile_net_cpu *info, | ||
349 | void *va, bool small) | ||
350 | { | ||
351 | struct tile_netio_queue *queue = &info->queue; | ||
352 | |||
353 | /* Convert "va" and "small" to "linux_buffer_t". */ | ||
354 | unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small; | ||
355 | |||
356 | __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer); | ||
357 | } | ||
358 | |||
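The "linux_buffer_t" handed to the hypervisor packs a 38-bit client physical address into 32 bits: because the buffer is 128-byte aligned, the low 7 bits of the PA are known to be zero and are dropped, and the freed low bit carries the small/large flag; the receive path inverts the transformation with __va((buffer >> 1) << 7). A worked round trip with a hypothetical, suitably aligned address:

    unsigned long pa     = 0x12345680UL;                        /* 128-byte aligned */
    unsigned int  handle = ((unsigned int)(pa >> 7) << 1) | 1;  /* small buffer     */

    int           small  = handle & 1;                          /* -> 1             */
    unsigned long back   = ((unsigned long)(handle >> 1)) << 7; /* -> 0x12345680    */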
359 | |||
360 | /* | ||
361 | * Provide a linux buffer for LIPP. | ||
362 | */ | ||
363 | static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info, | ||
364 | bool small) | ||
365 | { | ||
366 | /* ISSUE: What should we use here? */ | ||
367 | unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100; | ||
368 | |||
369 | /* Round up to avoid "false sharing" with the last cache line. */ | ||
370 | unsigned int buffer_size = | ||
371 | (((small ? LIPP_SMALL_PACKET_SIZE : large_size) + | ||
372 | CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE()); | ||
373 | |||
374 | /* | ||
375 | * ISSUE: Since CPAs are 38 bits, and we can only encode the | ||
376 | * high 31 bits in a "linux_buffer_t", the low 7 bits must be | ||
377 | * zero, and thus, we must align the actual "va" mod 128. | ||
378 | */ | ||
379 | const unsigned long align = 128; | ||
380 | |||
381 | struct sk_buff *skb; | ||
382 | void *va; | ||
383 | |||
384 | struct sk_buff **skb_ptr; | ||
385 | |||
386 | /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */ | ||
387 | /* and also "reserves" that many bytes. */ | ||
388 | /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */ | ||
389 | int len = sizeof(*skb_ptr) + align + buffer_size; | ||
390 | |||
391 | while (1) { | ||
392 | |||
393 | /* Allocate (or fail). */ | ||
394 | skb = dev_alloc_skb(len); | ||
395 | if (skb == NULL) | ||
396 | return false; | ||
397 | |||
398 | /* Make room for a back-pointer to 'skb'. */ | ||
399 | skb_reserve(skb, sizeof(*skb_ptr)); | ||
400 | |||
401 | /* Make sure we are aligned. */ | ||
402 | skb_reserve(skb, -(long)skb->data & (align - 1)); | ||
403 | |||
404 | /* This address is given to IPP. */ | ||
405 | va = skb->data; | ||
406 | |||
407 | if (small) | ||
408 | break; | ||
409 | |||
410 | /* ISSUE: This has never been observed! */ | ||
411 | /* Large buffers must not span a huge page. */ | ||
412 | if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0) | ||
413 | break; | ||
414 | pr_err("Leaking unaligned linux buffer at %p.\n", va); | ||
415 | } | ||
416 | |||
417 | /* Skip two bytes to satisfy LIPP assumptions. */ | ||
418 | /* Note that this aligns IP on a 16 byte boundary. */ | ||
419 | /* ISSUE: Do this when the packet arrives? */ | ||
420 | skb_reserve(skb, NET_IP_ALIGN); | ||
421 | |||
422 | /* Save a back-pointer to 'skb'. */ | ||
423 | skb_ptr = va - sizeof(*skb_ptr); | ||
424 | *skb_ptr = skb; | ||
425 | |||
426 | /* Invalidate the packet buffer. */ | ||
427 | if (!hash_default) | ||
428 | __inv_buffer(skb->data, buffer_size); | ||
429 | |||
430 | /* Make sure "skb_ptr" has been flushed. */ | ||
431 | __insn_mf(); | ||
432 | |||
433 | #ifdef TILE_NET_PARANOIA | ||
434 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
435 | if (hash_default) { | ||
436 | HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va); | ||
437 | if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) | ||
438 | panic("Non-coherent ingress buffer!"); | ||
439 | } | ||
440 | #endif | ||
441 | #endif | ||
442 | |||
443 | /* Provide the new buffer. */ | ||
444 | tile_net_provide_linux_buffer(info, va, small); | ||
445 | |||
446 | return true; | ||
447 | } | ||
448 | |||
449 | |||
450 | /* | ||
451 | * Provide linux buffers for LIPP. | ||
452 | */ | ||
453 | static void tile_net_provide_needed_buffers(struct tile_net_cpu *info) | ||
454 | { | ||
455 | while (info->num_needed_small_buffers != 0) { | ||
456 | if (!tile_net_provide_needed_buffer(info, true)) | ||
457 | goto oops; | ||
458 | info->num_needed_small_buffers--; | ||
459 | } | ||
460 | |||
461 | while (info->num_needed_large_buffers != 0) { | ||
462 | if (!tile_net_provide_needed_buffer(info, false)) | ||
463 | goto oops; | ||
464 | info->num_needed_large_buffers--; | ||
465 | } | ||
466 | |||
467 | return; | ||
468 | |||
469 | oops: | ||
470 | |||
471 | /* Add a description to the page allocation failure dump. */ | ||
472 | pr_notice("Could not provide a linux buffer to LIPP.\n"); | ||
473 | } | ||
474 | |||
475 | |||
476 | /* | ||
477 | * Grab some LEPP completions, and store them in "comps", of size | ||
478 | * "comps_size", and return the number of completions which were | ||
479 | * stored, so the caller can free them. | ||
480 | * | ||
481 | * If "pending" is not NULL, it will be set to true if there might | ||
482 | * still be some pending completions caused by this tile, else false. | ||
483 | */ | ||
484 | static unsigned int tile_net_lepp_grab_comps(struct net_device *dev, | ||
485 | struct sk_buff *comps[], | ||
486 | unsigned int comps_size, | ||
487 | bool *pending) | ||
488 | { | ||
489 | struct tile_net_priv *priv = netdev_priv(dev); | ||
490 | |||
491 | lepp_queue_t *eq = priv->epp_queue; | ||
492 | |||
493 | unsigned int n = 0; | ||
494 | |||
495 | unsigned int comp_head; | ||
496 | unsigned int comp_busy; | ||
497 | unsigned int comp_tail; | ||
498 | |||
499 | spin_lock(&priv->comp_lock); | ||
500 | |||
501 | comp_head = eq->comp_head; | ||
502 | comp_busy = eq->comp_busy; | ||
503 | comp_tail = eq->comp_tail; | ||
504 | |||
505 | while (comp_head != comp_busy && n < comps_size) { | ||
506 | comps[n++] = eq->comps[comp_head]; | ||
507 | LEPP_QINC(comp_head); | ||
508 | } | ||
509 | |||
510 | if (pending != NULL) | ||
511 | *pending = (comp_head != comp_tail); | ||
512 | |||
513 | eq->comp_head = comp_head; | ||
514 | |||
515 | spin_unlock(&priv->comp_lock); | ||
516 | |||
517 | return n; | ||
518 | } | ||
519 | |||
520 | |||
521 | /* | ||
522 | * Make sure the egress timer is scheduled. | ||
523 | * | ||
524 | * Note that we use "schedule if not scheduled" logic instead of the more | ||
525 | * obvious "reschedule" logic, because "reschedule" is fairly expensive. | ||
526 | */ | ||
527 | static void tile_net_schedule_egress_timer(struct tile_net_cpu *info) | ||
528 | { | ||
529 | if (!info->egress_timer_scheduled) { | ||
530 | mod_timer_pinned(&info->egress_timer, jiffies + 1); | ||
531 | info->egress_timer_scheduled = true; | ||
532 | } | ||
533 | } | ||
534 | |||
535 | |||
536 | /* | ||
537 | * The "function" for "info->egress_timer". | ||
538 | * | ||
539 | * This timer will reschedule itself as long as there are any pending | ||
540 | * completions expected (on behalf of any tile). | ||
541 | * | ||
542 | * ISSUE: Realistically, will the timer ever stop scheduling itself? | ||
543 | * | ||
544 | * ISSUE: This timer is almost never actually needed, so just use a global | ||
545 | * timer that can run on any tile. | ||
546 | * | ||
547 | * ISSUE: Maybe instead track number of expected completions, and free | ||
548 | * only that many, resetting to zero if "pending" is ever false. | ||
549 | */ | ||
550 | static void tile_net_handle_egress_timer(unsigned long arg) | ||
551 | { | ||
552 | struct tile_net_cpu *info = (struct tile_net_cpu *)arg; | ||
553 | struct net_device *dev = info->napi.dev; | ||
554 | |||
555 | struct sk_buff *olds[32]; | ||
556 | unsigned int wanted = 32; | ||
557 | unsigned int i, nolds = 0; | ||
558 | bool pending; | ||
559 | |||
560 | /* The timer is no longer scheduled. */ | ||
561 | info->egress_timer_scheduled = false; | ||
562 | |||
563 | nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending); | ||
564 | |||
565 | for (i = 0; i < nolds; i++) | ||
566 | kfree_skb(olds[i]); | ||
567 | |||
568 | /* Reschedule timer if needed. */ | ||
569 | if (pending) | ||
570 | tile_net_schedule_egress_timer(info); | ||
571 | } | ||
572 | |||
573 | |||
574 | #ifdef IGNORE_DUP_ACKS | ||
575 | |||
576 | /* | ||
577 | * Help detect "duplicate" ACKs. These are sequential packets (for a | ||
578 | * given flow) which are exactly 66 bytes long, sharing everything but | ||
579 | * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32, | ||
580 | * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are | ||
581 | * +N, and the Tstamps are usually identical. | ||
582 | * | ||
583 | * NOTE: Apparently truly duplicate acks (with identical "ack" values), | ||
584 | * should not be collapsed, as they are used for some kind of flow control. | ||
585 | */ | ||
586 | static bool is_dup_ack(char *s1, char *s2, unsigned int len) | ||
587 | { | ||
588 | int i; | ||
589 | |||
590 | unsigned long long ignorable = 0; | ||
591 | |||
592 | /* Identification. */ | ||
593 | ignorable |= (1ULL << 0x12); | ||
594 | ignorable |= (1ULL << 0x13); | ||
595 | |||
596 | /* Header checksum. */ | ||
597 | ignorable |= (1ULL << 0x18); | ||
598 | ignorable |= (1ULL << 0x19); | ||
599 | |||
600 | /* ACK. */ | ||
601 | ignorable |= (1ULL << 0x2a); | ||
602 | ignorable |= (1ULL << 0x2b); | ||
603 | ignorable |= (1ULL << 0x2c); | ||
604 | ignorable |= (1ULL << 0x2d); | ||
605 | |||
606 | /* WinSize. */ | ||
607 | ignorable |= (1ULL << 0x30); | ||
608 | ignorable |= (1ULL << 0x31); | ||
609 | |||
610 | /* Checksum. */ | ||
611 | ignorable |= (1ULL << 0x32); | ||
612 | ignorable |= (1ULL << 0x33); | ||
613 | |||
614 | for (i = 0; i < len; i++, ignorable >>= 1) { | ||
615 | |||
616 | if ((ignorable & 1) || (s1[i] == s2[i])) | ||
617 | continue; | ||
618 | |||
619 | #ifdef TILE_NET_DEBUG | ||
620 | /* HACK: Mention non-timestamp diffs. */ | ||
621 | if (i < 0x38 && i != 0x2f && | ||
622 | net_ratelimit()) | ||
623 | pr_info("Diff at 0x%x\n", i); | ||
624 | #endif | ||
625 | |||
626 | return false; | ||
627 | } | ||
628 | |||
629 | #ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS | ||
630 | /* HACK: Do not suppress truly duplicate ACKs. */ | ||
631 | /* ISSUE: Is this actually necessary or helpful? */ | ||
632 | if (s1[0x2a] == s2[0x2a] && | ||
633 | s1[0x2b] == s2[0x2b] && | ||
634 | s1[0x2c] == s2[0x2c] && | ||
635 | s1[0x2d] == s2[0x2d]) { | ||
636 | return false; | ||
637 | } | ||
638 | #endif | ||
639 | |||
640 | return true; | ||
641 | } | ||
642 | |||
643 | #endif | ||
644 | |||
645 | |||
646 | |||
647 | /* | ||
648 | * Like "tile_net_handle_packets()", but just discard packets. | ||
649 | */ | ||
650 | static void tile_net_discard_packets(struct net_device *dev) | ||
651 | { | ||
652 | struct tile_net_priv *priv = netdev_priv(dev); | ||
653 | int my_cpu = smp_processor_id(); | ||
654 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
655 | struct tile_netio_queue *queue = &info->queue; | ||
656 | netio_queue_impl_t *qsp = queue->__system_part; | ||
657 | netio_queue_user_impl_t *qup = &queue->__user_part; | ||
658 | |||
659 | while (qup->__packet_receive_read != | ||
660 | qsp->__packet_receive_queue.__packet_write) { | ||
661 | |||
662 | int index = qup->__packet_receive_read; | ||
663 | |||
664 | int index2_aux = index + sizeof(netio_pkt_t); | ||
665 | int index2 = | ||
666 | ((index2_aux == | ||
667 | qsp->__packet_receive_queue.__last_packet_plus_one) ? | ||
668 | 0 : index2_aux); | ||
669 | |||
670 | netio_pkt_t *pkt = (netio_pkt_t *) | ||
671 | ((unsigned long) &qsp[1] + index); | ||
672 | |||
673 | /* Extract the "linux_buffer_t". */ | ||
674 | unsigned int buffer = pkt->__packet.word; | ||
675 | |||
676 | /* Convert "linux_buffer_t" to "va". */ | ||
677 | void *va = __va((phys_addr_t)(buffer >> 1) << 7); | ||
678 | |||
679 | /* Acquire the associated "skb". */ | ||
680 | struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | ||
681 | struct sk_buff *skb = *skb_ptr; | ||
682 | |||
683 | kfree_skb(skb); | ||
684 | |||
685 | /* Consume this packet. */ | ||
686 | qup->__packet_receive_read = index2; | ||
687 | } | ||
688 | } | ||
689 | |||
690 | |||
691 | /* | ||
692 | * Handle the next packet. Return true if "processed", false if "filtered". | ||
693 | */ | ||
694 | static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) | ||
695 | { | ||
696 | struct net_device *dev = info->napi.dev; | ||
697 | |||
698 | struct tile_netio_queue *queue = &info->queue; | ||
699 | netio_queue_impl_t *qsp = queue->__system_part; | ||
700 | netio_queue_user_impl_t *qup = &queue->__user_part; | ||
701 | struct tile_net_stats_t *stats = &info->stats; | ||
702 | |||
703 | int filter; | ||
704 | |||
705 | int index2_aux = index + sizeof(netio_pkt_t); | ||
706 | int index2 = | ||
707 | ((index2_aux == | ||
708 | qsp->__packet_receive_queue.__last_packet_plus_one) ? | ||
709 | 0 : index2_aux); | ||
710 | |||
711 | netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); | ||
712 | |||
713 | netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); | ||
714 | |||
715 | /* Extract the packet size. */ | ||
716 | unsigned long len = | ||
717 | (NETIO_PKT_CUSTOM_LENGTH(pkt) + | ||
718 | NET_IP_ALIGN - NETIO_PACKET_PADDING); | ||
719 | |||
720 | /* Extract the "linux_buffer_t". */ | ||
721 | unsigned int buffer = pkt->__packet.word; | ||
722 | |||
723 | /* Extract "small" (vs "large"). */ | ||
724 | bool small = ((buffer & 1) != 0); | ||
725 | |||
726 | /* Convert "linux_buffer_t" to "va". */ | ||
727 | void *va = __va((phys_addr_t)(buffer >> 1) << 7); | ||
728 | |||
729 | /* Extract the packet data pointer. */ | ||
730 | /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ | ||
731 | unsigned char *buf = va + NET_IP_ALIGN; | ||
732 | |||
733 | #ifdef IGNORE_DUP_ACKS | ||
734 | |||
735 | static int other; | ||
736 | static int final; | ||
737 | static int keep; | ||
738 | static int skip; | ||
739 | |||
740 | #endif | ||
741 | |||
742 | /* Invalidate the packet buffer. */ | ||
743 | if (!hash_default) | ||
744 | __inv_buffer(buf, len); | ||
745 | |||
746 | /* ISSUE: Is this needed? */ | ||
747 | dev->last_rx = jiffies; | ||
748 | |||
749 | #ifdef TILE_NET_DUMP_PACKETS | ||
750 | dump_packet(buf, len, "rx"); | ||
751 | #endif /* TILE_NET_DUMP_PACKETS */ | ||
752 | |||
753 | #ifdef TILE_NET_VERIFY_INGRESS | ||
754 | if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) && | ||
755 | NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) { | ||
756 | /* | ||
757 | * FIXME: This complains about UDP packets | ||
758 | * with a "zero" checksum (bug 6624). | ||
759 | */ | ||
760 | #ifdef TILE_NET_PANIC_ON_BAD | ||
761 | dump_packet(buf, len, "rx"); | ||
762 | panic("Bad L4 checksum."); | ||
763 | #else | ||
764 | pr_warning("Bad L4 checksum on %d byte packet.\n", len); | ||
765 | #endif | ||
766 | } | ||
767 | if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) && | ||
768 | NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) { | ||
769 | dump_packet(buf, len, "rx"); | ||
770 | panic("Bad L3 checksum."); | ||
771 | } | ||
772 | switch (NETIO_PKT_STATUS_M(metadata, pkt)) { | ||
773 | case NETIO_PKT_STATUS_OVERSIZE: | ||
774 | if (len >= 64) { | ||
775 | dump_packet(buf, len, "rx"); | ||
776 | panic("Unexpected OVERSIZE."); | ||
777 | } | ||
778 | break; | ||
779 | case NETIO_PKT_STATUS_BAD: | ||
780 | #ifdef TILE_NET_PANIC_ON_BAD | ||
781 | dump_packet(buf, len, "rx"); | ||
782 | panic("Unexpected BAD packet."); | ||
783 | #else | ||
784 | pr_warning("Unexpected BAD %d byte packet.\n", len); | ||
785 | #endif | ||
786 | } | ||
787 | #endif | ||
788 | |||
789 | filter = 0; | ||
790 | |||
791 | if (!(dev->flags & IFF_UP)) { | ||
792 | /* Filter packets received before we're up. */ | ||
793 | filter = 1; | ||
794 | } else if (!(dev->flags & IFF_PROMISC)) { | ||
795 | /* | ||
796 | * FIXME: Implement HW multicast filter. | ||
797 | */ | ||
798 | if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) { | ||
799 | /* Filter packets not for our address. */ | ||
800 | const u8 *mine = dev->dev_addr; | ||
801 | filter = compare_ether_addr(mine, buf); | ||
802 | } | ||
803 | } | ||
804 | |||
805 | #ifdef IGNORE_DUP_ACKS | ||
806 | |||
807 | if (len != 66) { | ||
808 | /* FIXME: Must check "is_tcp_ack(buf, len)" somehow. */ | ||
809 | |||
810 | other++; | ||
811 | |||
812 | } else if (index2 == | ||
813 | qsp->__packet_receive_queue.__packet_write) { | ||
814 | |||
815 | final++; | ||
816 | |||
817 | } else { | ||
818 | |||
819 | netio_pkt_t *pkt2 = (netio_pkt_t *) | ||
820 | ((unsigned long) &qsp[1] + index2); | ||
821 | |||
822 | netio_pkt_metadata_t *metadata2 = | ||
823 | NETIO_PKT_METADATA(pkt2); | ||
824 | |||
825 | /* Extract the packet size. */ | ||
826 | unsigned long len2 = | ||
827 | (NETIO_PKT_CUSTOM_LENGTH(pkt2) + | ||
828 | NET_IP_ALIGN - NETIO_PACKET_PADDING); | ||
829 | |||
830 | if (len2 == 66 && | ||
831 | NETIO_PKT_FLOW_HASH_M(metadata, pkt) == | ||
832 | NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) { | ||
833 | |||
834 | /* Extract the "linux_buffer_t". */ | ||
835 | unsigned int buffer2 = pkt2->__packet.word; | ||
836 | |||
837 | /* Convert "linux_buffer_t" to "va". */ | ||
838 | void *va2 = | ||
839 | __va((phys_addr_t)(buffer2 >> 1) << 7); | ||
840 | |||
841 | /* Extract the packet data pointer. */ | ||
842 | /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ | ||
843 | unsigned char *buf2 = va2 + NET_IP_ALIGN; | ||
844 | |||
845 | /* Invalidate the packet buffer. */ | ||
846 | if (!hash_default) | ||
847 | __inv_buffer(buf2, len2); | ||
848 | |||
849 | if (is_dup_ack(buf, buf2, len)) { | ||
850 | skip++; | ||
851 | filter = 1; | ||
852 | } else { | ||
853 | keep++; | ||
854 | } | ||
855 | } | ||
856 | } | ||
857 | |||
858 | if (net_ratelimit()) | ||
859 | pr_info("Other %d Final %d Keep %d Skip %d.\n", | ||
860 | other, final, keep, skip); | ||
861 | |||
862 | #endif | ||
863 | |||
864 | if (filter) { | ||
865 | |||
866 | /* ISSUE: Update "drop" statistics? */ | ||
867 | |||
868 | tile_net_provide_linux_buffer(info, va, small); | ||
869 | |||
870 | } else { | ||
871 | |||
872 | /* Acquire the associated "skb". */ | ||
873 | struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | ||
874 | struct sk_buff *skb = *skb_ptr; | ||
875 | |||
876 | /* Paranoia. */ | ||
877 | if (skb->data != buf) | ||
878 | panic("Corrupt linux buffer from LIPP! " | ||
879 | "VA=%p, skb=%p, skb->data=%p\n", | ||
880 | va, skb, skb->data); | ||
881 | |||
882 | /* Encode the actual packet length. */ | ||
883 | skb_put(skb, len); | ||
884 | |||
885 | /* NOTE: This call also sets "skb->dev = dev". */ | ||
886 | skb->protocol = eth_type_trans(skb, dev); | ||
887 | |||
888 | /* ISSUE: Discard corrupt packets? */ | ||
889 | /* ISSUE: Discard packets with bad checksums? */ | ||
890 | |||
891 | /* Avoid recomputing TCP/UDP checksums. */ | ||
892 | if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt)) | ||
893 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
894 | |||
895 | netif_receive_skb(skb); | ||
896 | |||
897 | stats->rx_packets++; | ||
898 | stats->rx_bytes += len; | ||
899 | |||
900 | if (small) | ||
901 | info->num_needed_small_buffers++; | ||
902 | else | ||
903 | info->num_needed_large_buffers++; | ||
904 | } | ||
905 | |||
906 | /* Return four credits after every fourth packet. */ | ||
907 | if (--qup->__receive_credit_remaining == 0) { | ||
908 | u32 interval = qup->__receive_credit_interval; | ||
909 | qup->__receive_credit_remaining = interval; | ||
910 | __netio_fastio_return_credits(qup->__fastio_index, interval); | ||
911 | } | ||
912 | |||
913 | /* Consume this packet. */ | ||
914 | qup->__packet_receive_read = index2; | ||
915 | |||
916 | return !filter; | ||
917 | } | ||
918 | |||
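The "(buffer >> 1) << 7" expression above (and again in the drain loop of tile_net_stop() below) is the only place the layout of the 32-bit "linux_buffer_t" handle shows through. A minimal standalone sketch of that decode, assuming the dropped low bit is a flag and the remaining bits count 128-byte units of physical address (only the shift arithmetic is taken from the driver):

    #include <stdint.h>

    /*
     * Decode a 32-bit "linux_buffer_t" handle into a physical
     * address, restating the driver's "(buffer >> 1) << 7": drop the
     * low bit (assumed to be a flag) and scale the rest by 128
     * bytes.  The driver then maps the result with __va(); this
     * sketch stops at the physical address.
     */
    static inline uint64_t linux_buffer_to_pa(uint32_t buffer)
    {
            return (uint64_t)(buffer >> 1) << 7;
    }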
919 | |||
920 | /* | ||
921 | * Handle some packets for the given device on the current CPU. | ||
922 | * | ||
923 | * ISSUE: The "rotting packet" race condition occurs if a packet | ||
924 | * arrives after the queue appears to be empty, and before the | ||
925 | * hypervisor interrupt is re-enabled. | ||
926 | */ | ||
927 | static int tile_net_poll(struct napi_struct *napi, int budget) | ||
928 | { | ||
929 | struct net_device *dev = napi->dev; | ||
930 | struct tile_net_priv *priv = netdev_priv(dev); | ||
931 | int my_cpu = smp_processor_id(); | ||
932 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
933 | struct tile_netio_queue *queue = &info->queue; | ||
934 | netio_queue_impl_t *qsp = queue->__system_part; | ||
935 | netio_queue_user_impl_t *qup = &queue->__user_part; | ||
936 | |||
937 | unsigned int work = 0; | ||
938 | |||
939 | while (1) { | ||
940 | int index = qup->__packet_receive_read; | ||
941 | if (index == qsp->__packet_receive_queue.__packet_write) | ||
942 | break; | ||
943 | |||
944 | if (tile_net_poll_aux(info, index)) { | ||
945 | if (++work >= budget) | ||
946 | goto done; | ||
947 | } | ||
948 | } | ||
949 | |||
950 | napi_complete(&info->napi); | ||
951 | |||
952 | /* Re-enable hypervisor interrupts. */ | ||
953 | enable_percpu_irq(priv->intr_id); | ||
954 | |||
955 | /* HACK: Avoid the "rotting packet" problem. */ | ||
956 | if (qup->__packet_receive_read != | ||
957 | qsp->__packet_receive_queue.__packet_write) | ||
958 | napi_schedule(&info->napi); | ||
959 | |||
960 | /* ISSUE: Handle completions? */ | ||
961 | |||
962 | done: | ||
963 | |||
964 | tile_net_provide_needed_buffers(info); | ||
965 | |||
966 | return work; | ||
967 | } | ||
968 | |||
969 | |||
970 | /* | ||
971 | * Handle an ingress interrupt for the given device on the current cpu. | ||
972 | */ | ||
973 | static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr) | ||
974 | { | ||
975 | struct net_device *dev = (struct net_device *)dev_ptr; | ||
976 | struct tile_net_priv *priv = netdev_priv(dev); | ||
977 | int my_cpu = smp_processor_id(); | ||
978 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
979 | |||
980 | /* Disable hypervisor interrupt. */ | ||
981 | disable_percpu_irq(priv->intr_id); | ||
982 | |||
983 | napi_schedule(&info->napi); | ||
984 | |||
985 | return IRQ_HANDLED; | ||
986 | } | ||
987 | |||
988 | |||
989 | /* | ||
990 | * One time initialization per interface. | ||
991 | */ | ||
992 | static int tile_net_open_aux(struct net_device *dev) | ||
993 | { | ||
994 | struct tile_net_priv *priv = netdev_priv(dev); | ||
995 | |||
996 | int ret; | ||
997 | int dummy; | ||
998 | unsigned int epp_lotar; | ||
999 | |||
1000 | /* | ||
1001 | * Find out where EPP memory should be homed. | ||
1002 | */ | ||
1003 | ret = hv_dev_pread(priv->hv_devhdl, 0, | ||
1004 | (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar), | ||
1005 | NETIO_EPP_SHM_OFF); | ||
1006 | if (ret < 0) { | ||
1007 | pr_err("could not read epp_shm_queue lotar.\n"); | ||
1008 | return -EIO; | ||
1009 | } | ||
1010 | |||
1011 | /* | ||
1012 | * Home the page on the EPP. | ||
1013 | */ | ||
1014 | { | ||
1015 | int epp_home = hv_lotar_to_cpu(epp_lotar); | ||
1016 | struct page *page = virt_to_page(priv->epp_queue); | ||
1017 | homecache_change_page_home(page, 0, epp_home); | ||
1018 | } | ||
1019 | |||
1020 | /* | ||
1021 | * Register the EPP shared memory queue. | ||
1022 | */ | ||
1023 | { | ||
1024 | netio_ipp_address_t ea = { | ||
1025 | .va = 0, | ||
1026 | .pa = __pa(priv->epp_queue), | ||
1027 | .pte = hv_pte(0), | ||
1028 | .size = PAGE_SIZE, | ||
1029 | }; | ||
1030 | ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar); | ||
1031 | ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3); | ||
1032 | ret = hv_dev_pwrite(priv->hv_devhdl, 0, | ||
1033 | (HV_VirtAddr)&ea, | ||
1034 | sizeof(ea), | ||
1035 | NETIO_EPP_SHM_OFF); | ||
1036 | if (ret < 0) | ||
1037 | return -EIO; | ||
1038 | } | ||
1039 | |||
1040 | /* | ||
1041 | * Start LIPP/LEPP. | ||
1042 | */ | ||
1043 | if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, | ||
1044 | sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) { | ||
1045 | pr_warning("Failed to start LIPP/LEPP.\n"); | ||
1046 | return -EIO; | ||
1047 | } | ||
1048 | |||
1049 | return 0; | ||
1050 | } | ||
1051 | |||
1052 | |||
1053 | /* | ||
1054 | * Register with hypervisor on each CPU. | ||
1055 | * | ||
1056 | * Strangely, this function does important things even if it "fails", | ||
1057 | * which is especially common if the link is not up yet. Hopefully | ||
1058 | * these things are all "harmless" if done twice! | ||
1059 | */ | ||
1060 | static void tile_net_register(void *dev_ptr) | ||
1061 | { | ||
1062 | struct net_device *dev = (struct net_device *)dev_ptr; | ||
1063 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1064 | int my_cpu = smp_processor_id(); | ||
1065 | struct tile_net_cpu *info; | ||
1066 | |||
1067 | struct tile_netio_queue *queue; | ||
1068 | |||
1069 | /* Only network cpus can receive packets. */ | ||
1070 | int queue_id = | ||
1071 | cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255; | ||
1072 | |||
1073 | netio_input_config_t config = { | ||
1074 | .flags = 0, | ||
1075 | .num_receive_packets = priv->network_cpus_credits, | ||
1076 | .queue_id = queue_id | ||
1077 | }; | ||
1078 | |||
1079 | int ret = 0; | ||
1080 | netio_queue_impl_t *queuep; | ||
1081 | |||
1082 | PDEBUG("tile_net_register(queue_id %d)\n", queue_id); | ||
1083 | |||
1084 | if (!strcmp(dev->name, "xgbe0")) | ||
1085 | info = &__get_cpu_var(hv_xgbe0); | ||
1086 | else if (!strcmp(dev->name, "xgbe1")) | ||
1087 | info = &__get_cpu_var(hv_xgbe1); | ||
1088 | else if (!strcmp(dev->name, "gbe0")) | ||
1089 | info = &__get_cpu_var(hv_gbe0); | ||
1090 | else if (!strcmp(dev->name, "gbe1")) | ||
1091 | info = &__get_cpu_var(hv_gbe1); | ||
1092 | else | ||
1093 | BUG(); | ||
1094 | |||
1095 | /* Initialize the egress timer. */ | ||
1096 | init_timer(&info->egress_timer); | ||
1097 | info->egress_timer.data = (long)info; | ||
1098 | info->egress_timer.function = tile_net_handle_egress_timer; | ||
1099 | |||
1100 | priv->cpu[my_cpu] = info; | ||
1101 | |||
1102 | /* | ||
1103 | * Register ourselves with the IPP. | ||
1104 | */ | ||
1105 | ret = hv_dev_pwrite(priv->hv_devhdl, 0, | ||
1106 | (HV_VirtAddr)&config, | ||
1107 | sizeof(netio_input_config_t), | ||
1108 | NETIO_IPP_INPUT_REGISTER_OFF); | ||
1109 | PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", | ||
1110 | ret); | ||
1111 | if (ret < 0) { | ||
1112 | printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF" | ||
1113 | " failure %d\n", ret); | ||
1114 | info->link_down = (ret == NETIO_LINK_DOWN); | ||
1115 | return; | ||
1116 | } | ||
1117 | |||
1118 | /* | ||
1119 | * Get the pointer to our queue's system part. | ||
1120 | */ | ||
1121 | |||
1122 | ret = hv_dev_pread(priv->hv_devhdl, 0, | ||
1123 | (HV_VirtAddr)&queuep, | ||
1124 | sizeof(netio_queue_impl_t *), | ||
1125 | NETIO_IPP_INPUT_REGISTER_OFF); | ||
1126 | PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", | ||
1127 | ret); | ||
1128 | PDEBUG("queuep %p\n", queuep); | ||
1129 | if (ret <= 0) { | ||
1130 | /* ISSUE: Shouldn't this be a fatal error? */ | ||
1131 | pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n"); | ||
1132 | return; | ||
1133 | } | ||
1134 | |||
1135 | queue = &info->queue; | ||
1136 | |||
1137 | queue->__system_part = queuep; | ||
1138 | |||
1139 | memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t)); | ||
1140 | |||
1141 | /* This is traditionally "config.num_receive_packets / 2". */ | ||
1142 | queue->__user_part.__receive_credit_interval = 4; | ||
1143 | queue->__user_part.__receive_credit_remaining = | ||
1144 | queue->__user_part.__receive_credit_interval; | ||
1145 | |||
1146 | /* | ||
1147 | * Get a fastio index from the hypervisor. | ||
1148 | * ISSUE: Shouldn't this check the result? | ||
1149 | */ | ||
1150 | ret = hv_dev_pread(priv->hv_devhdl, 0, | ||
1151 | (HV_VirtAddr)&queue->__user_part.__fastio_index, | ||
1152 | sizeof(queue->__user_part.__fastio_index), | ||
1153 | NETIO_IPP_GET_FASTIO_OFF); | ||
1154 | PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret); | ||
1155 | |||
1156 | netif_napi_add(dev, &info->napi, tile_net_poll, 64); | ||
1157 | |||
1158 | /* Now we are registered. */ | ||
1159 | info->registered = true; | ||
1160 | } | ||
1161 | |||
1162 | |||
1163 | /* | ||
1164 | * Unregister with hypervisor on each CPU. | ||
1165 | */ | ||
1166 | static void tile_net_unregister(void *dev_ptr) | ||
1167 | { | ||
1168 | struct net_device *dev = (struct net_device *)dev_ptr; | ||
1169 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1170 | int my_cpu = smp_processor_id(); | ||
1171 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
1172 | |||
1173 | int ret = 0; | ||
1174 | int dummy = 0; | ||
1175 | |||
1176 | /* Do nothing if never registered. */ | ||
1177 | if (info == NULL) | ||
1178 | return; | ||
1179 | |||
1180 | /* Do nothing if already unregistered. */ | ||
1181 | if (!info->registered) | ||
1182 | return; | ||
1183 | |||
1184 | /* | ||
1185 | * Unregister ourselves with LIPP. | ||
1186 | */ | ||
1187 | ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, | ||
1188 | sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF); | ||
1189 | PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n", | ||
1190 | ret); | ||
1191 | if (ret < 0) { | ||
1192 | /* FIXME: Just panic? */ | ||
1193 | pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF" | ||
1194 | " failure %d\n", ret); | ||
1195 | } | ||
1196 | |||
1197 | /* | ||
1198 | * Discard all packets still in our NetIO queue. Hopefully, | ||
1199 | * once the unregister call is complete, there will be no | ||
1200 | * packets still in flight on the IDN. | ||
1201 | */ | ||
1202 | tile_net_discard_packets(dev); | ||
1203 | |||
1204 | /* Reset state. */ | ||
1205 | info->num_needed_small_buffers = 0; | ||
1206 | info->num_needed_large_buffers = 0; | ||
1207 | |||
1208 | /* Cancel egress timer. */ | ||
1209 | del_timer(&info->egress_timer); | ||
1210 | info->egress_timer_scheduled = false; | ||
1211 | |||
1212 | netif_napi_del(&info->napi); | ||
1213 | |||
1214 | /* Now we are unregistered. */ | ||
1215 | info->registered = false; | ||
1216 | } | ||
1217 | |||
1218 | |||
1219 | /* | ||
1220 | * Helper function for "tile_net_stop()". | ||
1221 | * | ||
1222 | * Also used to handle registration failure in "tile_net_open_inner()", | ||
1223 | * when "fully_opened" is known to be false, and the various extra | ||
1224 | * steps in "tile_net_stop()" are not necessary. ISSUE: It might be | ||
1225 | * simpler if we could just call "tile_net_stop()" anyway. | ||
1226 | */ | ||
1227 | static void tile_net_stop_aux(struct net_device *dev) | ||
1228 | { | ||
1229 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1230 | |||
1231 | int dummy = 0; | ||
1232 | |||
1233 | /* Unregister all tiles, so LIPP will stop delivering packets. */ | ||
1234 | on_each_cpu(tile_net_unregister, (void *)dev, 1); | ||
1235 | |||
1236 | /* Stop LIPP/LEPP. */ | ||
1237 | if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, | ||
1238 | sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0) | ||
1239 | panic("Failed to stop LIPP/LEPP!\n"); | ||
1240 | |||
1241 | priv->partly_opened = 0; | ||
1242 | } | ||
1243 | |||
1244 | |||
1245 | /* | ||
1246 | * Disable ingress interrupts for the given device on the current cpu. | ||
1247 | */ | ||
1248 | static void tile_net_disable_intr(void *dev_ptr) | ||
1249 | { | ||
1250 | struct net_device *dev = (struct net_device *)dev_ptr; | ||
1251 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1252 | int my_cpu = smp_processor_id(); | ||
1253 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
1254 | |||
1255 | /* Disable hypervisor interrupt. */ | ||
1256 | disable_percpu_irq(priv->intr_id); | ||
1257 | |||
1258 | /* Disable NAPI if needed. */ | ||
1259 | if (info != NULL && info->napi_enabled) { | ||
1260 | napi_disable(&info->napi); | ||
1261 | info->napi_enabled = false; | ||
1262 | } | ||
1263 | } | ||
1264 | |||
1265 | |||
1266 | /* | ||
1267 | * Enable ingress interrupts for the given device on the current cpu. | ||
1268 | */ | ||
1269 | static void tile_net_enable_intr(void *dev_ptr) | ||
1270 | { | ||
1271 | struct net_device *dev = (struct net_device *)dev_ptr; | ||
1272 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1273 | int my_cpu = smp_processor_id(); | ||
1274 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
1275 | |||
1276 | /* Enable hypervisor interrupt. */ | ||
1277 | enable_percpu_irq(priv->intr_id); | ||
1278 | |||
1279 | /* Enable NAPI. */ | ||
1280 | napi_enable(&info->napi); | ||
1281 | info->napi_enabled = true; | ||
1282 | } | ||
1283 | |||
1284 | |||
1285 | /* | ||
1286 | * tile_net_open_inner does most of the work of bringing up the interface. | ||
1287 | * It's called from tile_net_open(), and also from tile_net_retry_open(). | ||
1288 | * The return value is 0 if the interface was brought up, < 0 if | ||
1289 | * tile_net_open() should return the return value as an error, and > 0 if | ||
1290 | * tile_net_open() should return success and schedule a work item to | ||
1291 | * periodically retry the bringup. | ||
1292 | */ | ||
1293 | static int tile_net_open_inner(struct net_device *dev) | ||
1294 | { | ||
1295 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1296 | int my_cpu = smp_processor_id(); | ||
1297 | struct tile_net_cpu *info; | ||
1298 | struct tile_netio_queue *queue; | ||
1299 | unsigned int irq; | ||
1300 | int i; | ||
1301 | |||
1302 | /* | ||
1303 | * First try to register just on the local CPU, and handle any | ||
1304 | * semi-expected "link down" failure specially. Note that we | ||
1305 | * do NOT call "tile_net_stop_aux()", unlike below. | ||
1306 | */ | ||
1307 | tile_net_register(dev); | ||
1308 | info = priv->cpu[my_cpu]; | ||
1309 | if (!info->registered) { | ||
1310 | if (info->link_down) | ||
1311 | return 1; | ||
1312 | return -EAGAIN; | ||
1313 | } | ||
1314 | |||
1315 | /* | ||
1316 | * Now register everywhere else. If any registration fails, | ||
1317 | * even for "link down" (which might not be possible), we | ||
1318 | * clean up using "tile_net_stop_aux()". | ||
1319 | */ | ||
1320 | smp_call_function(tile_net_register, (void *)dev, 1); | ||
1321 | for_each_online_cpu(i) { | ||
1322 | if (!priv->cpu[i]->registered) { | ||
1323 | tile_net_stop_aux(dev); | ||
1324 | return -EAGAIN; | ||
1325 | } | ||
1326 | } | ||
1327 | |||
1328 | queue = &info->queue; | ||
1329 | |||
1330 | /* | ||
1331 | * Set the device intr bit mask. | ||
1332 | * The tile_net_register above sets the per-tile __intr_id. | ||
1333 | */ | ||
1334 | priv->intr_id = queue->__system_part->__intr_id; | ||
1335 | BUG_ON(!priv->intr_id); | ||
1336 | |||
1337 | /* | ||
1338 | * Register the device interrupt handler. | ||
1339 | * The __ffs() function returns the index into the interrupt handler | ||
1340 | * table from the interrupt bit mask, which should have one and | ||
1341 | * only one bit set. | ||
1342 | */ | ||
1343 | irq = __ffs(priv->intr_id); | ||
1344 | tile_irq_activate(irq, TILE_IRQ_PERCPU); | ||
1345 | BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt, | ||
1346 | 0, dev->name, (void *)dev) != 0); | ||
1347 | |||
1348 | /* ISSUE: How could "priv->fully_opened" ever be "true" here? */ | ||
1349 | |||
1350 | if (!priv->fully_opened) { | ||
1351 | |||
1352 | int dummy = 0; | ||
1353 | |||
1354 | /* Allocate initial buffers. */ | ||
1355 | |||
1356 | int max_buffers = | ||
1357 | priv->network_cpus_count * priv->network_cpus_credits; | ||
1358 | |||
1359 | info->num_needed_small_buffers = | ||
1360 | min(LIPP_SMALL_BUFFERS, max_buffers); | ||
1361 | |||
1362 | info->num_needed_large_buffers = | ||
1363 | min(LIPP_LARGE_BUFFERS, max_buffers); | ||
1364 | |||
1365 | tile_net_provide_needed_buffers(info); | ||
1366 | |||
1367 | if (info->num_needed_small_buffers != 0 || | ||
1368 | info->num_needed_large_buffers != 0) | ||
1369 | panic("Insufficient memory for buffer stack!"); | ||
1370 | |||
1371 | /* Start LIPP/LEPP and activate "ingress" at the shim. */ | ||
1372 | if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, | ||
1373 | sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0) | ||
1374 | panic("Failed to activate the LIPP Shim!\n"); | ||
1375 | |||
1376 | priv->fully_opened = 1; | ||
1377 | } | ||
1378 | |||
1379 | /* On each tile, enable the hypervisor to trigger interrupts. */ | ||
1380 | /* ISSUE: Do this before starting LIPP/LEPP? */ | ||
1381 | on_each_cpu(tile_net_enable_intr, (void *)dev, 1); | ||
1382 | |||
1383 | /* Start our transmit queue. */ | ||
1384 | netif_start_queue(dev); | ||
1385 | |||
1386 | return 0; | ||
1387 | } | ||
1388 | |||
1389 | |||
1390 | /* | ||
1391 | * Called periodically to retry bringing up the NetIO interface, | ||
1392 | * if it doesn't come up cleanly during tile_net_open(). | ||
1393 | */ | ||
1394 | static void tile_net_open_retry(struct work_struct *w) | ||
1395 | { | ||
1396 | struct delayed_work *dw = | ||
1397 | container_of(w, struct delayed_work, work); | ||
1398 | |||
1399 | struct tile_net_priv *priv = | ||
1400 | container_of(dw, struct tile_net_priv, retry_work); | ||
1401 | |||
1402 | /* | ||
1403 | * Try to bring the NetIO interface up. If it fails, reschedule | ||
1404 | * ourselves to try again later; otherwise, tell Linux we now have | ||
1405 | * a working link. ISSUE: What if the return value is negative? | ||
1406 | */ | ||
1407 | if (tile_net_open_inner(priv->dev)) | ||
1408 | schedule_delayed_work_on(singlethread_cpu, &priv->retry_work, | ||
1409 | TILE_NET_RETRY_INTERVAL); | ||
1410 | else | ||
1411 | netif_carrier_on(priv->dev); | ||
1412 | } | ||
1413 | |||
1414 | |||
1415 | /* | ||
1416 | * Called when a network interface is made active. | ||
1417 | * | ||
1418 | * Returns 0 on success, negative value on failure. | ||
1419 | * | ||
1420 | * The open entry point is called when a network interface is made | ||
1421 | * active by the system (IFF_UP). At this point all resources needed | ||
1422 | * for transmit and receive operations are allocated, the interrupt | ||
1423 | * handler is registered with the OS, the watchdog timer is started, | ||
1424 | * and the stack is notified that the interface is ready. | ||
1425 | * | ||
1426 | * If the actual link is not available yet, then we tell Linux that | ||
1427 | * we have no carrier, and we keep checking until the link comes up. | ||
1428 | */ | ||
1429 | static int tile_net_open(struct net_device *dev) | ||
1430 | { | ||
1431 | int ret = 0; | ||
1432 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1433 | |||
1434 | /* | ||
1435 | * We rely on priv->partly_opened to tell us if this is the | ||
1436 | * first time this interface is being brought up. If it is | ||
1437 | * set, the IPP was already initialized and should not be | ||
1438 | * initialized again. | ||
1439 | */ | ||
1440 | if (!priv->partly_opened) { | ||
1441 | |||
1442 | int count; | ||
1443 | int credits; | ||
1444 | |||
1445 | /* Initialize LIPP/LEPP, and start the Shim. */ | ||
1446 | ret = tile_net_open_aux(dev); | ||
1447 | if (ret < 0) { | ||
1448 | pr_err("tile_net_open_aux failed: %d\n", ret); | ||
1449 | return ret; | ||
1450 | } | ||
1451 | |||
1452 | /* Analyze the network cpus. */ | ||
1453 | |||
1454 | if (network_cpus_used) | ||
1455 | cpumask_copy(&priv->network_cpus_map, | ||
1456 | &network_cpus_map); | ||
1457 | else | ||
1458 | cpumask_copy(&priv->network_cpus_map, cpu_online_mask); | ||
1459 | |||
1460 | |||
1461 | count = cpumask_weight(&priv->network_cpus_map); | ||
1462 | |||
1463 | /* Limit credits to available buffers, and apply min. */ | ||
1464 | credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1); | ||
1465 | |||
1466 | /* Apply "GBE" max limit. */ | ||
1467 | /* ISSUE: Use higher limit for XGBE? */ | ||
1468 | credits = min(NETIO_MAX_RECEIVE_PKTS, credits); | ||
1469 | |||
1470 | priv->network_cpus_count = count; | ||
1471 | priv->network_cpus_credits = credits; | ||
1472 | |||
1473 | #ifdef TILE_NET_DEBUG | ||
1474 | pr_info("Using %d network cpus, with %d credits each\n", | ||
1475 | priv->network_cpus_count, priv->network_cpus_credits); | ||
1476 | #endif | ||
1477 | |||
1478 | priv->partly_opened = 1; | ||
1479 | } | ||
1480 | |||
1481 | /* | ||
1482 | * Attempt to bring up the link. | ||
1483 | */ | ||
1484 | ret = tile_net_open_inner(dev); | ||
1485 | if (ret <= 0) { | ||
1486 | if (ret == 0) | ||
1487 | netif_carrier_on(dev); | ||
1488 | return ret; | ||
1489 | } | ||
1490 | |||
1491 | /* | ||
1492 | * We were unable to bring up the NetIO interface, but we want to | ||
1493 | * try again in a little bit. Tell Linux that we have no carrier | ||
1494 | * so it doesn't try to use the interface before the link comes up | ||
1495 | * and then remember to try again later. | ||
1496 | */ | ||
1497 | netif_carrier_off(dev); | ||
1498 | schedule_delayed_work_on(singlethread_cpu, &priv->retry_work, | ||
1499 | TILE_NET_RETRY_INTERVAL); | ||
1500 | |||
1501 | return 0; | ||
1502 | } | ||
1503 | |||
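The credit computation in tile_net_open() packs three constraints into two lines: an even share of the large-buffer pool for each network cpu, a floor of 16 credits, and the NetIO per-queue ceiling. A standalone restatement of that arithmetic, with the two driver constants (LIPP_LARGE_BUFFERS, NETIO_MAX_RECEIVE_PKTS) passed in as parameters since their values are not visible here:

    #include <stdint.h>

    /*
     * Per-cpu receive credits: share the large-buffer pool evenly
     * across the network cpus, rounded down to an even number, but
     * never fewer than 16 and never more than the per-queue maximum.
     */
    static uint32_t credits_per_cpu(uint32_t large_buffers, uint32_t max_pkts,
                                    uint32_t ncpus)
    {
            uint32_t credits = (large_buffers / ncpus) & ~1u;

            if (credits < 16)
                    credits = 16;
            if (credits > max_pkts)
                    credits = max_pkts;
            return credits;
    }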
1504 | |||
1505 | /* | ||
1506 | * Disables a network interface. | ||
1507 | * | ||
1508 | * Returns 0; this is not allowed to fail. | ||
1509 | * | ||
1510 | * The close entry point is called when an interface is de-activated | ||
1511 | * by the OS. The hardware is still under the driver's control, but | ||
1512 | * needs to be disabled. A global MAC reset is issued to stop the | ||
1513 | * hardware, and all transmit and receive resources are freed. | ||
1514 | * | ||
1515 | * ISSUE: Can this be called while "tile_net_poll()" is running? | ||
1516 | */ | ||
1517 | static int tile_net_stop(struct net_device *dev) | ||
1518 | { | ||
1519 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1520 | |||
1521 | bool pending = true; | ||
1522 | |||
1523 | PDEBUG("tile_net_stop()\n"); | ||
1524 | |||
1525 | /* ISSUE: Only needed if not yet fully open. */ | ||
1526 | cancel_delayed_work_sync(&priv->retry_work); | ||
1527 | |||
1528 | /* Can't transmit any more. */ | ||
1529 | netif_stop_queue(dev); | ||
1530 | |||
1531 | /* | ||
1532 | * Disable hypervisor interrupts on each tile. | ||
1533 | */ | ||
1534 | on_each_cpu(tile_net_disable_intr, (void *)dev, 1); | ||
1535 | |||
1536 | /* | ||
1537 | * Unregister the interrupt handler. | ||
1538 | * The __ffs() function returns the index into the interrupt handler | ||
1539 | * table from the interrupt bit mask, which should have one and | ||
1540 | * only one bit set. | ||
1541 | */ | ||
1542 | if (priv->intr_id) | ||
1543 | free_irq(__ffs(priv->intr_id), dev); | ||
1544 | |||
1545 | /* | ||
1546 | * Drain all the LIPP buffers. | ||
1547 | */ | ||
1548 | |||
1549 | while (true) { | ||
1550 | int buffer; | ||
1551 | |||
1552 | /* NOTE: This should never fail. */ | ||
1553 | if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer, | ||
1554 | sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0) | ||
1555 | break; | ||
1556 | |||
1557 | /* Stop when done. */ | ||
1558 | if (buffer == 0) | ||
1559 | break; | ||
1560 | |||
1561 | { | ||
1562 | /* Convert "linux_buffer_t" to "va". */ | ||
1563 | void *va = __va((phys_addr_t)(buffer >> 1) << 7); | ||
1564 | |||
1565 | /* Acquire the associated "skb". */ | ||
1566 | struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | ||
1567 | struct sk_buff *skb = *skb_ptr; | ||
1568 | |||
1569 | kfree_skb(skb); | ||
1570 | } | ||
1571 | } | ||
1572 | |||
1573 | /* Stop LIPP/LEPP. */ | ||
1574 | tile_net_stop_aux(dev); | ||
1575 | |||
1576 | |||
1577 | priv->fully_opened = 0; | ||
1578 | |||
1579 | |||
1580 | /* | ||
1581 | * XXX: ISSUE: It appears that, in practice anyway, by the | ||
1582 | * time we get here, there are no pending completions. | ||
1583 | */ | ||
1584 | while (pending) { | ||
1585 | |||
1586 | struct sk_buff *olds[32]; | ||
1587 | unsigned int wanted = 32; | ||
1588 | unsigned int i, nolds = 0; | ||
1589 | |||
1590 | nolds = tile_net_lepp_grab_comps(dev, olds, | ||
1591 | wanted, &pending); | ||
1592 | |||
1593 | /* ISSUE: We have never actually seen this debug spew. */ | ||
1594 | if (nolds != 0) | ||
1595 | pr_info("During tile_net_stop(), grabbed %d comps.\n", | ||
1596 | nolds); | ||
1597 | |||
1598 | for (i = 0; i < nolds; i++) | ||
1599 | kfree_skb(olds[i]); | ||
1600 | } | ||
1601 | |||
1602 | |||
1603 | /* Wipe the EPP queue. */ | ||
1604 | memset(priv->epp_queue, 0, sizeof(lepp_queue_t)); | ||
1605 | |||
1606 | /* Evict the EPP queue. */ | ||
1607 | finv_buffer(priv->epp_queue, PAGE_SIZE); | ||
1608 | |||
1609 | return 0; | ||
1610 | } | ||
1611 | |||
1612 | |||
1613 | /* | ||
1614 | * Prepare the "frags" info for the resulting LEPP command. | ||
1615 | * | ||
1616 | * If needed, flush the memory used by the frags. | ||
1617 | */ | ||
1618 | static unsigned int tile_net_tx_frags(lepp_frag_t *frags, | ||
1619 | struct sk_buff *skb, | ||
1620 | void *b_data, unsigned int b_len) | ||
1621 | { | ||
1622 | unsigned int i, n = 0; | ||
1623 | |||
1624 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1625 | |||
1626 | phys_addr_t cpa; | ||
1627 | |||
1628 | if (b_len != 0) { | ||
1629 | |||
1630 | if (!hash_default) | ||
1631 | finv_buffer_remote(b_data, b_len); | ||
1632 | |||
1633 | cpa = __pa(b_data); | ||
1634 | frags[n].cpa_lo = cpa; | ||
1635 | frags[n].cpa_hi = cpa >> 32; | ||
1636 | frags[n].length = b_len; | ||
1637 | frags[n].hash_for_home = hash_default; | ||
1638 | n++; | ||
1639 | } | ||
1640 | |||
1641 | for (i = 0; i < sh->nr_frags; i++) { | ||
1642 | |||
1643 | skb_frag_t *f = &sh->frags[i]; | ||
1644 | unsigned long pfn = page_to_pfn(f->page); | ||
1645 | |||
1646 | /* FIXME: Compute "hash_for_home" properly. */ | ||
1647 | /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ | ||
1648 | int hash_for_home = hash_default; | ||
1649 | |||
1650 | /* FIXME: Hmmm. */ | ||
1651 | if (!hash_default) { | ||
1652 | void *va = pfn_to_kaddr(pfn) + f->page_offset; | ||
1653 | BUG_ON(PageHighMem(f->page)); | ||
1654 | finv_buffer_remote(va, f->size); | ||
1655 | } | ||
1656 | |||
1657 | cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; | ||
1658 | frags[n].cpa_lo = cpa; | ||
1659 | frags[n].cpa_hi = cpa >> 32; | ||
1660 | frags[n].length = f->size; | ||
1661 | frags[n].hash_for_home = hash_for_home; | ||
1662 | n++; | ||
1663 | } | ||
1664 | |||
1665 | return n; | ||
1666 | } | ||
1667 | |||
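tile_net_tx_frags() hands LEPP each fragment's client physical address split into 32-bit halves. The split is nothing more than the following, shown with a stand-in struct since lepp_frag_t is defined elsewhere:

    #include <stdint.h>

    /* Illustrative stand-in for the address fields of lepp_frag_t. */
    struct frag_addr {
            uint32_t cpa_lo;
            uint32_t cpa_hi;
    };

    /* Split a 64-bit client physical address as the driver fills frags[n]. */
    static inline struct frag_addr split_cpa(uint64_t cpa)
    {
            struct frag_addr f = {
                    .cpa_lo = (uint32_t)cpa,
                    .cpa_hi = (uint32_t)(cpa >> 32),
            };
            return f;
    }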
1668 | |||
1669 | /* | ||
1670 | * This function takes "skb", consisting of a header template and a | ||
1671 | * payload, and hands it to LEPP, to emit as one or more segments, | ||
1672 | * each consisting of a possibly modified header, plus a piece of the | ||
1673 | * payload, via a process known as "tcp segmentation offload". | ||
1674 | * | ||
1675 | * Usually, "data" will contain the header template, of size "sh_len", | ||
1676 | * and "sh->frags" will contain "skb->data_len" bytes of payload, and | ||
1677 | * there will be "sh->gso_segs" segments. | ||
1678 | * | ||
1679 | * Sometimes, if "sendfile()" requires copying, we will be called with | ||
1680 | * "data" containing the header and payload, with "frags" being empty. | ||
1681 | * | ||
1682 | * In theory, "sh->nr_frags" could be 3, but in practice, it seems | ||
1683 | * that this will never actually happen. | ||
1684 | * | ||
1685 | * See "emulate_large_send_offload()" for some reference code, which | ||
1686 | * does not handle checksumming. | ||
1687 | * | ||
1688 | * ISSUE: How do we make sure that high memory DMA does not migrate? | ||
1689 | */ | ||
1690 | static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) | ||
1691 | { | ||
1692 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1693 | int my_cpu = smp_processor_id(); | ||
1694 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
1695 | struct tile_net_stats_t *stats = &info->stats; | ||
1696 | |||
1697 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1698 | |||
1699 | unsigned char *data = skb->data; | ||
1700 | |||
1701 | /* The ip header follows the ethernet header. */ | ||
1702 | struct iphdr *ih = ip_hdr(skb); | ||
1703 | unsigned int ih_len = ih->ihl * 4; | ||
1704 | |||
1705 | /* Note that "nh == ih", by definition. */ | ||
1706 | unsigned char *nh = skb_network_header(skb); | ||
1707 | unsigned int eh_len = nh - data; | ||
1708 | |||
1709 | /* The tcp header follows the ip header. */ | ||
1710 | struct tcphdr *th = (struct tcphdr *)(nh + ih_len); | ||
1711 | unsigned int th_len = th->doff * 4; | ||
1712 | |||
1713 | /* The total number of header bytes. */ | ||
1714 | /* NOTE: This may be less than skb_headlen(skb). */ | ||
1715 | unsigned int sh_len = eh_len + ih_len + th_len; | ||
1716 | |||
1717 | /* The number of payload bytes at "skb->data + sh_len". */ | ||
1718 | /* This is non-zero for sendfile() without HIGHDMA. */ | ||
1719 | unsigned int b_len = skb_headlen(skb) - sh_len; | ||
1720 | |||
1721 | /* The total number of payload bytes. */ | ||
1722 | unsigned int d_len = b_len + skb->data_len; | ||
1723 | |||
1724 | /* The maximum payload size. */ | ||
1725 | unsigned int p_len = sh->gso_size; | ||
1726 | |||
1727 | /* The total number of segments. */ | ||
1728 | unsigned int num_segs = sh->gso_segs; | ||
1729 | |||
1730 | /* The temporary copy of the command. */ | ||
1731 | u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4]; | ||
1732 | lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body; | ||
1733 | |||
1734 | /* Analyze the "frags". */ | ||
1735 | unsigned int num_frags = | ||
1736 | tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len); | ||
1737 | |||
1738 | /* The size of the command, including frags and header. */ | ||
1739 | size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len); | ||
1740 | |||
1741 | /* The command header. */ | ||
1742 | lepp_tso_cmd_t cmd_init = { | ||
1743 | .tso = true, | ||
1744 | .header_size = sh_len, | ||
1745 | .ip_offset = eh_len, | ||
1746 | .tcp_offset = eh_len + ih_len, | ||
1747 | .payload_size = p_len, | ||
1748 | .num_frags = num_frags, | ||
1749 | }; | ||
1750 | |||
1751 | unsigned long irqflags; | ||
1752 | |||
1753 | lepp_queue_t *eq = priv->epp_queue; | ||
1754 | |||
1755 | struct sk_buff *olds[4]; | ||
1756 | unsigned int wanted = 4; | ||
1757 | unsigned int i, nolds = 0; | ||
1758 | |||
1759 | unsigned int cmd_head, cmd_tail, cmd_next; | ||
1760 | unsigned int comp_tail; | ||
1761 | |||
1762 | unsigned int free_slots; | ||
1763 | |||
1764 | |||
1765 | /* Paranoia. */ | ||
1766 | BUG_ON(skb->protocol != htons(ETH_P_IP)); | ||
1767 | BUG_ON(ih->protocol != IPPROTO_TCP); | ||
1768 | BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL); | ||
1769 | BUG_ON(num_frags > LEPP_MAX_FRAGS); | ||
1770 | /*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */ | ||
1771 | BUG_ON(num_segs <= 1); | ||
1772 | |||
1773 | |||
1774 | /* Finish preparing the command. */ | ||
1775 | |||
1776 | /* Copy the command header. */ | ||
1777 | *cmd = cmd_init; | ||
1778 | |||
1779 | /* Copy the "header". */ | ||
1780 | memcpy(&cmd->frags[num_frags], data, sh_len); | ||
1781 | |||
1782 | |||
1783 | /* Prefetch and wait, to minimize time spent holding the spinlock. */ | ||
1784 | prefetch_L1(&eq->comp_tail); | ||
1785 | prefetch_L1(&eq->cmd_tail); | ||
1786 | mb(); | ||
1787 | |||
1788 | |||
1789 | /* Enqueue the command. */ | ||
1790 | |||
1791 | spin_lock_irqsave(&priv->cmd_lock, irqflags); | ||
1792 | |||
1793 | /* | ||
1794 | * Handle completions if needed to make room. | ||
1795 | * HACK: Spin until there is sufficient room. | ||
1796 | */ | ||
1797 | free_slots = lepp_num_free_comp_slots(eq); | ||
1798 | if (free_slots < 1) { | ||
1799 | spin: | ||
1800 | nolds += tile_net_lepp_grab_comps(dev, olds + nolds, | ||
1801 | wanted - nolds, NULL); | ||
1802 | if (lepp_num_free_comp_slots(eq) < 1) | ||
1803 | goto spin; | ||
1804 | } | ||
1805 | |||
1806 | cmd_head = eq->cmd_head; | ||
1807 | cmd_tail = eq->cmd_tail; | ||
1808 | |||
1809 | /* NOTE: The "gotos" below are untested. */ | ||
1810 | |||
1811 | /* Prepare to advance, detecting full queue. */ | ||
1812 | cmd_next = cmd_tail + cmd_size; | ||
1813 | if (cmd_tail < cmd_head && cmd_next >= cmd_head) | ||
1814 | goto spin; | ||
1815 | if (cmd_next > LEPP_CMD_LIMIT) { | ||
1816 | cmd_next = 0; | ||
1817 | if (cmd_next == cmd_head) | ||
1818 | goto spin; | ||
1819 | } | ||
1820 | |||
1821 | /* Copy the command. */ | ||
1822 | memcpy(&eq->cmds[cmd_tail], cmd, cmd_size); | ||
1823 | |||
1824 | /* Advance. */ | ||
1825 | cmd_tail = cmd_next; | ||
1826 | |||
1827 | /* Record "skb" for eventual freeing. */ | ||
1828 | comp_tail = eq->comp_tail; | ||
1829 | eq->comps[comp_tail] = skb; | ||
1830 | LEPP_QINC(comp_tail); | ||
1831 | eq->comp_tail = comp_tail; | ||
1832 | |||
1833 | /* Flush before allowing LEPP to handle the command. */ | ||
1834 | __insn_mf(); | ||
1835 | |||
1836 | eq->cmd_tail = cmd_tail; | ||
1837 | |||
1838 | spin_unlock_irqrestore(&priv->cmd_lock, irqflags); | ||
1839 | |||
1840 | if (nolds == 0) | ||
1841 | nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); | ||
1842 | |||
1843 | /* Handle completions. */ | ||
1844 | for (i = 0; i < nolds; i++) | ||
1845 | kfree_skb(olds[i]); | ||
1846 | |||
1847 | /* Update stats. */ | ||
1848 | stats->tx_packets += num_segs; | ||
1849 | stats->tx_bytes += (num_segs * sh_len) + d_len; | ||
1850 | |||
1851 | /* Make sure the egress timer is scheduled. */ | ||
1852 | tile_net_schedule_egress_timer(info); | ||
1853 | |||
1854 | return NETDEV_TX_OK; | ||
1855 | } | ||
1856 | |||
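The stats update at the end of tile_net_tx_tso() charges one copy of the sh_len-byte header template per emitted segment plus the payload once. The commented-out sanity check near the top of the function suggests num_segs is expected to equal ceil(d_len / p_len); a small sketch of the byte accounting under that assumption:

    #include <stdint.h>

    /*
     * Wire bytes charged for one TSO send: each of num_segs segments
     * repeats the header template, and the d_len payload bytes go out
     * exactly once.  num_segs is derived as ceil(d_len / p_len) here;
     * the driver itself trusts sh->gso_segs.
     */
    static inline uint64_t tso_tx_bytes(uint32_t d_len, uint32_t p_len,
                                        uint32_t sh_len)
    {
            uint32_t num_segs = (d_len + p_len - 1) / p_len;

            return (uint64_t)num_segs * sh_len + d_len;
    }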
1857 | |||
1858 | /* | ||
1859 | * Transmit a packet (called by the kernel via "hard_start_xmit" hook). | ||
1860 | */ | ||
1861 | static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | ||
1862 | { | ||
1863 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1864 | int my_cpu = smp_processor_id(); | ||
1865 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
1866 | struct tile_net_stats_t *stats = &info->stats; | ||
1867 | |||
1868 | unsigned long irqflags; | ||
1869 | |||
1870 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1871 | |||
1872 | unsigned int len = skb->len; | ||
1873 | unsigned char *data = skb->data; | ||
1874 | |||
1875 | unsigned int csum_start = skb->csum_start - skb_headroom(skb); | ||
1876 | |||
1877 | lepp_frag_t frags[LEPP_MAX_FRAGS]; | ||
1878 | |||
1879 | unsigned int num_frags; | ||
1880 | |||
1881 | lepp_queue_t *eq = priv->epp_queue; | ||
1882 | |||
1883 | struct sk_buff *olds[4]; | ||
1884 | unsigned int wanted = 4; | ||
1885 | unsigned int i, nolds = 0; | ||
1886 | |||
1887 | unsigned int cmd_size = sizeof(lepp_cmd_t); | ||
1888 | |||
1889 | unsigned int cmd_head, cmd_tail, cmd_next; | ||
1890 | unsigned int comp_tail; | ||
1891 | |||
1892 | lepp_cmd_t cmds[LEPP_MAX_FRAGS]; | ||
1893 | |||
1894 | unsigned int free_slots; | ||
1895 | |||
1896 | |||
1897 | /* | ||
1898 | * This is paranoia, since we think that if the link doesn't come | ||
1899 | * up, telling Linux we have no carrier will keep it from trying | ||
1900 | * to transmit. If it does, though, we can't execute this routine, | ||
1901 | * since data structures we depend on aren't set up yet. | ||
1902 | */ | ||
1903 | if (!info->registered) | ||
1904 | return NETDEV_TX_BUSY; | ||
1905 | |||
1906 | |||
1907 | /* Save the timestamp. */ | ||
1908 | dev->trans_start = jiffies; | ||
1909 | |||
1910 | |||
1911 | #ifdef TILE_NET_PARANOIA | ||
1912 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
1913 | if (hash_default) { | ||
1914 | HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data); | ||
1915 | if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) | ||
1916 | panic("Non-coherent egress buffer!"); | ||
1917 | } | ||
1918 | #endif | ||
1919 | #endif | ||
1920 | |||
1921 | |||
1922 | #ifdef TILE_NET_DUMP_PACKETS | ||
1923 | /* ISSUE: Does not dump the "frags". */ | ||
1924 | dump_packet(data, skb_headlen(skb), "tx"); | ||
1925 | #endif /* TILE_NET_DUMP_PACKETS */ | ||
1926 | |||
1927 | |||
1928 | if (sh->gso_size != 0) | ||
1929 | return tile_net_tx_tso(skb, dev); | ||
1930 | |||
1931 | |||
1932 | /* Prepare the commands. */ | ||
1933 | |||
1934 | num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); | ||
1935 | |||
1936 | for (i = 0; i < num_frags; i++) { | ||
1937 | |||
1938 | bool final = (i == num_frags - 1); | ||
1939 | |||
1940 | lepp_cmd_t cmd = { | ||
1941 | .cpa_lo = frags[i].cpa_lo, | ||
1942 | .cpa_hi = frags[i].cpa_hi, | ||
1943 | .length = frags[i].length, | ||
1944 | .hash_for_home = frags[i].hash_for_home, | ||
1945 | .send_completion = final, | ||
1946 | .end_of_packet = final | ||
1947 | }; | ||
1948 | |||
1949 | if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) { | ||
1950 | cmd.compute_checksum = 1; | ||
1951 | cmd.checksum_data.bits.start_byte = csum_start; | ||
1952 | cmd.checksum_data.bits.count = len - csum_start; | ||
1953 | cmd.checksum_data.bits.destination_byte = | ||
1954 | csum_start + skb->csum_offset; | ||
1955 | } | ||
1956 | |||
1957 | cmds[i] = cmd; | ||
1958 | } | ||
1959 | |||
1960 | |||
1961 | /* Prefetch and wait, to minimize time spent holding the spinlock. */ | ||
1962 | prefetch_L1(&eq->comp_tail); | ||
1963 | prefetch_L1(&eq->cmd_tail); | ||
1964 | mb(); | ||
1965 | |||
1966 | |||
1967 | /* Enqueue the commands. */ | ||
1968 | |||
1969 | spin_lock_irqsave(&priv->cmd_lock, irqflags); | ||
1970 | |||
1971 | /* | ||
1972 | * Handle completions if needed to make room. | ||
1973 | * HACK: Spin until there is sufficient room. | ||
1974 | */ | ||
1975 | free_slots = lepp_num_free_comp_slots(eq); | ||
1976 | if (free_slots < 1) { | ||
1977 | spin: | ||
1978 | nolds += tile_net_lepp_grab_comps(dev, olds + nolds, | ||
1979 | wanted - nolds, NULL); | ||
1980 | if (lepp_num_free_comp_slots(eq) < 1) | ||
1981 | goto spin; | ||
1982 | } | ||
1983 | |||
1984 | cmd_head = eq->cmd_head; | ||
1985 | cmd_tail = eq->cmd_tail; | ||
1986 | |||
1987 | /* NOTE: The "gotos" below are untested. */ | ||
1988 | |||
1989 | /* Copy the commands, or fail. */ | ||
1990 | for (i = 0; i < num_frags; i++) { | ||
1991 | |||
1992 | /* Prepare to advance, detecting full queue. */ | ||
1993 | cmd_next = cmd_tail + cmd_size; | ||
1994 | if (cmd_tail < cmd_head && cmd_next >= cmd_head) | ||
1995 | goto spin; | ||
1996 | if (cmd_next > LEPP_CMD_LIMIT) { | ||
1997 | cmd_next = 0; | ||
1998 | if (cmd_next == cmd_head) | ||
1999 | goto spin; | ||
2000 | } | ||
2001 | |||
2002 | /* Copy the command. */ | ||
2003 | *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i]; | ||
2004 | |||
2005 | /* Advance. */ | ||
2006 | cmd_tail = cmd_next; | ||
2007 | } | ||
2008 | |||
2009 | /* Record "skb" for eventual freeing. */ | ||
2010 | comp_tail = eq->comp_tail; | ||
2011 | eq->comps[comp_tail] = skb; | ||
2012 | LEPP_QINC(comp_tail); | ||
2013 | eq->comp_tail = comp_tail; | ||
2014 | |||
2015 | /* Flush before allowing LEPP to handle the command. */ | ||
2016 | __insn_mf(); | ||
2017 | |||
2018 | eq->cmd_tail = cmd_tail; | ||
2019 | |||
2020 | spin_unlock_irqrestore(&priv->cmd_lock, irqflags); | ||
2021 | |||
2022 | if (nolds == 0) | ||
2023 | nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); | ||
2024 | |||
2025 | /* Handle completions. */ | ||
2026 | for (i = 0; i < nolds; i++) | ||
2027 | kfree_skb(olds[i]); | ||
2028 | |||
2029 | /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ | ||
2030 | stats->tx_packets++; | ||
2031 | stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); | ||
2032 | |||
2033 | /* Make sure the egress timer is scheduled. */ | ||
2034 | tile_net_schedule_egress_timer(info); | ||
2035 | |||
2036 | return NETDEV_TX_OK; | ||
2037 | } | ||
2038 | |||
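Both tile_net_tx_tso() and tile_net_tx() guard the LEPP command ring with the same "prepare to advance, detecting full queue" test. Lifted out of its goto-based surroundings it amounts to the check below, with LEPP_CMD_LIMIT passed in as a parameter:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Full-queue test for enqueueing a command of "size" bytes at
     * byte offset "tail" of the command ring; "head" is the consumer
     * offset and "limit" plays the role of LEPP_CMD_LIMIT.  Returns
     * false in the cases where the driver would spin waiting for
     * completions; otherwise *next receives the new tail, wrapping
     * to 0 when the command would run past the limit.
     */
    static bool cmd_ring_advance(uint32_t head, uint32_t tail, uint32_t size,
                                 uint32_t limit, uint32_t *next)
    {
            uint32_t n = tail + size;

            if (tail < head && n >= head)
                    return false;           /* would overtake the consumer */
            if (n > limit) {
                    n = 0;                  /* wrap to the start of the ring */
                    if (n == head)
                            return false;   /* full after wrapping */
            }
            *next = n;
            return true;
    }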
2039 | |||
2040 | /* | ||
2041 | * Deal with a transmit timeout. | ||
2042 | */ | ||
2043 | static void tile_net_tx_timeout(struct net_device *dev) | ||
2044 | { | ||
2045 | PDEBUG("tile_net_tx_timeout()\n"); | ||
2046 | PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies, | ||
2047 | jiffies - dev->trans_start); | ||
2048 | |||
2049 | /* XXX: ISSUE: This doesn't seem useful for us. */ | ||
2050 | netif_wake_queue(dev); | ||
2051 | } | ||
2052 | |||
2053 | |||
2054 | /* | ||
2055 | * Ioctl commands. | ||
2056 | */ | ||
2057 | static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
2058 | { | ||
2059 | return -EOPNOTSUPP; | ||
2060 | } | ||
2061 | |||
2062 | |||
2063 | /* | ||
2064 | * Get System Network Statistics. | ||
2065 | * | ||
2066 | * Returns the address of the device statistics structure. | ||
2067 | */ | ||
2068 | static struct net_device_stats *tile_net_get_stats(struct net_device *dev) | ||
2069 | { | ||
2070 | struct tile_net_priv *priv = netdev_priv(dev); | ||
2071 | u32 rx_packets = 0; | ||
2072 | u32 tx_packets = 0; | ||
2073 | u32 rx_bytes = 0; | ||
2074 | u32 tx_bytes = 0; | ||
2075 | int i; | ||
2076 | |||
2077 | for_each_online_cpu(i) { | ||
2078 | if (priv->cpu[i]) { | ||
2079 | rx_packets += priv->cpu[i]->stats.rx_packets; | ||
2080 | rx_bytes += priv->cpu[i]->stats.rx_bytes; | ||
2081 | tx_packets += priv->cpu[i]->stats.tx_packets; | ||
2082 | tx_bytes += priv->cpu[i]->stats.tx_bytes; | ||
2083 | } | ||
2084 | } | ||
2085 | |||
2086 | priv->stats.rx_packets = rx_packets; | ||
2087 | priv->stats.rx_bytes = rx_bytes; | ||
2088 | priv->stats.tx_packets = tx_packets; | ||
2089 | priv->stats.tx_bytes = tx_bytes; | ||
2090 | |||
2091 | return &priv->stats; | ||
2092 | } | ||
2093 | |||
2094 | |||
2095 | /* | ||
2096 | * Change the "mtu". | ||
2097 | * | ||
2098 | * The "change_mtu" method is usually not needed. | ||
2099 | * If you need it, it must be like this. | ||
2100 | */ | ||
2101 | static int tile_net_change_mtu(struct net_device *dev, int new_mtu) | ||
2102 | { | ||
2103 | PDEBUG("tile_net_change_mtu()\n"); | ||
2104 | |||
2105 | /* Check ranges. */ | ||
2106 | if ((new_mtu < 68) || (new_mtu > 1500)) | ||
2107 | return -EINVAL; | ||
2108 | |||
2109 | /* Accept the value. */ | ||
2110 | dev->mtu = new_mtu; | ||
2111 | |||
2112 | return 0; | ||
2113 | } | ||
2114 | |||
2115 | |||
2116 | /* | ||
2117 | * Change the Ethernet Address of the NIC. | ||
2118 | * | ||
2119 | * The hypervisor driver does not support changing the MAC address. However, | ||
2120 | * the IPP does not do anything with the MAC address, so the address which | ||
2121 | * gets used on outgoing packets, and which is accepted on incoming packets, | ||
2122 | * is completely up to the NetIO program or kernel driver which is actually | ||
2123 | * handling them. | ||
2124 | * | ||
2125 | * Returns 0 on success, negative on failure. | ||
2126 | */ | ||
2127 | static int tile_net_set_mac_address(struct net_device *dev, void *p) | ||
2128 | { | ||
2129 | struct sockaddr *addr = p; | ||
2130 | |||
2131 | if (!is_valid_ether_addr(addr->sa_data)) | ||
2132 | return -EINVAL; | ||
2133 | |||
2134 | /* ISSUE: Note that "dev_addr" is now a pointer. */ | ||
2135 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
2136 | |||
2137 | return 0; | ||
2138 | } | ||
2139 | |||
2140 | |||
2141 | /* | ||
2142 | * Obtain the MAC address from the hypervisor. | ||
2143 | * This must be done before opening the device. | ||
2144 | */ | ||
2145 | static int tile_net_get_mac(struct net_device *dev) | ||
2146 | { | ||
2147 | struct tile_net_priv *priv = netdev_priv(dev); | ||
2148 | |||
2149 | char hv_dev_name[32]; | ||
2150 | int len; | ||
2151 | |||
2152 | __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF }; | ||
2153 | |||
2154 | int ret; | ||
2155 | |||
2156 | /* For example, "xgbe0". */ | ||
2157 | strcpy(hv_dev_name, dev->name); | ||
2158 | len = strlen(hv_dev_name); | ||
2159 | |||
2160 | /* For example, "xgbe/0". */ | ||
2161 | hv_dev_name[len] = hv_dev_name[len - 1]; | ||
2162 | hv_dev_name[len - 1] = '/'; | ||
2163 | len++; | ||
2164 | |||
2165 | /* For example, "xgbe/0/native_hash". */ | ||
2166 | strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native"); | ||
2167 | |||
2168 | /* Get the hypervisor handle for this device. */ | ||
2169 | priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0); | ||
2170 | PDEBUG("hv_dev_open(%s) returned %d %p\n", | ||
2171 | hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl); | ||
2172 | if (priv->hv_devhdl < 0) { | ||
2173 | if (priv->hv_devhdl == HV_ENODEV) | ||
2174 | printk(KERN_DEBUG "Ignoring unconfigured device %s\n", | ||
2175 | hv_dev_name); | ||
2176 | else | ||
2177 | printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n", | ||
2178 | hv_dev_name, priv->hv_devhdl); | ||
2179 | return -1; | ||
2180 | } | ||
2181 | |||
2182 | /* | ||
2183 | * Read the hardware address from the hypervisor. | ||
2184 | * ISSUE: Note that "dev_addr" is now a pointer. | ||
2185 | */ | ||
2186 | offset.bits.class = NETIO_PARAM; | ||
2187 | offset.bits.addr = NETIO_PARAM_MAC; | ||
2188 | ret = hv_dev_pread(priv->hv_devhdl, 0, | ||
2189 | (HV_VirtAddr)dev->dev_addr, dev->addr_len, | ||
2190 | offset.word); | ||
2191 | PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret); | ||
2192 | if (ret <= 0) { | ||
2193 | printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n", | ||
2194 | dev->name); | ||
2195 | /* | ||
2196 | * Since the device is configured by the hypervisor but we | ||
2197 | * can't get its MAC address, we are most likely running | ||
2198 | * the simulator, so let's generate a random MAC address. | ||
2199 | */ | ||
2200 | random_ether_addr(dev->dev_addr); | ||
2201 | } | ||
2202 | |||
2203 | return 0; | ||
2204 | } | ||
2205 | |||
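tile_net_get_mac() rewrites the netdev name into a hypervisor device path by inserting '/' before the trailing digit and appending a suffix, so "xgbe0" becomes "xgbe/0/native_hash" (or ".../native" when hash-for-home is not the default). The in-place character shuffle can be restated as a single snprintf(); the 32-byte buffer matches the driver's, and a name ending in one digit is assumed, as in the driver:

    #include <stdio.h>
    #include <string.h>

    /*
     * Build the hypervisor device path from a netdev name such as
     * "xgbe0": keep everything but the last character, add '/', the
     * trailing digit, and the hash suffix.
     */
    static void build_hv_dev_name(char out[32], const char *devname,
                                  int hash_default)
    {
            size_t len = strlen(devname);

            snprintf(out, 32, "%.*s/%c%s",
                     (int)(len - 1), devname, devname[len - 1],
                     hash_default ? "/native_hash" : "/native");
    }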
2206 | |||
2207 | static struct net_device_ops tile_net_ops = { | ||
2208 | .ndo_open = tile_net_open, | ||
2209 | .ndo_stop = tile_net_stop, | ||
2210 | .ndo_start_xmit = tile_net_tx, | ||
2211 | .ndo_do_ioctl = tile_net_ioctl, | ||
2212 | .ndo_get_stats = tile_net_get_stats, | ||
2213 | .ndo_change_mtu = tile_net_change_mtu, | ||
2214 | .ndo_tx_timeout = tile_net_tx_timeout, | ||
2215 | .ndo_set_mac_address = tile_net_set_mac_address | ||
2216 | }; | ||
2217 | |||
2218 | |||
2219 | /* | ||
2220 | * The setup function. | ||
2221 | * | ||
2222 | * This uses ether_setup() to assign various fields in dev, including | ||
2223 | * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. | ||
2224 | */ | ||
2225 | static void tile_net_setup(struct net_device *dev) | ||
2226 | { | ||
2227 | PDEBUG("tile_net_setup()\n"); | ||
2228 | |||
2229 | ether_setup(dev); | ||
2230 | |||
2231 | dev->netdev_ops = &tile_net_ops; | ||
2232 | |||
2233 | dev->watchdog_timeo = TILE_NET_TIMEOUT; | ||
2234 | |||
2235 | /* We want lockless xmit. */ | ||
2236 | dev->features |= NETIF_F_LLTX; | ||
2237 | |||
2238 | /* We support hardware tx checksums. */ | ||
2239 | dev->features |= NETIF_F_HW_CSUM; | ||
2240 | |||
2241 | /* We support scatter/gather. */ | ||
2242 | dev->features |= NETIF_F_SG; | ||
2243 | |||
2244 | /* We support TSO. */ | ||
2245 | dev->features |= NETIF_F_TSO; | ||
2246 | |||
2247 | #ifdef TILE_NET_GSO | ||
2248 | /* We support GSO. */ | ||
2249 | dev->features |= NETIF_F_GSO; | ||
2250 | #endif | ||
2251 | |||
2252 | if (hash_default) | ||
2253 | dev->features |= NETIF_F_HIGHDMA; | ||
2254 | |||
2255 | /* ISSUE: We should support NETIF_F_UFO. */ | ||
2256 | |||
2257 | dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; | ||
2258 | |||
2259 | dev->mtu = TILE_NET_MTU; | ||
2260 | } | ||
2261 | |||
2262 | |||
2263 | /* | ||
2264 | * Allocate the device structure, register the device, and obtain the | ||
2265 | * MAC address from the hypervisor. | ||
2266 | */ | ||
2267 | static struct net_device *tile_net_dev_init(const char *name) | ||
2268 | { | ||
2269 | int ret; | ||
2270 | struct net_device *dev; | ||
2271 | struct tile_net_priv *priv; | ||
2272 | struct page *page; | ||
2273 | |||
2274 | /* | ||
2275 | * Allocate the device structure. This allocates "priv", calls | ||
2276 | * tile_net_setup(), and saves "name". Normally, "name" is a | ||
2277 | * template, instantiated by register_netdev(), but not for us. | ||
2278 | */ | ||
2279 | dev = alloc_netdev(sizeof(*priv), name, tile_net_setup); | ||
2280 | if (!dev) { | ||
2281 | pr_err("alloc_netdev(%s) failed\n", name); | ||
2282 | return NULL; | ||
2283 | } | ||
2284 | |||
2285 | priv = netdev_priv(dev); | ||
2286 | |||
2287 | /* Initialize "priv". */ | ||
2288 | |||
2289 | memset(priv, 0, sizeof(*priv)); | ||
2290 | |||
2291 | /* Save "dev" for "tile_net_open_retry()". */ | ||
2292 | priv->dev = dev; | ||
2293 | |||
2294 | INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry); | ||
2295 | |||
2296 | spin_lock_init(&priv->cmd_lock); | ||
2297 | spin_lock_init(&priv->comp_lock); | ||
2298 | |||
2299 | /* Allocate "epp_queue". */ | ||
2300 | BUG_ON(get_order(sizeof(lepp_queue_t)) != 0); | ||
2301 | page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); | ||
2302 | if (!page) { | ||
2303 | free_netdev(dev); | ||
2304 | return NULL; | ||
2305 | } | ||
2306 | priv->epp_queue = page_address(page); | ||
2307 | |||
2308 | /* Register the network device. */ | ||
2309 | ret = register_netdev(dev); | ||
2310 | if (ret) { | ||
2311 | pr_err("register_netdev %s failed %d\n", dev->name, ret); | ||
2312 | free_page((unsigned long)priv->epp_queue); | ||
2313 | free_netdev(dev); | ||
2314 | return NULL; | ||
2315 | } | ||
2316 | |||
2317 | /* Get the MAC address. */ | ||
2318 | ret = tile_net_get_mac(dev); | ||
2319 | if (ret < 0) { | ||
2320 | unregister_netdev(dev); | ||
2321 | free_page((unsigned long)priv->epp_queue); | ||
2322 | free_netdev(dev); | ||
2323 | return NULL; | ||
2324 | } | ||
2325 | |||
2326 | return dev; | ||
2327 | } | ||
2328 | |||
2329 | |||
2330 | /* | ||
2331 | * Module cleanup. | ||
2332 | */ | ||
2333 | static void tile_net_cleanup(void) | ||
2334 | { | ||
2335 | int i; | ||
2336 | |||
2337 | for (i = 0; i < TILE_NET_DEVS; i++) { | ||
2338 | if (tile_net_devs[i]) { | ||
2339 | struct net_device *dev = tile_net_devs[i]; | ||
2340 | struct tile_net_priv *priv = netdev_priv(dev); | ||
2341 | unregister_netdev(dev); | ||
2342 | finv_buffer(priv->epp_queue, PAGE_SIZE); | ||
2343 | free_page((unsigned long)priv->epp_queue); | ||
2344 | free_netdev(dev); | ||
2345 | } | ||
2346 | } | ||
2347 | } | ||
2348 | |||
2349 | |||
2350 | /* | ||
2351 | * Module initialization. | ||
2352 | */ | ||
2353 | static int tile_net_init_module(void) | ||
2354 | { | ||
2355 | pr_info("Tilera IPP Net Driver\n"); | ||
2356 | |||
2357 | tile_net_devs[0] = tile_net_dev_init("xgbe0"); | ||
2358 | tile_net_devs[1] = tile_net_dev_init("xgbe1"); | ||
2359 | tile_net_devs[2] = tile_net_dev_init("gbe0"); | ||
2360 | tile_net_devs[3] = tile_net_dev_init("gbe1"); | ||
2361 | |||
2362 | return 0; | ||
2363 | } | ||
2364 | |||
2365 | |||
2366 | #ifndef MODULE | ||
2367 | /* | ||
2368 | * The "network_cpus" boot argument specifies the cpus that are dedicated | ||
2369 | * to handle ingress packets. | ||
2370 | * | ||
2371 | * The parameter should be in the form "network_cpus=m-n[,x-y]", where | ||
2372 | * m, n, x, y are cpu numbers; the listed ranges name the cpus that | ||
2373 | * are to be dedicated to handling ingress packets. | ||
2374 | */ | ||
2375 | static int __init network_cpus_setup(char *str) | ||
2376 | { | ||
2377 | int rc = cpulist_parse_crop(str, &network_cpus_map); | ||
2378 | if (rc != 0) { | ||
2379 | pr_warning("network_cpus=%s: malformed cpu list\n", | ||
2380 | str); | ||
2381 | } else { | ||
2382 | |||
2383 | /* Remove dedicated cpus. */ | ||
2384 | cpumask_and(&network_cpus_map, &network_cpus_map, | ||
2385 | cpu_possible_mask); | ||
2386 | |||
2387 | |||
2388 | if (cpumask_empty(&network_cpus_map)) { | ||
2389 | pr_warning("Ignoring network_cpus='%s'.\n", | ||
2390 | str); | ||
2391 | } else { | ||
2392 | char buf[1024]; | ||
2393 | cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); | ||
2394 | pr_info("Linux network CPUs: %s\n", buf); | ||
2395 | network_cpus_used = true; | ||
2396 | } | ||
2397 | } | ||
2398 | |||
2399 | return 0; | ||
2400 | } | ||
2401 | __setup("network_cpus=", network_cpus_setup); | ||
2402 | #endif | ||
2403 | |||
2404 | |||
2405 | module_init(tile_net_init_module); | ||
2406 | module_exit(tile_net_cleanup); | ||
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 966b9496a9dd..195406db3bd8 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c | |||
@@ -37,7 +37,7 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) | |||
37 | int addr, eep_start_loc; | 37 | int addr, eep_start_loc; |
38 | eep_data = (u16 *)eep; | 38 | eep_data = (u16 *)eep; |
39 | 39 | ||
40 | if (ah->hw_version.devid == 0x7015) | 40 | if (AR9287_HTC_DEVID(ah)) |
41 | eep_start_loc = AR9287_HTC_EEP_START_LOC; | 41 | eep_start_loc = AR9287_HTC_EEP_START_LOC; |
42 | else | 42 | else |
43 | eep_start_loc = AR9287_EEP_START_LOC; | 43 | eep_start_loc = AR9287_EEP_START_LOC; |
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index f7ec31b4ddd3..dfb6560dab92 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c | |||
@@ -36,8 +36,13 @@ static struct usb_device_id ath9k_hif_usb_ids[] = { | |||
36 | { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ | 36 | { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ |
37 | { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ | 37 | { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ |
38 | { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */ | 38 | { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */ |
39 | { USB_DEVICE(0x13D3, 0x3348) }, /* Azurewave */ | ||
40 | { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */ | ||
41 | { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */ | ||
39 | { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ | 42 | { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ |
40 | { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */ | 43 | { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */ |
44 | { USB_DEVICE(0x040D, 0x3801) }, /* VIA */ | ||
45 | { USB_DEVICE(0x1668, 0x1200) }, /* Verizon */ | ||
41 | { }, | 46 | { }, |
42 | }; | 47 | }; |
43 | 48 | ||
@@ -806,6 +811,8 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) | |||
806 | case 0x7010: | 811 | case 0x7010: |
807 | case 0x7015: | 812 | case 0x7015: |
808 | case 0x9018: | 813 | case 0x9018: |
814 | case 0xA704: | ||
815 | case 0x1200: | ||
809 | firm_offset = AR7010_FIRMWARE_TEXT; | 816 | firm_offset = AR7010_FIRMWARE_TEXT; |
810 | break; | 817 | break; |
811 | default: | 818 | default: |
@@ -928,6 +935,8 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface, | |||
928 | case 0x7010: | 935 | case 0x7010: |
929 | case 0x7015: | 936 | case 0x7015: |
930 | case 0x9018: | 937 | case 0x9018: |
938 | case 0xA704: | ||
939 | case 0x1200: | ||
931 | if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202) | 940 | if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202) |
932 | hif_dev->fw_name = FIRMWARE_AR7010_1_1; | 941 | hif_dev->fw_name = FIRMWARE_AR7010_1_1; |
933 | else | 942 | else |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 3d7b97f1b3ae..7c8a38d04561 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c | |||
@@ -249,6 +249,8 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid) | |||
249 | case 0x7010: | 249 | case 0x7010: |
250 | case 0x7015: | 250 | case 0x7015: |
251 | case 0x9018: | 251 | case 0x9018: |
252 | case 0xA704: | ||
253 | case 0x1200: | ||
252 | priv->htc->credits = 45; | 254 | priv->htc->credits = 45; |
253 | break; | 255 | break; |
254 | default: | 256 | default: |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 3d19b5bc937f..29d80ca78393 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |||
@@ -121,7 +121,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) | |||
121 | tx_hdr.data_type = ATH9K_HTC_NORMAL; | 121 | tx_hdr.data_type = ATH9K_HTC_NORMAL; |
122 | } | 122 | } |
123 | 123 | ||
124 | if (ieee80211_is_data(fc)) { | 124 | if (ieee80211_is_data_qos(fc)) { |
125 | qc = ieee80211_get_qos_ctl(hdr); | 125 | qc = ieee80211_get_qos_ctl(hdr); |
126 | tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK; | 126 | tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK; |
127 | } | 127 | } |
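The htc_drv_txrx.c hunk above narrows the test from ieee80211_is_data() to ieee80211_is_data_qos(): only QoS data frames carry the 2-byte QoS control field, so fetching a TID from a plain data frame would read bytes that are not a QoS control field at all. A minimal standalone sketch of the two checks; the constants mirror include/linux/ieee80211.h, but the little-endian handling of the real helpers is left out here.

#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE      0x000c
#define FTYPE_DATA      0x0008
#define STYPE_QOS_DATA  0x0080

/* any data frame (what the old check accepted) */
static int is_data(uint16_t fc)
{
    return (fc & FCTL_FTYPE) == FTYPE_DATA;
}

/* QoS data frames only (what the fixed check accepts) */
static int is_data_qos(uint16_t fc)
{
    return (fc & (FCTL_FTYPE | STYPE_QOS_DATA)) ==
           (FTYPE_DATA | STYPE_QOS_DATA);
}

int main(void)
{
    uint16_t plain_data = FTYPE_DATA;                  /* no QoS control field */
    uint16_t qos_data   = FTYPE_DATA | STYPE_QOS_DATA; /* has a QoS control field */

    printf("plain data: is_data=%d is_data_qos=%d\n",
           is_data(plain_data), is_data_qos(plain_data));
    printf("QoS data:   is_data=%d is_data_qos=%d\n",
           is_data(qos_data), is_data_qos(qos_data));
    return 0;
}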
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 6a0d99eff404..92bc5c5f4876 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
@@ -817,8 +817,6 @@ void ath9k_deinit_device(struct ath_softc *sc) | |||
817 | 817 | ||
818 | ath9k_ps_wakeup(sc); | 818 | ath9k_ps_wakeup(sc); |
819 | 819 | ||
820 | pm_qos_remove_request(&ath9k_pm_qos_req); | ||
821 | |||
822 | wiphy_rfkill_stop_polling(sc->hw->wiphy); | 820 | wiphy_rfkill_stop_polling(sc->hw->wiphy); |
823 | ath_deinit_leds(sc); | 821 | ath_deinit_leds(sc); |
824 | 822 | ||
@@ -832,6 +830,7 @@ void ath9k_deinit_device(struct ath_softc *sc) | |||
832 | } | 830 | } |
833 | 831 | ||
834 | ieee80211_unregister_hw(hw); | 832 | ieee80211_unregister_hw(hw); |
833 | pm_qos_remove_request(&ath9k_pm_qos_req); | ||
835 | ath_rx_cleanup(sc); | 834 | ath_rx_cleanup(sc); |
836 | ath_tx_cleanup(sc); | 835 | ath_tx_cleanup(sc); |
837 | ath9k_deinit_softc(sc); | 836 | ath9k_deinit_softc(sc); |
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index fa05b711e5cd..dddf579aacf1 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h | |||
@@ -866,7 +866,13 @@ | |||
866 | #define AR_DEVID_7010(_ah) \ | 866 | #define AR_DEVID_7010(_ah) \ |
867 | (((_ah)->hw_version.devid == 0x7010) || \ | 867 | (((_ah)->hw_version.devid == 0x7010) || \ |
868 | ((_ah)->hw_version.devid == 0x7015) || \ | 868 | ((_ah)->hw_version.devid == 0x7015) || \ |
869 | ((_ah)->hw_version.devid == 0x9018)) | 869 | ((_ah)->hw_version.devid == 0x9018) || \ |
870 | ((_ah)->hw_version.devid == 0xA704) || \ | ||
871 | ((_ah)->hw_version.devid == 0x1200)) | ||
872 | |||
873 | #define AR9287_HTC_DEVID(_ah) \ | ||
874 | (((_ah)->hw_version.devid == 0x7015) || \ | ||
875 | ((_ah)->hw_version.devid == 0x1200)) | ||
870 | 876 | ||
871 | #define AR_RADIO_SREV_MAJOR 0xf0 | 877 | #define AR_RADIO_SREV_MAJOR 0xf0 |
872 | #define AR_RAD5133_SREV_MAJOR 0xc0 | 878 | #define AR_RAD5133_SREV_MAJOR 0xc0 |
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index 3317039cd28f..7504ed14c725 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c | |||
@@ -553,12 +553,12 @@ static int carl9170_usb_flush(struct ar9170 *ar) | |||
553 | usb_free_urb(urb); | 553 | usb_free_urb(urb); |
554 | } | 554 | } |
555 | 555 | ||
556 | ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, HZ); | 556 | ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, 1000); |
557 | if (ret == 0) | 557 | if (ret == 0) |
558 | err = -ETIMEDOUT; | 558 | err = -ETIMEDOUT; |
559 | 559 | ||
560 | /* lets wait a while until the tx - queues are dried out */ | 560 | /* lets wait a while until the tx - queues are dried out */ |
561 | ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, HZ); | 561 | ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, 1000); |
562 | if (ret == 0) | 562 | if (ret == 0) |
563 | err = -ETIMEDOUT; | 563 | err = -ETIMEDOUT; |
564 | 564 | ||
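The carl9170 change swaps HZ for 1000 because usb_wait_anchor_empty_timeout() takes its timeout in milliseconds, not jiffies, so passing HZ only meant one second on kernels built with CONFIG_HZ=1000. A small sketch of the difference; the HZ values are examples and the conversion is a simplified stand-in for msecs_to_jiffies().

#include <stdio.h>

/* Simplified stand-in for msecs_to_jiffies(): round up to whole ticks. */
static unsigned int ms_to_jiffies(unsigned int ms, unsigned int hz)
{
    return (ms * hz + 999) / 1000;
}

int main(void)
{
    const unsigned int hz_examples[] = { 100, 250, 1000 };

    for (int i = 0; i < 3; i++) {
        unsigned int hz = hz_examples[i];

        /* The helper wants milliseconds: 1000 always means one second,
         * while HZ-as-milliseconds only waits HZ ms. */
        printf("CONFIG_HZ=%4u: pass 1000 -> 1000 ms (%4u jiffies), "
               "pass HZ -> %4u ms\n",
               hz, ms_to_jiffies(1000, hz), hz);
    }
    return 0;
}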
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index a38a7bd25f19..b9aedf18a046 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c | |||
@@ -57,7 +57,6 @@ | |||
57 | #include <linux/fcntl.h> | 57 | #include <linux/fcntl.h> |
58 | #include <linux/spinlock.h> | 58 | #include <linux/spinlock.h> |
59 | #include <linux/list.h> | 59 | #include <linux/list.h> |
60 | #include <linux/smp_lock.h> | ||
61 | #include <linux/usb.h> | 60 | #include <linux/usb.h> |
62 | #include <linux/timer.h> | 61 | #include <linux/timer.h> |
63 | 62 | ||
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c index cce00ed81f37..af212c6a6158 100644 --- a/drivers/parisc/eisa_eeprom.c +++ b/drivers/parisc/eisa_eeprom.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
25 | #include <linux/miscdevice.h> | 25 | #include <linux/miscdevice.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/smp_lock.h> | ||
28 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
29 | #include <asm/io.h> | 28 | #include <asm/io.h> |
30 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index f01e344cf4bd..98e6fdf34d30 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -49,6 +49,7 @@ obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o | |||
49 | obj-$(CONFIG_X86_VISWS) += setup-irq.o | 49 | obj-$(CONFIG_X86_VISWS) += setup-irq.o |
50 | obj-$(CONFIG_MN10300) += setup-bus.o | 50 | obj-$(CONFIG_MN10300) += setup-bus.o |
51 | obj-$(CONFIG_MICROBLAZE) += setup-bus.o | 51 | obj-$(CONFIG_MICROBLAZE) += setup-bus.o |
52 | obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o | ||
52 | 53 | ||
53 | # | 54 | # |
54 | # ACPI Related PCI FW Functions | 55 | # ACPI Related PCI FW Functions |
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 5624db8c9ad0..003170ea2e39 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -64,17 +64,57 @@ void pci_bus_remove_resources(struct pci_bus *bus) | |||
64 | } | 64 | } |
65 | } | 65 | } |
66 | 66 | ||
67 | static bool pci_bus_resource_better(struct resource *res1, bool pos1, | ||
68 | struct resource *res2, bool pos2) | ||
69 | { | ||
70 | /* If exactly one is positive decode, always prefer that one */ | ||
71 | if (pos1 != pos2) | ||
72 | return pos1 ? true : false; | ||
73 | |||
74 | /* Prefer the one that contains the highest address */ | ||
75 | if (res1->end != res2->end) | ||
76 | return (res1->end > res2->end) ? true : false; | ||
77 | |||
78 | /* Otherwise, prefer the one with highest "center of gravity" */ | ||
79 | if (res1->start != res2->start) | ||
80 | return (res1->start > res2->start) ? true : false; | ||
81 | |||
82 | /* Otherwise, choose one arbitrarily (but consistently) */ | ||
83 | return (res1 > res2) ? true : false; | ||
84 | } | ||
85 | |||
86 | static bool pci_bus_resource_positive(struct pci_bus *bus, struct resource *res) | ||
87 | { | ||
88 | struct pci_bus_resource *bus_res; | ||
89 | |||
90 | /* | ||
91 | * This relies on the fact that pci_bus.resource[] refers to P2P or | ||
92 | * CardBus bridge base/limit registers, which are always positively | ||
93 | * decoded. The pci_bus.resources list contains host bridge or | ||
94 | * subtractively decoded resources. | ||
95 | */ | ||
96 | list_for_each_entry(bus_res, &bus->resources, list) { | ||
97 | if (bus_res->res == res) | ||
98 | return (bus_res->flags & PCI_SUBTRACTIVE_DECODE) ? | ||
99 | false : true; | ||
100 | } | ||
101 | return true; | ||
102 | } | ||
103 | |||
67 | /* | 104 | /* |
68 | * Find the highest-address bus resource below the cursor "res". If the | 105 | * Find the next-best bus resource after the cursor "res". If the cursor is |
69 | * cursor is NULL, return the highest resource. | 106 | * NULL, return the best resource. "Best" means that we prefer positive |
107 | * decode regions over subtractive decode, then those at higher addresses. | ||
70 | */ | 108 | */ |
71 | static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus, | 109 | static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus, |
72 | unsigned int type, | 110 | unsigned int type, |
73 | struct resource *res) | 111 | struct resource *res) |
74 | { | 112 | { |
113 | bool res_pos, r_pos, prev_pos = false; | ||
75 | struct resource *r, *prev = NULL; | 114 | struct resource *r, *prev = NULL; |
76 | int i; | 115 | int i; |
77 | 116 | ||
117 | res_pos = pci_bus_resource_positive(bus, res); | ||
78 | pci_bus_for_each_resource(bus, r, i) { | 118 | pci_bus_for_each_resource(bus, r, i) { |
79 | if (!r) | 119 | if (!r) |
80 | continue; | 120 | continue; |
@@ -82,26 +122,14 @@ static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus, | |||
82 | if ((r->flags & IORESOURCE_TYPE_BITS) != type) | 122 | if ((r->flags & IORESOURCE_TYPE_BITS) != type) |
83 | continue; | 123 | continue; |
84 | 124 | ||
85 | /* If this resource is at or past the cursor, skip it */ | 125 | r_pos = pci_bus_resource_positive(bus, r); |
86 | if (res) { | 126 | if (!res || pci_bus_resource_better(res, res_pos, r, r_pos)) { |
87 | if (r == res) | 127 | if (!prev || pci_bus_resource_better(r, r_pos, |
88 | continue; | 128 | prev, prev_pos)) { |
89 | if (r->end > res->end) | 129 | prev = r; |
90 | continue; | 130 | prev_pos = r_pos; |
91 | if (r->end == res->end && r->start > res->start) | 131 | } |
92 | continue; | ||
93 | } | 132 | } |
94 | |||
95 | if (!prev) | ||
96 | prev = r; | ||
97 | |||
98 | /* | ||
99 | * A small resource is higher than a large one that ends at | ||
100 | * the same address. | ||
101 | */ | ||
102 | if (r->end > prev->end || | ||
103 | (r->end == prev->end && r->start > prev->start)) | ||
104 | prev = r; | ||
105 | } | 133 | } |
106 | 134 | ||
107 | return prev; | 135 | return prev; |
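The rewritten pci/bus.c helpers change the order in which bus resources are tried for allocation: positively decoded bridge windows are preferred over subtractively decoded (host bridge) resources, and only within each class does the old highest-address preference apply. A standalone model of that ordering; the struct layout and the sample windows in main() are invented for the demo, only the comparison rules follow the patch.

#include <stdbool.h>
#include <stdio.h>

struct res {
    const char *name;
    unsigned long start, end;
    bool positive;              /* false = subtractive decode */
};

static bool better(const struct res *a, const struct res *b)
{
    if (a->positive != b->positive)
        return a->positive;     /* positive decode always wins */
    if (a->end != b->end)
        return a->end > b->end; /* then the one reaching the highest address */
    if (a->start != b->start)
        return a->start > b->start;
    return a > b;               /* arbitrary but stable tie-break */
}

/* Next-best resource strictly after the cursor; NULL cursor = best overall. */
static const struct res *find_prev(const struct res *table, int n,
                                   const struct res *cursor)
{
    const struct res *prev = NULL;

    for (int i = 0; i < n; i++) {
        const struct res *r = &table[i];

        if (cursor && !better(cursor, r))
            continue;           /* r is at or before the cursor in the ordering */
        if (!prev || better(r, prev))
            prev = r;
    }
    return prev;
}

int main(void)
{
    const struct res table[] = {
        { "subtractive", 0x00000000, 0xffffffff, false },
        { "bridge-lo",   0xa0000000, 0xafffffff, true  },
        { "bridge-hi",   0xc0000000, 0xcfffffff, true  },
    };
    const struct res *r = NULL;

    /* Walk from best to worst, the way an allocator would try them. */
    while ((r = find_prev(table, 3, r)) != NULL)
        printf("%s [%#lx-%#lx] %s\n", r->name, r->start, r->end,
               r->positive ? "positive" : "subtractive");
    return 0;
}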
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index 5becbdee4027..2850e64dedae 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c | |||
@@ -276,6 +276,12 @@ int __init ibmphp_access_ebda (void) | |||
276 | 276 | ||
277 | for (;;) { | 277 | for (;;) { |
278 | offset = next_offset; | 278 | offset = next_offset; |
279 | |||
280 | /* Make sure what we read is still in the mapped section */ | ||
281 | if (WARN(offset > (ebda_sz * 1024 - 4), | ||
282 | "ibmphp_ebda: next read is beyond ebda_sz\n")) | ||
283 | break; | ||
284 | |||
279 | next_offset = readw (io_mem + offset); /* offset of next blk */ | 285 | next_offset = readw (io_mem + offset); /* offset of next blk */ |
280 | 286 | ||
281 | offset += 2; | 287 | offset += 2; |
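The ibmphp hunk adds a sanity check so the EBDA block walk stops before reading a 16-bit link that would sit outside the mapped window. A runnable model of the same walk over a dummy buffer; the block layout and the corrupt link value are made up, only the bounds check mirrors the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t read16(const uint8_t *buf, size_t off)
{
    uint16_t v;
    memcpy(&v, buf + off, sizeof(v));      /* stand-in for readw(io_mem + off) */
    return v;
}

int main(void)
{
    uint8_t ebda[64] = { 0 };
    const size_t ebda_sz = sizeof(ebda);
    uint16_t links[] = { 8, 24, 4000 };    /* last link points past the window */
    uint16_t next = 0;

    memcpy(&ebda[0],  &links[0], 2);
    memcpy(&ebda[8],  &links[1], 2);
    memcpy(&ebda[24], &links[2], 2);

    for (;;) {
        size_t offset = next;

        /* Make sure the 2-byte link we are about to read is still mapped. */
        if (offset > ebda_sz - sizeof(uint16_t)) {
            printf("link 0x%zx is beyond the %zu-byte window, stopping\n",
                   offset, ebda_sz);
            break;
        }
        next = read16(ebda, offset);
        printf("block at 0x%zx, next link 0x%x\n", offset, (unsigned)next);
        if (next == 0)
            break;
    }
    return 0;
}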
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index b5a7d9bfcb24..63d5042f2079 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -705,17 +705,21 @@ void pci_remove_legacy_files(struct pci_bus *b) | |||
705 | 705 | ||
706 | #ifdef HAVE_PCI_MMAP | 706 | #ifdef HAVE_PCI_MMAP |
707 | 707 | ||
708 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) | 708 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, |
709 | enum pci_mmap_api mmap_api) | ||
709 | { | 710 | { |
710 | unsigned long nr, start, size; | 711 | unsigned long nr, start, size, pci_start; |
711 | 712 | ||
713 | if (pci_resource_len(pdev, resno) == 0) | ||
714 | return 0; | ||
712 | nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 715 | nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
713 | start = vma->vm_pgoff; | 716 | start = vma->vm_pgoff; |
714 | size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; | 717 | size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; |
715 | if (start < size && size - start >= nr) | 718 | pci_start = (mmap_api == PCI_MMAP_PROCFS) ? |
719 | pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0; | ||
720 | if (start >= pci_start && start < pci_start + size && | ||
721 | start + nr <= pci_start + size) | ||
716 | return 1; | 722 | return 1; |
717 | WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", | ||
718 | current->comm, start, start+nr, pci_name(pdev), resno, size); | ||
719 | return 0; | 723 | return 0; |
720 | } | 724 | } |
721 | 725 | ||
@@ -745,8 +749,15 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, | |||
745 | if (i >= PCI_ROM_RESOURCE) | 749 | if (i >= PCI_ROM_RESOURCE) |
746 | return -ENODEV; | 750 | return -ENODEV; |
747 | 751 | ||
748 | if (!pci_mmap_fits(pdev, i, vma)) | 752 | if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) { |
753 | WARN(1, "process \"%s\" tried to map 0x%08lx bytes " | ||
754 | "at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n", | ||
755 | current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff, | ||
756 | pci_name(pdev), i, | ||
757 | (u64)pci_resource_start(pdev, i), | ||
758 | (u64)pci_resource_len(pdev, i)); | ||
749 | return -EINVAL; | 759 | return -EINVAL; |
760 | } | ||
750 | 761 | ||
751 | /* pci_mmap_page_range() expects the same kind of entry as coming | 762 | /* pci_mmap_page_range() expects the same kind of entry as coming |
752 | * from /proc/bus/pci/ which is a "user visible" value. If this is | 763 | * from /proc/bus/pci/ which is a "user visible" value. If this is |
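The pci-sysfs.c change teaches pci_mmap_fits() that the mmap offset means different things at the two entry points: a sysfs resource file takes an offset relative to the BAR, while /proc/bus/pci passes the user-visible physical page frame, so the BAR's own start page has to be folded into the range check. A standalone sketch of the arithmetic; the BAR start and length in main() are example values.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

enum mmap_api { MMAP_SYSFS, MMAP_PROCFS };

static bool mmap_fits(unsigned long bar_start, unsigned long bar_len,
                      unsigned long vm_pgoff, unsigned long nr_pages,
                      enum mmap_api api)
{
    unsigned long size, pci_start;

    if (bar_len == 0)
        return false;
    size = ((bar_len - 1) >> PAGE_SHIFT) + 1;               /* BAR size in pages */
    pci_start = (api == MMAP_PROCFS) ? bar_start >> PAGE_SHIFT : 0;

    return vm_pgoff >= pci_start &&
           vm_pgoff < pci_start + size &&
           vm_pgoff + nr_pages <= pci_start + size;
}

int main(void)
{
    unsigned long bar_start = 0xf0000000, bar_len = 0x10000; /* 16 pages */

    /* sysfs: offset is relative to the BAR */
    printf("sysfs  pgoff 0,   4 pages: %d\n",
           mmap_fits(bar_start, bar_len, 0, 4, MMAP_SYSFS));
    /* procfs: offset is the absolute PCI page frame of the BAR */
    printf("procfs pgoff BAR, 16 pages: %d\n",
           mmap_fits(bar_start, bar_len, bar_start >> PAGE_SHIFT, 16,
                     MMAP_PROCFS));
    /* one page too many never fits */
    printf("sysfs  pgoff 1,  16 pages: %d\n",
           mmap_fits(bar_start, bar_len, 1, 16, MMAP_SYSFS));
    return 0;
}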
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e98c8104297b..710c8a29be0d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -1007,6 +1007,18 @@ static int __pci_enable_device_flags(struct pci_dev *dev, | |||
1007 | int err; | 1007 | int err; |
1008 | int i, bars = 0; | 1008 | int i, bars = 0; |
1009 | 1009 | ||
1010 | /* | ||
1011 | * Power state could be unknown at this point, either due to a fresh | ||
1012 | * boot or a device removal call. So get the current power state | ||
1013 | * so that things like MSI message writing will behave as expected | ||
1014 | * (e.g. if the device really is in D0 at enable time). | ||
1015 | */ | ||
1016 | if (dev->pm_cap) { | ||
1017 | u16 pmcsr; | ||
1018 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); | ||
1019 | dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); | ||
1020 | } | ||
1021 | |||
1010 | if (atomic_add_return(1, &dev->enable_cnt) > 1) | 1022 | if (atomic_add_return(1, &dev->enable_cnt) > 1) |
1011 | return 0; /* already enabled */ | 1023 | return 0; /* already enabled */ |
1012 | 1024 | ||
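The pci.c hunk reads the device's real power state from PMCSR before the first enable instead of trusting a possibly stale dev->current_state, so that later operations such as MSI message writes see the true D-state. The low two bits of PMCSR encode that state; a tiny decoder, with made-up sample register values.

#include <stdint.h>
#include <stdio.h>

#define PCI_PM_CTRL_STATE_MASK 0x0003   /* as in include/linux/pci_regs.h */

static unsigned int pmcsr_to_dstate(uint16_t pmcsr)
{
    return pmcsr & PCI_PM_CTRL_STATE_MASK;   /* 0 = D0 ... 3 = D3hot */
}

int main(void)
{
    const uint16_t samples[] = { 0x0000, 0x0003, 0x4103 };

    for (int i = 0; i < 3; i++)
        printf("PMCSR %#06x -> D%u\n", samples[i], pmcsr_to_dstate(samples[i]));
    return 0;
}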
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index f5c7c382765f..7d33f6673868 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -22,8 +22,13 @@ extern void pci_remove_firmware_label_files(struct pci_dev *pdev); | |||
22 | #endif | 22 | #endif |
23 | extern void pci_cleanup_rom(struct pci_dev *dev); | 23 | extern void pci_cleanup_rom(struct pci_dev *dev); |
24 | #ifdef HAVE_PCI_MMAP | 24 | #ifdef HAVE_PCI_MMAP |
25 | enum pci_mmap_api { | ||
26 | PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */ | ||
27 | PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */ | ||
28 | }; | ||
25 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, | 29 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, |
26 | struct vm_area_struct *vma); | 30 | struct vm_area_struct *vmai, |
31 | enum pci_mmap_api mmap_api); | ||
27 | #endif | 32 | #endif |
28 | int pci_probe_reset_function(struct pci_dev *dev); | 33 | int pci_probe_reset_function(struct pci_dev *dev); |
29 | 34 | ||
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 297b72c880a1..27911b55c2a5 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
12 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
13 | #include <linux/smp_lock.h> | ||
14 | #include <linux/capability.h> | 13 | #include <linux/capability.h> |
15 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
16 | #include <asm/byteorder.h> | 15 | #include <asm/byteorder.h> |
@@ -257,7 +256,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) | |||
257 | 256 | ||
258 | /* Make sure the caller is mapping a real resource for this device */ | 257 | /* Make sure the caller is mapping a real resource for this device */ |
259 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { | 258 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { |
260 | if (pci_mmap_fits(dev, i, vma)) | 259 | if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) |
261 | break; | 260 | break; |
262 | } | 261 | } |
263 | 262 | ||
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index f5c63fe9db5c..6f9350cabbd5 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -2136,6 +2136,24 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, | |||
2136 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, | 2136 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, |
2137 | quirk_unhide_mch_dev6); | 2137 | quirk_unhide_mch_dev6); |
2138 | 2138 | ||
2139 | #ifdef CONFIG_TILE | ||
2140 | /* | ||
2141 | * The Tilera TILEmpower platform needs to set the link speed | ||
2142 | * to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed | ||
2143 | * setting is 5GT/s (Gen 2). 0x98 is the Link Control2 PCIe | ||
2144 | * capability register of the PEX8624 PCIe switch. The switch | ||
2145 | * supports link speed auto negotiation, but falsely sets | ||
2146 | * the link speed to 5GT/s. | ||
2147 | */ | ||
2148 | static void __devinit quirk_tile_plx_gen1(struct pci_dev *dev) | ||
2149 | { | ||
2150 | if (tile_plx_gen1) { | ||
2151 | pci_write_config_dword(dev, 0x98, 0x1); | ||
2152 | mdelay(50); | ||
2153 | } | ||
2154 | } | ||
2155 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1); | ||
2156 | #endif /* CONFIG_TILE */ | ||
2139 | 2157 | ||
2140 | #ifdef CONFIG_PCI_MSI | 2158 | #ifdef CONFIG_PCI_MSI |
2141 | /* Some chipsets do not support MSI. We cannot easily rely on setting | 2159 | /* Some chipsets do not support MSI. We cannot easily rely on setting |
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c index e73ebefdf3e0..315b3112aca8 100644 --- a/drivers/pnp/isapnp/proc.c +++ b/drivers/pnp/isapnp/proc.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/isapnp.h> | 21 | #include <linux/isapnp.h> |
22 | #include <linux/proc_fs.h> | 22 | #include <linux/proc_fs.h> |
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/smp_lock.h> | ||
25 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
26 | 25 | ||
27 | extern struct pnp_protocol isapnp_protocol; | 26 | extern struct pnp_protocol isapnp_protocol; |
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 5efbd5990ff8..06e41ed93230 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c | |||
@@ -761,7 +761,7 @@ err_unmap: | |||
761 | clk_put(rtc->clk); | 761 | clk_put(rtc->clk); |
762 | iounmap(rtc->regbase); | 762 | iounmap(rtc->regbase); |
763 | err_badmap: | 763 | err_badmap: |
764 | release_resource(rtc->res); | 764 | release_mem_region(rtc->res->start, rtc->regsize); |
765 | err_badres: | 765 | err_badres: |
766 | kfree(rtc); | 766 | kfree(rtc); |
767 | 767 | ||
@@ -786,7 +786,7 @@ static int __exit sh_rtc_remove(struct platform_device *pdev) | |||
786 | } | 786 | } |
787 | 787 | ||
788 | iounmap(rtc->regbase); | 788 | iounmap(rtc->regbase); |
789 | release_resource(rtc->res); | 789 | release_mem_region(rtc->res->start, rtc->regsize); |
790 | 790 | ||
791 | clk_disable(rtc->clk); | 791 | clk_disable(rtc->clk); |
792 | clk_put(rtc->clk); | 792 | clk_put(rtc->clk); |
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index c71d89dba302..83b4615a3b62 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
18 | #include <linux/poll.h> | 18 | #include <linux/poll.h> |
19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
20 | #include <linux/smp_lock.h> | ||
21 | #include <linux/err.h> | 20 | #include <linux/err.h> |
22 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
23 | 22 | ||
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index eb28fb01a38a..f6489eb7e976 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/list.h> | 14 | #include <linux/list.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/smp_lock.h> | ||
18 | 17 | ||
19 | #include <asm/compat.h> | 18 | #include <asm/compat.h> |
20 | #include <asm/ccwdev.h> | 19 | #include <asm/ccwdev.h> |
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 883e2db02bd3..e090a307fdee 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | #include <linux/proc_fs.h> | 18 | #include <linux/proc_fs.h> |
19 | #include <linux/mtio.h> | 19 | #include <linux/mtio.h> |
20 | #include <linux/smp_lock.h> | ||
21 | #include <linux/compat.h> | 20 | #include <linux/compat.h> |
22 | 21 | ||
23 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 6c408670e08d..b3a3e8e8656e 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
@@ -209,29 +209,79 @@ tape_state_set(struct tape_device *device, enum tape_state newstate) | |||
209 | wake_up(&device->state_change_wq); | 209 | wake_up(&device->state_change_wq); |
210 | } | 210 | } |
211 | 211 | ||
212 | struct tape_med_state_work_data { | ||
213 | struct tape_device *device; | ||
214 | enum tape_medium_state state; | ||
215 | struct work_struct work; | ||
216 | }; | ||
217 | |||
218 | static void | ||
219 | tape_med_state_work_handler(struct work_struct *work) | ||
220 | { | ||
221 | static char env_state_loaded[] = "MEDIUM_STATE=LOADED"; | ||
222 | static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED"; | ||
223 | struct tape_med_state_work_data *p = | ||
224 | container_of(work, struct tape_med_state_work_data, work); | ||
225 | struct tape_device *device = p->device; | ||
226 | char *envp[] = { NULL, NULL }; | ||
227 | |||
228 | switch (p->state) { | ||
229 | case MS_UNLOADED: | ||
230 | pr_info("%s: The tape cartridge has been successfully " | ||
231 | "unloaded\n", dev_name(&device->cdev->dev)); | ||
232 | envp[0] = env_state_unloaded; | ||
233 | kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); | ||
234 | break; | ||
235 | case MS_LOADED: | ||
236 | pr_info("%s: A tape cartridge has been mounted\n", | ||
237 | dev_name(&device->cdev->dev)); | ||
238 | envp[0] = env_state_loaded; | ||
239 | kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); | ||
240 | break; | ||
241 | default: | ||
242 | break; | ||
243 | } | ||
244 | tape_put_device(device); | ||
245 | kfree(p); | ||
246 | } | ||
247 | |||
248 | static void | ||
249 | tape_med_state_work(struct tape_device *device, enum tape_medium_state state) | ||
250 | { | ||
251 | struct tape_med_state_work_data *p; | ||
252 | |||
253 | p = kzalloc(sizeof(*p), GFP_ATOMIC); | ||
254 | if (p) { | ||
255 | INIT_WORK(&p->work, tape_med_state_work_handler); | ||
256 | p->device = tape_get_device(device); | ||
257 | p->state = state; | ||
258 | schedule_work(&p->work); | ||
259 | } | ||
260 | } | ||
261 | |||
212 | void | 262 | void |
213 | tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) | 263 | tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) |
214 | { | 264 | { |
215 | if (device->medium_state == newstate) | 265 | enum tape_medium_state oldstate; |
266 | |||
267 | oldstate = device->medium_state; | ||
268 | if (oldstate == newstate) | ||
216 | return; | 269 | return; |
270 | device->medium_state = newstate; | ||
217 | switch(newstate){ | 271 | switch(newstate){ |
218 | case MS_UNLOADED: | 272 | case MS_UNLOADED: |
219 | device->tape_generic_status |= GMT_DR_OPEN(~0); | 273 | device->tape_generic_status |= GMT_DR_OPEN(~0); |
220 | if (device->medium_state == MS_LOADED) | 274 | if (oldstate == MS_LOADED) |
221 | pr_info("%s: The tape cartridge has been successfully " | 275 | tape_med_state_work(device, MS_UNLOADED); |
222 | "unloaded\n", dev_name(&device->cdev->dev)); | ||
223 | break; | 276 | break; |
224 | case MS_LOADED: | 277 | case MS_LOADED: |
225 | device->tape_generic_status &= ~GMT_DR_OPEN(~0); | 278 | device->tape_generic_status &= ~GMT_DR_OPEN(~0); |
226 | if (device->medium_state == MS_UNLOADED) | 279 | if (oldstate == MS_UNLOADED) |
227 | pr_info("%s: A tape cartridge has been mounted\n", | 280 | tape_med_state_work(device, MS_LOADED); |
228 | dev_name(&device->cdev->dev)); | ||
229 | break; | 281 | break; |
230 | default: | 282 | default: |
231 | // print nothing | ||
232 | break; | 283 | break; |
233 | } | 284 | } |
234 | device->medium_state = newstate; | ||
235 | wake_up(&device->state_change_wq); | 285 | wake_up(&device->state_change_wq); |
236 | } | 286 | } |
237 | 287 | ||
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 9f661426e4a1..c837d7419a6a 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/kmod.h> | 30 | #include <linux/kmod.h> |
31 | #include <linux/cdev.h> | 31 | #include <linux/cdev.h> |
32 | #include <linux/device.h> | 32 | #include <linux/device.h> |
33 | #include <linux/smp_lock.h> | ||
34 | #include <linux/string.h> | 33 | #include <linux/string.h> |
35 | 34 | ||
36 | MODULE_AUTHOR | 35 | MODULE_AUTHOR |
@@ -249,27 +248,25 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, | |||
249 | char cp_command[80]; | 248 | char cp_command[80]; |
250 | char cp_response[160]; | 249 | char cp_response[160]; |
251 | char *onoff, *qid_string; | 250 | char *onoff, *qid_string; |
251 | int rc; | ||
252 | 252 | ||
253 | memset(cp_command, 0x00, sizeof(cp_command)); | 253 | onoff = ((action == 1) ? "ON" : "OFF"); |
254 | memset(cp_response, 0x00, sizeof(cp_response)); | ||
255 | |||
256 | onoff = ((action == 1) ? "ON" : "OFF"); | ||
257 | qid_string = ((recording_class_AB == 1) ? " QID * " : ""); | 254 | qid_string = ((recording_class_AB == 1) ? " QID * " : ""); |
258 | 255 | ||
259 | /* | 256 | /* |
260 | * The recording commands need to be called with option QID | 257 | * The recording commands need to be called with option QID |
261 | * for guests that have privilege classes A or B. | 258 | * for guests that have privilege classes A or B. |
262 | * Purging has to be done as separate step, because recording | 259 | * Purging has to be done as separate step, because recording |
263 | * can't be switched on as long as records are on the queue. | 260 | * can't be switched on as long as records are on the queue. |
264 | * Doing both at the same time doesn't work. | 261 | * Doing both at the same time doesn't work. |
265 | */ | 262 | */ |
266 | 263 | if (purge && (action == 1)) { | |
267 | if (purge) { | 264 | memset(cp_command, 0x00, sizeof(cp_command)); |
265 | memset(cp_response, 0x00, sizeof(cp_response)); | ||
268 | snprintf(cp_command, sizeof(cp_command), | 266 | snprintf(cp_command, sizeof(cp_command), |
269 | "RECORDING %s PURGE %s", | 267 | "RECORDING %s PURGE %s", |
270 | logptr->recording_name, | 268 | logptr->recording_name, |
271 | qid_string); | 269 | qid_string); |
272 | |||
273 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | 270 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); |
274 | } | 271 | } |
275 | 272 | ||
@@ -279,19 +276,33 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, | |||
279 | logptr->recording_name, | 276 | logptr->recording_name, |
280 | onoff, | 277 | onoff, |
281 | qid_string); | 278 | qid_string); |
282 | |||
283 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | 279 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); |
284 | /* The recording command will usually answer with 'Command complete' | 280 | /* The recording command will usually answer with 'Command complete' |
285 | * on success, but when the specific service was never connected | 281 | * on success, but when the specific service was never connected |
286 | * before then there might be an additional informational message | 282 | * before then there might be an additional informational message |
287 | * 'HCPCRC8072I Recording entry not found' before the | 283 | * 'HCPCRC8072I Recording entry not found' before the |
288 | * 'Command complete'. So I use strstr rather than the strncmp. | 284 | * 'Command complete'. So I use strstr rather than the strncmp. |
289 | */ | 285 | */ |
290 | if (strstr(cp_response,"Command complete")) | 286 | if (strstr(cp_response,"Command complete")) |
291 | return 0; | 287 | rc = 0; |
292 | else | 288 | else |
293 | return -EIO; | 289 | rc = -EIO; |
290 | /* | ||
291 | * If we turn recording off, we have to purge any remaining records | ||
292 | * afterwards, as a large number of queued records may impact z/VM | ||
293 | * performance. | ||
294 | */ | ||
295 | if (purge && (action == 0)) { | ||
296 | memset(cp_command, 0x00, sizeof(cp_command)); | ||
297 | memset(cp_response, 0x00, sizeof(cp_response)); | ||
298 | snprintf(cp_command, sizeof(cp_command), | ||
299 | "RECORDING %s PURGE %s", | ||
300 | logptr->recording_name, | ||
301 | qid_string); | ||
302 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | ||
303 | } | ||
294 | 304 | ||
305 | return rc; | ||
295 | } | 306 | } |
296 | 307 | ||
297 | 308 | ||
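The vmlogrdr change keeps the purge but moves it relative to the state change: queued records prevent switching recording on, so the purge is issued first in that case, while after switching recording off a purge keeps leftover records from piling up in z/VM. A sketch of the resulting command ordering; the service name is a placeholder and the QID option is omitted.

#include <stdio.h>

static void cp(const char *cmd)
{
    printf("CP> %s\n", cmd);            /* stands in for cpcmd() */
}

static void set_recording(const char *name, int on, int purge)
{
    char buf[80];

    if (purge && on) {                  /* queued records block the ON */
        snprintf(buf, sizeof(buf), "RECORDING %s PURGE", name);
        cp(buf);
    }
    snprintf(buf, sizeof(buf), "RECORDING %s %s", name, on ? "ON" : "OFF");
    cp(buf);
    if (purge && !on) {                 /* don't leave records queued after OFF */
        snprintf(buf, sizeof(buf), "RECORDING %s PURGE", name);
        cp(buf);
    }
}

int main(void)
{
    set_recording("EREP", 1, 1);        /* purge first, then ON  */
    set_recording("EREP", 0, 1);        /* OFF first, then purge */
    return 0;
}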
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 1de672f21037..f7e4ae6bf15a 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c | |||
@@ -13,7 +13,6 @@ | |||
13 | 13 | ||
14 | #include <linux/cdev.h> | 14 | #include <linux/cdev.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/smp_lock.h> | ||
17 | 16 | ||
18 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
19 | #include <asm/cio.h> | 18 | #include <asm/cio.h> |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 2ff8a22d4257..e8391b89eff4 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -1455,7 +1455,16 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) | |||
1455 | break; | 1455 | break; |
1456 | case IO_SCH_UNREG_ATTACH: | 1456 | case IO_SCH_UNREG_ATTACH: |
1457 | case IO_SCH_UNREG: | 1457 | case IO_SCH_UNREG: |
1458 | if (cdev) | 1458 | if (!cdev) |
1459 | break; | ||
1460 | if (cdev->private->state == DEV_STATE_SENSE_ID) { | ||
1461 | /* | ||
1462 | * Note: delayed work triggered by this event | ||
1463 | * and repeated calls to sch_event are synchronized | ||
1464 | * by the above check for work_pending(cdev). | ||
1465 | */ | ||
1466 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); | ||
1467 | } else | ||
1459 | ccw_device_set_notoper(cdev); | 1468 | ccw_device_set_notoper(cdev); |
1460 | break; | 1469 | break; |
1461 | case IO_SCH_NOP: | 1470 | case IO_SCH_NOP: |
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index f5221749d180..7fca9c10ffcf 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/proc_fs.h> | 35 | #include <linux/proc_fs.h> |
36 | #include <linux/seq_file.h> | 36 | #include <linux/seq_file.h> |
37 | #include <linux/compat.h> | 37 | #include <linux/compat.h> |
38 | #include <linux/smp_lock.h> | ||
39 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
40 | #include <asm/atomic.h> | 39 | #include <asm/atomic.h> |
41 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 50286d8707f3..6bd2dbc4c316 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -76,7 +76,7 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) | |||
76 | scpnt->scsi_done(scpnt); | 76 | scpnt->scsi_done(scpnt); |
77 | } | 77 | } |
78 | 78 | ||
79 | static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | 79 | static int zfcp_scsi_queuecommand_lck(struct scsi_cmnd *scpnt, |
80 | void (*done) (struct scsi_cmnd *)) | 80 | void (*done) (struct scsi_cmnd *)) |
81 | { | 81 | { |
82 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); | 82 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); |
@@ -127,6 +127,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | |||
127 | return ret; | 127 | return ret; |
128 | } | 128 | } |
129 | 129 | ||
130 | static DEF_SCSI_QCMD(zfcp_scsi_queuecommand) | ||
131 | |||
130 | static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) | 132 | static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) |
131 | { | 133 | { |
132 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); | 134 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); |
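From here on most of the SCSI hunks follow one pattern: the driver's old queuecommand(cmd, done) body is renamed to a *_lck() helper and a DEF_SCSI_QCMD() line generates the new-style entry point, part of moving host_lock handling out of the SCSI midlayer. Roughly what the generated wrapper looks like, paraphrased rather than copied from include/scsi/scsi_host.h of this kernel, so helper and field details may differ; the _SKETCH name is only there to avoid presenting this as the real macro text.

/*
 * Hedged sketch: a wrapper with the new two-argument prototype that takes
 * the host lock and calls the old-style func##_lck() with cmd->scsi_done.
 */
#define DEF_SCSI_QCMD_SKETCH(func_name)                                 \
static int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)   \
{                                                                       \
        unsigned long irq_flags;                                        \
        int rc;                                                         \
                                                                        \
        spin_lock_irqsave(shost->host_lock, irq_flags);                 \
        rc = func_name##_lck(cmd, cmd->scsi_done);                      \
        spin_unlock_irqrestore(shost->host_lock, irq_flags);            \
        return rc;                                                      \
}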
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index fcf08b3f52c1..b7bd5b0cc7aa 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -1765,7 +1765,7 @@ out: | |||
1765 | } /* End twa_scsi_eh_reset() */ | 1765 | } /* End twa_scsi_eh_reset() */ |
1766 | 1766 | ||
1767 | /* This is the main scsi queue function to handle scsi opcodes */ | 1767 | /* This is the main scsi queue function to handle scsi opcodes */ |
1768 | static int twa_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 1768 | static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
1769 | { | 1769 | { |
1770 | int request_id, retval; | 1770 | int request_id, retval; |
1771 | TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; | 1771 | TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; |
@@ -1812,6 +1812,8 @@ out: | |||
1812 | return retval; | 1812 | return retval; |
1813 | } /* End twa_scsi_queue() */ | 1813 | } /* End twa_scsi_queue() */ |
1814 | 1814 | ||
1815 | static DEF_SCSI_QCMD(twa_scsi_queue) | ||
1816 | |||
1815 | /* This function hands scsi cdb's to the firmware */ | 1817 | /* This function hands scsi cdb's to the firmware */ |
1816 | static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg) | 1818 | static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg) |
1817 | { | 1819 | { |
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 6a95d111d207..13e39e1fdfe2 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c | |||
@@ -1501,7 +1501,7 @@ out: | |||
1501 | } /* End twl_scsi_eh_reset() */ | 1501 | } /* End twl_scsi_eh_reset() */ |
1502 | 1502 | ||
1503 | /* This is the main scsi queue function to handle scsi opcodes */ | 1503 | /* This is the main scsi queue function to handle scsi opcodes */ |
1504 | static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 1504 | static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
1505 | { | 1505 | { |
1506 | int request_id, retval; | 1506 | int request_id, retval; |
1507 | TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; | 1507 | TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; |
@@ -1536,6 +1536,8 @@ out: | |||
1536 | return retval; | 1536 | return retval; |
1537 | } /* End twl_scsi_queue() */ | 1537 | } /* End twl_scsi_queue() */ |
1538 | 1538 | ||
1539 | static DEF_SCSI_QCMD(twl_scsi_queue) | ||
1540 | |||
1539 | /* This function tells the controller to shut down */ | 1541 | /* This function tells the controller to shut down */ |
1540 | static void __twl_shutdown(TW_Device_Extension *tw_dev) | 1542 | static void __twl_shutdown(TW_Device_Extension *tw_dev) |
1541 | { | 1543 | { |
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index b1125341f4c8..7fe96ff60c58 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c | |||
@@ -1947,7 +1947,7 @@ static int tw_scsiop_test_unit_ready_complete(TW_Device_Extension *tw_dev, int r | |||
1947 | } /* End tw_scsiop_test_unit_ready_complete() */ | 1947 | } /* End tw_scsiop_test_unit_ready_complete() */ |
1948 | 1948 | ||
1949 | /* This is the main scsi queue function to handle scsi opcodes */ | 1949 | /* This is the main scsi queue function to handle scsi opcodes */ |
1950 | static int tw_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 1950 | static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
1951 | { | 1951 | { |
1952 | unsigned char *command = SCpnt->cmnd; | 1952 | unsigned char *command = SCpnt->cmnd; |
1953 | int request_id = 0; | 1953 | int request_id = 0; |
@@ -2023,6 +2023,8 @@ static int tw_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd | |||
2023 | return retval; | 2023 | return retval; |
2024 | } /* End tw_scsi_queue() */ | 2024 | } /* End tw_scsi_queue() */ |
2025 | 2025 | ||
2026 | static DEF_SCSI_QCMD(tw_scsi_queue) | ||
2027 | |||
2026 | /* This function is the interrupt service routine */ | 2028 | /* This function is the interrupt service routine */ |
2027 | static irqreturn_t tw_interrupt(int irq, void *dev_instance) | 2029 | static irqreturn_t tw_interrupt(int irq, void *dev_instance) |
2028 | { | 2030 | { |
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 89fc1c8af86b..f672491774eb 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c | |||
@@ -167,7 +167,7 @@ MODULE_LICENSE("GPL"); | |||
167 | #include "53c700_d.h" | 167 | #include "53c700_d.h" |
168 | 168 | ||
169 | 169 | ||
170 | STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); | 170 | STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *); |
171 | STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt); | 171 | STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt); |
172 | STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt); | 172 | STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt); |
173 | STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt); | 173 | STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt); |
@@ -1749,8 +1749,8 @@ NCR_700_intr(int irq, void *dev_id) | |||
1749 | return IRQ_RETVAL(handled); | 1749 | return IRQ_RETVAL(handled); |
1750 | } | 1750 | } |
1751 | 1751 | ||
1752 | STATIC int | 1752 | static int |
1753 | NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)) | 1753 | NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)) |
1754 | { | 1754 | { |
1755 | struct NCR_700_Host_Parameters *hostdata = | 1755 | struct NCR_700_Host_Parameters *hostdata = |
1756 | (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; | 1756 | (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; |
@@ -1904,6 +1904,8 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)) | |||
1904 | return 0; | 1904 | return 0; |
1905 | } | 1905 | } |
1906 | 1906 | ||
1907 | STATIC DEF_SCSI_QCMD(NCR_700_queuecommand) | ||
1908 | |||
1907 | STATIC int | 1909 | STATIC int |
1908 | NCR_700_abort(struct scsi_cmnd * SCp) | 1910 | NCR_700_abort(struct scsi_cmnd * SCp) |
1909 | { | 1911 | { |
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index fc0b4b81d552..f66c33b9ab41 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c | |||
@@ -2807,7 +2807,7 @@ static int BusLogic_host_reset(struct scsi_cmnd * SCpnt) | |||
2807 | Outgoing Mailbox for execution by the associated Host Adapter. | 2807 | Outgoing Mailbox for execution by the associated Host Adapter. |
2808 | */ | 2808 | */ |
2809 | 2809 | ||
2810 | static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRoutine) (struct scsi_cmnd *)) | 2810 | static int BusLogic_QueueCommand_lck(struct scsi_cmnd *Command, void (*CompletionRoutine) (struct scsi_cmnd *)) |
2811 | { | 2811 | { |
2812 | struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) Command->device->host->hostdata; | 2812 | struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) Command->device->host->hostdata; |
2813 | struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[Command->device->id]; | 2813 | struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[Command->device->id]; |
@@ -2994,6 +2994,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou | |||
2994 | return 0; | 2994 | return 0; |
2995 | } | 2995 | } |
2996 | 2996 | ||
2997 | static DEF_SCSI_QCMD(BusLogic_QueueCommand) | ||
2997 | 2998 | ||
2998 | #if 0 | 2999 | #if 0 |
2999 | /* | 3000 | /* |
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h index 73f237a1ed94..649fcb31f26d 100644 --- a/drivers/scsi/BusLogic.h +++ b/drivers/scsi/BusLogic.h | |||
@@ -1319,7 +1319,7 @@ static inline void BusLogic_IncrementSizeBucket(BusLogic_CommandSizeBuckets_T Co | |||
1319 | */ | 1319 | */ |
1320 | 1320 | ||
1321 | static const char *BusLogic_DriverInfo(struct Scsi_Host *); | 1321 | static const char *BusLogic_DriverInfo(struct Scsi_Host *); |
1322 | static int BusLogic_QueueCommand(struct scsi_cmnd *, void (*CompletionRoutine) (struct scsi_cmnd *)); | 1322 | static int BusLogic_QueueCommand(struct Scsi_Host *h, struct scsi_cmnd *); |
1323 | static int BusLogic_BIOSDiskParameters(struct scsi_device *, struct block_device *, sector_t, int *); | 1323 | static int BusLogic_BIOSDiskParameters(struct scsi_device *, struct block_device *, sector_t, int *); |
1324 | static int BusLogic_ProcDirectoryInfo(struct Scsi_Host *, char *, char **, off_t, int, int); | 1324 | static int BusLogic_ProcDirectoryInfo(struct Scsi_Host *, char *, char **, off_t, int, int); |
1325 | static int BusLogic_SlaveConfigure(struct scsi_device *); | 1325 | static int BusLogic_SlaveConfigure(struct scsi_device *); |
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 5d2f148889ad..9a5629f94f95 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c | |||
@@ -952,7 +952,7 @@ static void NCR5380_exit(struct Scsi_Host *instance) | |||
952 | * Locks: host lock taken by caller | 952 | * Locks: host lock taken by caller |
953 | */ | 953 | */ |
954 | 954 | ||
955 | static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | 955 | static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) |
956 | { | 956 | { |
957 | struct Scsi_Host *instance = cmd->device->host; | 957 | struct Scsi_Host *instance = cmd->device->host; |
958 | struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; | 958 | struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; |
@@ -1021,6 +1021,7 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | |||
1021 | return 0; | 1021 | return 0; |
1022 | } | 1022 | } |
1023 | 1023 | ||
1024 | static DEF_SCSI_QCMD(NCR5380_queue_command) | ||
1024 | 1025 | ||
1025 | /** | 1026 | /** |
1026 | * NCR5380_main - NCR state machines | 1027 | * NCR5380_main - NCR state machines |
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index bdc468c9e1d9..fd40a32b1f6f 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h | |||
@@ -313,7 +313,7 @@ static void NCR5380_print(struct Scsi_Host *instance); | |||
313 | #endif | 313 | #endif |
314 | static int NCR5380_abort(Scsi_Cmnd * cmd); | 314 | static int NCR5380_abort(Scsi_Cmnd * cmd); |
315 | static int NCR5380_bus_reset(Scsi_Cmnd * cmd); | 315 | static int NCR5380_bus_reset(Scsi_Cmnd * cmd); |
316 | static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)); | 316 | static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
317 | static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance, | 317 | static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance, |
318 | char *buffer, char **start, off_t offset, int length, int inout); | 318 | char *buffer, char **start, off_t offset, int length, int inout); |
319 | 319 | ||
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c index 6961f78742ae..c91888a0a23c 100644 --- a/drivers/scsi/NCR53c406a.c +++ b/drivers/scsi/NCR53c406a.c | |||
@@ -693,7 +693,7 @@ static void wait_intr(void) | |||
693 | } | 693 | } |
694 | #endif | 694 | #endif |
695 | 695 | ||
696 | static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | 696 | static int NCR53c406a_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) |
697 | { | 697 | { |
698 | int i; | 698 | int i; |
699 | 699 | ||
@@ -726,6 +726,8 @@ static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | |||
726 | return 0; | 726 | return 0; |
727 | } | 727 | } |
728 | 728 | ||
729 | static DEF_SCSI_QCMD(NCR53c406a_queue) | ||
730 | |||
729 | static int NCR53c406a_host_reset(Scsi_Cmnd * SCpnt) | 731 | static int NCR53c406a_host_reset(Scsi_Cmnd * SCpnt) |
730 | { | 732 | { |
731 | DEB(printk("NCR53c406a_reset called\n")); | 733 | DEB(printk("NCR53c406a_reset called\n")); |
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index dbbc601948e5..dc5ac6e528c4 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c | |||
@@ -911,7 +911,7 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc | |||
911 | * queue the command down to the controller | 911 | * queue the command down to the controller |
912 | */ | 912 | */ |
913 | 913 | ||
914 | static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) | 914 | static int inia100_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) |
915 | { | 915 | { |
916 | struct orc_scb *scb; | 916 | struct orc_scb *scb; |
917 | struct orc_host *host; /* Point to Host adapter control block */ | 917 | struct orc_host *host; /* Point to Host adapter control block */ |
@@ -930,6 +930,8 @@ static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd | |||
930 | return 0; | 930 | return 0; |
931 | } | 931 | } |
932 | 932 | ||
933 | static DEF_SCSI_QCMD(inia100_queue) | ||
934 | |||
933 | /***************************************************************************** | 935 | /***************************************************************************** |
934 | Function name : inia100_abort | 936 | Function name : inia100_abort |
935 | Description : Abort a queued command. | 937 | Description : Abort a queued command. |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 29c0ed1cf507..2c93d9496d62 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
@@ -248,7 +248,7 @@ static struct aac_driver_ident aac_drivers[] = { | |||
248 | * TODO: unify with aac_scsi_cmd(). | 248 | * TODO: unify with aac_scsi_cmd(). |
249 | */ | 249 | */ |
250 | 250 | ||
251 | static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 251 | static int aac_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
252 | { | 252 | { |
253 | struct Scsi_Host *host = cmd->device->host; | 253 | struct Scsi_Host *host = cmd->device->host; |
254 | struct aac_dev *dev = (struct aac_dev *)host->hostdata; | 254 | struct aac_dev *dev = (struct aac_dev *)host->hostdata; |
@@ -267,6 +267,8 @@ static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd | |||
267 | return (aac_scsi_cmd(cmd) ? FAILED : 0); | 267 | return (aac_scsi_cmd(cmd) ? FAILED : 0); |
268 | } | 268 | } |
269 | 269 | ||
270 | static DEF_SCSI_QCMD(aac_queuecommand) | ||
271 | |||
270 | /** | 272 | /** |
271 | * aac_info - Returns the host adapter name | 273 | * aac_info - Returns the host adapter name |
272 | * @shost: Scsi host to report on | 274 | * @shost: Scsi host to report on |
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index 0ec3da6f3e12..081c6de92bc5 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c | |||
@@ -9500,7 +9500,7 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp) | |||
9500 | * in the 'scp' result field. | 9500 | * in the 'scp' result field. |
9501 | */ | 9501 | */ |
9502 | static int | 9502 | static int |
9503 | advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) | 9503 | advansys_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) |
9504 | { | 9504 | { |
9505 | struct Scsi_Host *shost = scp->device->host; | 9505 | struct Scsi_Host *shost = scp->device->host; |
9506 | int asc_res, result = 0; | 9506 | int asc_res, result = 0; |
@@ -9525,6 +9525,8 @@ advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) | |||
9525 | return result; | 9525 | return result; |
9526 | } | 9526 | } |
9527 | 9527 | ||
9528 | static DEF_SCSI_QCMD(advansys_queuecommand) | ||
9529 | |||
9528 | static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base) | 9530 | static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base) |
9529 | { | 9531 | { |
9530 | PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) | | 9532 | PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) | |
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 8eab8587ff21..c5169f01c1cd 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c | |||
@@ -1056,7 +1056,7 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete, | |||
1056 | * queue a command | 1056 | * queue a command |
1057 | * | 1057 | * |
1058 | */ | 1058 | */ |
1059 | static int aha152x_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) | 1059 | static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) |
1060 | { | 1060 | { |
1061 | #if 0 | 1061 | #if 0 |
1062 | if(*SCpnt->cmnd == REQUEST_SENSE) { | 1062 | if(*SCpnt->cmnd == REQUEST_SENSE) { |
@@ -1070,6 +1070,8 @@ static int aha152x_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) | |||
1070 | return aha152x_internal_queue(SCpnt, NULL, 0, done); | 1070 | return aha152x_internal_queue(SCpnt, NULL, 0, done); |
1071 | } | 1071 | } |
1072 | 1072 | ||
1073 | static DEF_SCSI_QCMD(aha152x_queue) | ||
1074 | |||
1073 | 1075 | ||
1074 | /* | 1076 | /* |
1075 | * | 1077 | * |
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index 4f785f254c1f..195823a51aab 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c | |||
@@ -558,7 +558,7 @@ static void aha1542_intr_handle(struct Scsi_Host *shost) | |||
558 | }; | 558 | }; |
559 | } | 559 | } |
560 | 560 | ||
561 | static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | 561 | static int aha1542_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) |
562 | { | 562 | { |
563 | unchar ahacmd = CMD_START_SCSI; | 563 | unchar ahacmd = CMD_START_SCSI; |
564 | unchar direction; | 564 | unchar direction; |
@@ -718,6 +718,8 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | |||
718 | return 0; | 718 | return 0; |
719 | } | 719 | } |
720 | 720 | ||
721 | static DEF_SCSI_QCMD(aha1542_queuecommand) | ||
722 | |||
721 | /* Initialize mailboxes */ | 723 | /* Initialize mailboxes */ |
722 | static void setup_mailboxes(int bse, struct Scsi_Host *shpnt) | 724 | static void setup_mailboxes(int bse, struct Scsi_Host *shpnt) |
723 | { | 725 | { |
diff --git a/drivers/scsi/aha1542.h b/drivers/scsi/aha1542.h index 1db538552d56..b871d2b57f93 100644 --- a/drivers/scsi/aha1542.h +++ b/drivers/scsi/aha1542.h | |||
@@ -132,7 +132,7 @@ struct ccb { /* Command Control Block 5.3 */ | |||
132 | }; | 132 | }; |
133 | 133 | ||
134 | static int aha1542_detect(struct scsi_host_template *); | 134 | static int aha1542_detect(struct scsi_host_template *); |
135 | static int aha1542_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); | 135 | static int aha1542_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
136 | static int aha1542_bus_reset(Scsi_Cmnd * SCpnt); | 136 | static int aha1542_bus_reset(Scsi_Cmnd * SCpnt); |
137 | static int aha1542_dev_reset(Scsi_Cmnd * SCpnt); | 137 | static int aha1542_dev_reset(Scsi_Cmnd * SCpnt); |
138 | static int aha1542_host_reset(Scsi_Cmnd * SCpnt); | 138 | static int aha1542_host_reset(Scsi_Cmnd * SCpnt); |
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c index 0107a4cc3331..d058f1ab82b5 100644 --- a/drivers/scsi/aha1740.c +++ b/drivers/scsi/aha1740.c | |||
@@ -331,7 +331,7 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id) | |||
331 | return IRQ_RETVAL(handled); | 331 | return IRQ_RETVAL(handled); |
332 | } | 332 | } |
333 | 333 | ||
334 | static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *)) | 334 | static int aha1740_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *)) |
335 | { | 335 | { |
336 | unchar direction; | 336 | unchar direction; |
337 | unchar *cmd = (unchar *) SCpnt->cmnd; | 337 | unchar *cmd = (unchar *) SCpnt->cmnd; |
@@ -503,6 +503,8 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *)) | |||
503 | return 0; | 503 | return 0; |
504 | } | 504 | } |
505 | 505 | ||
506 | static DEF_SCSI_QCMD(aha1740_queuecommand) | ||
507 | |||
506 | /* Query the board for its irq_level and irq_type. Nothing else matters | 508 | /* Query the board for its irq_level and irq_type. Nothing else matters |
507 | in enhanced mode on an EISA bus. */ | 509 | in enhanced mode on an EISA bus. */ |
508 | 510 | ||
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 88ad8482ef59..25d066624476 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c | |||
@@ -573,7 +573,7 @@ ahd_linux_info(struct Scsi_Host *host) | |||
573 | * Queue an SCB to the controller. | 573 | * Queue an SCB to the controller. |
574 | */ | 574 | */ |
575 | static int | 575 | static int |
576 | ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) | 576 | ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) |
577 | { | 577 | { |
578 | struct ahd_softc *ahd; | 578 | struct ahd_softc *ahd; |
579 | struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device); | 579 | struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device); |
@@ -588,6 +588,8 @@ ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) | |||
588 | return rtn; | 588 | return rtn; |
589 | } | 589 | } |
590 | 590 | ||
591 | static DEF_SCSI_QCMD(ahd_linux_queue) | ||
592 | |||
591 | static struct scsi_target ** | 593 | static struct scsi_target ** |
592 | ahd_linux_target_in_softc(struct scsi_target *starget) | 594 | ahd_linux_target_in_softc(struct scsi_target *starget) |
593 | { | 595 | { |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index aeea7a61478e..4a359bb307c6 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c | |||
@@ -528,7 +528,7 @@ ahc_linux_info(struct Scsi_Host *host) | |||
528 | * Queue an SCB to the controller. | 528 | * Queue an SCB to the controller. |
529 | */ | 529 | */ |
530 | static int | 530 | static int |
531 | ahc_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) | 531 | ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) |
532 | { | 532 | { |
533 | struct ahc_softc *ahc; | 533 | struct ahc_softc *ahc; |
534 | struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device); | 534 | struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device); |
@@ -548,6 +548,8 @@ ahc_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) | |||
548 | return rtn; | 548 | return rtn; |
549 | } | 549 | } |
550 | 550 | ||
551 | static DEF_SCSI_QCMD(ahc_linux_queue) | ||
552 | |||
551 | static inline struct scsi_target ** | 553 | static inline struct scsi_target ** |
552 | ahc_linux_target_in_softc(struct scsi_target *starget) | 554 | ahc_linux_target_in_softc(struct scsi_target *starget) |
553 | { | 555 | { |
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c index aee73fafccc8..4ff60a08df0f 100644 --- a/drivers/scsi/aic7xxx_old.c +++ b/drivers/scsi/aic7xxx_old.c | |||
@@ -10234,7 +10234,7 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd, | |||
10234 | * Description: | 10234 | * Description: |
10235 | * Queue a SCB to the controller. | 10235 | * Queue a SCB to the controller. |
10236 | *-F*************************************************************************/ | 10236 | *-F*************************************************************************/ |
10237 | static int aic7xxx_queue(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) | 10237 | static int aic7xxx_queue_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) |
10238 | { | 10238 | { |
10239 | struct aic7xxx_host *p; | 10239 | struct aic7xxx_host *p; |
10240 | struct aic7xxx_scb *scb; | 10240 | struct aic7xxx_scb *scb; |
@@ -10292,6 +10292,8 @@ static int aic7xxx_queue(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) | |||
10292 | return (0); | 10292 | return (0); |
10293 | } | 10293 | } |
10294 | 10294 | ||
10295 | static DEF_SCSI_QCMD(aic7xxx_queue) | ||
10296 | |||
10295 | /*+F************************************************************************* | 10297 | /*+F************************************************************************* |
10296 | * Function: | 10298 | * Function: |
10297 | * aic7xxx_bus_device_reset | 10299 | * aic7xxx_bus_device_reset |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 05a78e515a24..17e3df4f016f 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
@@ -85,8 +85,7 @@ static int arcmsr_abort(struct scsi_cmnd *); | |||
85 | static int arcmsr_bus_reset(struct scsi_cmnd *); | 85 | static int arcmsr_bus_reset(struct scsi_cmnd *); |
86 | static int arcmsr_bios_param(struct scsi_device *sdev, | 86 | static int arcmsr_bios_param(struct scsi_device *sdev, |
87 | struct block_device *bdev, sector_t capacity, int *info); | 87 | struct block_device *bdev, sector_t capacity, int *info); |
88 | static int arcmsr_queue_command(struct scsi_cmnd *cmd, | 88 | static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
89 | void (*done) (struct scsi_cmnd *)); | ||
90 | static int arcmsr_probe(struct pci_dev *pdev, | 89 | static int arcmsr_probe(struct pci_dev *pdev, |
91 | const struct pci_device_id *id); | 90 | const struct pci_device_id *id); |
92 | static void arcmsr_remove(struct pci_dev *pdev); | 91 | static void arcmsr_remove(struct pci_dev *pdev); |
@@ -2081,7 +2080,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, | |||
2081 | } | 2080 | } |
2082 | } | 2081 | } |
2083 | 2082 | ||
2084 | static int arcmsr_queue_command(struct scsi_cmnd *cmd, | 2083 | static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd, |
2085 | void (* done)(struct scsi_cmnd *)) | 2084 | void (* done)(struct scsi_cmnd *)) |
2086 | { | 2085 | { |
2087 | struct Scsi_Host *host = cmd->device->host; | 2086 | struct Scsi_Host *host = cmd->device->host; |
@@ -2124,6 +2123,8 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, | |||
2124 | return 0; | 2123 | return 0; |
2125 | } | 2124 | } |
2126 | 2125 | ||
2126 | static DEF_SCSI_QCMD(arcmsr_queue_command) | ||
2127 | |||
2127 | static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb) | 2128 | static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb) |
2128 | { | 2129 | { |
2129 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 2130 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 918ccf818757..ec166726b314 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c | |||
@@ -2511,7 +2511,7 @@ acornscsi_intr(int irq, void *dev_id) | |||
2511 | * done - function called on completion, with pointer to command descriptor | 2511 | * done - function called on completion, with pointer to command descriptor |
2512 | * Returns : 0, or < 0 on error. | 2512 | * Returns : 0, or < 0 on error. |
2513 | */ | 2513 | */ |
2514 | int acornscsi_queuecmd(struct scsi_cmnd *SCpnt, | 2514 | static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt, |
2515 | void (*done)(struct scsi_cmnd *)) | 2515 | void (*done)(struct scsi_cmnd *)) |
2516 | { | 2516 | { |
2517 | AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; | 2517 | AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; |
@@ -2561,6 +2561,8 @@ int acornscsi_queuecmd(struct scsi_cmnd *SCpnt, | |||
2561 | return 0; | 2561 | return 0; |
2562 | } | 2562 | } |
2563 | 2563 | ||
2564 | DEF_SCSI_QCMD(acornscsi_queuecmd) | ||
2565 | |||
2564 | /* | 2566 | /* |
2565 | * Prototype: void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1, struct scsi_cmnd **SCpntp2, int result) | 2567 | * Prototype: void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1, struct scsi_cmnd **SCpntp2, int result) |
2566 | * Purpose : pass a result to *SCpntp1, and check if *SCpntp1 = *SCpntp2 | 2568 | * Purpose : pass a result to *SCpntp1, and check if *SCpntp1 = *SCpntp2 |
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 9e71ac611146..2b2ce21e227e 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c | |||
@@ -2198,7 +2198,7 @@ no_command: | |||
2198 | * Returns: 0 on success, else error. | 2198 | * Returns: 0 on success, else error. |
2199 | * Notes: io_request_lock is held, interrupts are disabled. | 2199 | * Notes: io_request_lock is held, interrupts are disabled. |
2200 | */ | 2200 | */ |
2201 | int fas216_queue_command(struct scsi_cmnd *SCpnt, | 2201 | static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt, |
2202 | void (*done)(struct scsi_cmnd *)) | 2202 | void (*done)(struct scsi_cmnd *)) |
2203 | { | 2203 | { |
2204 | FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; | 2204 | FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; |
@@ -2240,6 +2240,8 @@ int fas216_queue_command(struct scsi_cmnd *SCpnt, | |||
2240 | return result; | 2240 | return result; |
2241 | } | 2241 | } |
2242 | 2242 | ||
2243 | DEF_SCSI_QCMD(fas216_queue_command) | ||
2244 | |||
2243 | /** | 2245 | /** |
2244 | * fas216_internal_done - trigger restart of a waiting thread in fas216_noqueue_command | 2246 | * fas216_internal_done - trigger restart of a waiting thread in fas216_noqueue_command |
2245 | * @SCpnt: Command to wake | 2247 | * @SCpnt: Command to wake |
@@ -2263,7 +2265,7 @@ static void fas216_internal_done(struct scsi_cmnd *SCpnt) | |||
2263 | * Returns: scsi result code. | 2265 | * Returns: scsi result code. |
2264 | * Notes: io_request_lock is held, interrupts are disabled. | 2266 | * Notes: io_request_lock is held, interrupts are disabled. |
2265 | */ | 2267 | */ |
2266 | int fas216_noqueue_command(struct scsi_cmnd *SCpnt, | 2268 | static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt, |
2267 | void (*done)(struct scsi_cmnd *)) | 2269 | void (*done)(struct scsi_cmnd *)) |
2268 | { | 2270 | { |
2269 | FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; | 2271 | FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; |
@@ -2277,7 +2279,7 @@ int fas216_noqueue_command(struct scsi_cmnd *SCpnt, | |||
2277 | BUG_ON(info->scsi.irq != NO_IRQ); | 2279 | BUG_ON(info->scsi.irq != NO_IRQ); |
2278 | 2280 | ||
2279 | info->internal_done = 0; | 2281 | info->internal_done = 0; |
2280 | fas216_queue_command(SCpnt, fas216_internal_done); | 2282 | fas216_queue_command_lck(SCpnt, fas216_internal_done); |
2281 | 2283 | ||
2282 | /* | 2284 | /* |
2283 | * This wastes time, since we can't return until the command is | 2285 | * This wastes time, since we can't return until the command is |
@@ -2310,6 +2312,8 @@ int fas216_noqueue_command(struct scsi_cmnd *SCpnt, | |||
2310 | return 0; | 2312 | return 0; |
2311 | } | 2313 | } |
2312 | 2314 | ||
2315 | DEF_SCSI_QCMD(fas216_noqueue_command) | ||
2316 | |||
2313 | /* | 2317 | /* |
2314 | * Error handler timeout function. Indicate that we timed out, | 2318 | * Error handler timeout function. Indicate that we timed out, |
2315 | * and wake up any error handler process so it can continue. | 2319 | * and wake up any error handler process so it can continue. |
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h index b65f4cf0eec9..f30f8d659dc4 100644 --- a/drivers/scsi/arm/fas216.h +++ b/drivers/scsi/arm/fas216.h | |||
@@ -331,23 +331,21 @@ extern int fas216_init (struct Scsi_Host *instance); | |||
331 | */ | 331 | */ |
332 | extern int fas216_add (struct Scsi_Host *instance, struct device *dev); | 332 | extern int fas216_add (struct Scsi_Host *instance, struct device *dev); |
333 | 333 | ||
334 | /* Function: int fas216_queue_command(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 334 | /* Function: int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt) |
335 | * Purpose : queue a command for adapter to process. | 335 | * Purpose : queue a command for adapter to process. |
336 | * Params : SCpnt - Command to queue | 336 | * Params : h - host adapter |
337 | * done - done function to call once command is complete | 337 | * : SCpnt - Command to queue |
338 | * Returns : 0 - success, else error | 338 | * Returns : 0 - success, else error |
339 | */ | 339 | */ |
340 | extern int fas216_queue_command(struct scsi_cmnd *, | 340 | extern int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); |
341 | void (*done)(struct scsi_cmnd *)); | ||
342 | 341 | ||
343 | /* Function: int fas216_noqueue_command(istruct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 342 | /* Function: int fas216_noqueue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt) |
344 | * Purpose : queue a command for adapter to process, and process it to completion. | 343 | * Purpose : queue a command for adapter to process, and process it to completion. |
345 | * Params : SCpnt - Command to queue | 344 | * Params : h - host adapter |
346 | * done - done function to call once command is complete | 345 | * : SCpnt - Command to queue |
347 | * Returns : 0 - success, else error | 346 | * Returns : 0 - success, else error |
348 | */ | 347 | */ |
349 | extern int fas216_noqueue_command(struct scsi_cmnd *, | 348 | extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *); |
350 | void (*done)(struct scsi_cmnd *)); | ||
351 | 349 | ||
352 | /* Function: irqreturn_t fas216_intr (FAS216_Info *info) | 350 | /* Function: irqreturn_t fas216_intr (FAS216_Info *info) |
353 | * Purpose : handle interrupts from the interface to progress a command | 351 | * Purpose : handle interrupts from the interface to progress a command |
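The updated header comments spell out the new midlayer calling convention: queuecommand now takes the Scsi_Host explicitly and no completion callback, since the generated wrapper supplies cmd->scsi_done itself. Bus glue built on fas216 registers the wrapper, not the _lck variant, in its host template. A hypothetical template for illustration only (field values invented; the real fas216 users live in the other arm/ drivers and are untouched by this hunk):

	static struct scsi_host_template example_template = {
		.module		= THIS_MODULE,
		.name		= "example",
		.queuecommand	= fas216_queue_command,	/* wrapper generated by DEF_SCSI_QCMD */
		.can_queue	= 1,
		.this_id	= 7,
		.sg_tablesize	= SG_ALL,
		.cmd_per_lun	= 1,
	};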
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 158ebc3644d8..88b2928b4d3b 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c | |||
@@ -910,7 +910,7 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags) | |||
910 | * | 910 | * |
911 | */ | 911 | */ |
912 | 912 | ||
913 | static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) | 913 | static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) |
914 | { | 914 | { |
915 | SETUP_HOSTDATA(cmd->device->host); | 915 | SETUP_HOSTDATA(cmd->device->host); |
916 | Scsi_Cmnd *tmp; | 916 | Scsi_Cmnd *tmp; |
@@ -1022,6 +1022,8 @@ static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) | |||
1022 | return 0; | 1022 | return 0; |
1023 | } | 1023 | } |
1024 | 1024 | ||
1025 | static DEF_SCSI_QCMD(NCR5380_queue_command) | ||
1026 | |||
1025 | /* | 1027 | /* |
1026 | * Function : NCR5380_main (void) | 1028 | * Function : NCR5380_main (void) |
1027 | * | 1029 | * |
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index ad7a23aef0ec..3e8658e2f154 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c | |||
@@ -572,23 +572,6 @@ static void falcon_get_lock(void) | |||
572 | } | 572 | } |
573 | 573 | ||
574 | 574 | ||
575 | /* This is the wrapper function for NCR5380_queue_command(). It just | ||
576 | * tries to get the lock on the ST-DMA (see above) and then calls the | ||
577 | * original function. | ||
578 | */ | ||
579 | |||
580 | #if 0 | ||
581 | int atari_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) | ||
582 | { | ||
583 | /* falcon_get_lock(); | ||
584 | * ++guenther: moved to NCR5380_queue_command() to prevent | ||
585 | * race condition, see there for an explanation. | ||
586 | */ | ||
587 | return NCR5380_queue_command(cmd, done); | ||
588 | } | ||
589 | #endif | ||
590 | |||
591 | |||
592 | int __init atari_scsi_detect(struct scsi_host_template *host) | 575 | int __init atari_scsi_detect(struct scsi_host_template *host) |
593 | { | 576 | { |
594 | static int called = 0; | 577 | static int called = 0; |
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index ab5bdda6903e..76029d570beb 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c | |||
@@ -605,7 +605,7 @@ handled: | |||
605 | * | 605 | * |
606 | * Queue a command to the ATP queue. Called with the host lock held. | 606 | * Queue a command to the ATP queue. Called with the host lock held. |
607 | */ | 607 | */ |
608 | static int atp870u_queuecommand(struct scsi_cmnd * req_p, | 608 | static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p, |
609 | void (*done) (struct scsi_cmnd *)) | 609 | void (*done) (struct scsi_cmnd *)) |
610 | { | 610 | { |
611 | unsigned char c; | 611 | unsigned char c; |
@@ -694,6 +694,8 @@ static int atp870u_queuecommand(struct scsi_cmnd * req_p, | |||
694 | return 0; | 694 | return 0; |
695 | } | 695 | } |
696 | 696 | ||
697 | static DEF_SCSI_QCMD(atp870u_queuecommand) | ||
698 | |||
697 | /** | 699 | /** |
698 | * send_s870 - send a command to the controller | 700 | * send_s870 - send a command to the controller |
699 | * @host: host | 701 | * @host: host |
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 8daa716739d1..8ca967dee66d 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c | |||
@@ -30,8 +30,7 @@ DEFINE_IDR(bfad_im_port_index); | |||
30 | struct scsi_transport_template *bfad_im_scsi_transport_template; | 30 | struct scsi_transport_template *bfad_im_scsi_transport_template; |
31 | struct scsi_transport_template *bfad_im_scsi_vport_transport_template; | 31 | struct scsi_transport_template *bfad_im_scsi_vport_transport_template; |
32 | static void bfad_im_itnim_work_handler(struct work_struct *work); | 32 | static void bfad_im_itnim_work_handler(struct work_struct *work); |
33 | static int bfad_im_queuecommand(struct scsi_cmnd *cmnd, | 33 | static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd); |
34 | void (*done)(struct scsi_cmnd *)); | ||
35 | static int bfad_im_slave_alloc(struct scsi_device *sdev); | 34 | static int bfad_im_slave_alloc(struct scsi_device *sdev); |
36 | static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, | 35 | static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, |
37 | struct bfad_itnim_s *itnim); | 36 | struct bfad_itnim_s *itnim); |
@@ -1120,7 +1119,7 @@ bfad_im_itnim_work_handler(struct work_struct *work) | |||
1120 | * Scsi_Host template entry, queue a SCSI command to the BFAD. | 1119 | * Scsi_Host template entry, queue a SCSI command to the BFAD. |
1121 | */ | 1120 | */ |
1122 | static int | 1121 | static int |
1123 | bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | 1122 | bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) |
1124 | { | 1123 | { |
1125 | struct bfad_im_port_s *im_port = | 1124 | struct bfad_im_port_s *im_port = |
1126 | (struct bfad_im_port_s *) cmnd->device->host->hostdata[0]; | 1125 | (struct bfad_im_port_s *) cmnd->device->host->hostdata[0]; |
@@ -1187,6 +1186,8 @@ out_fail_cmd: | |||
1187 | return 0; | 1186 | return 0; |
1188 | } | 1187 | } |
1189 | 1188 | ||
1189 | static DEF_SCSI_QCMD(bfad_im_queuecommand) | ||
1190 | |||
1190 | void | 1191 | void |
1191 | bfad_os_rport_online_wait(struct bfad_s *bfad) | 1192 | bfad_os_rport_online_wait(struct bfad_s *bfad) |
1192 | { | 1193 | { |
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 54f50b07dac7..8f1b5c8bf903 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c | |||
@@ -1080,7 +1080,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, | |||
1080 | * and is expected to be held on return. | 1080 | * and is expected to be held on return. |
1081 | * | 1081 | * |
1082 | **/ | 1082 | **/ |
1083 | static int dc395x_queue_command(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 1083 | static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
1084 | { | 1084 | { |
1085 | struct DeviceCtlBlk *dcb; | 1085 | struct DeviceCtlBlk *dcb; |
1086 | struct ScsiReqBlk *srb; | 1086 | struct ScsiReqBlk *srb; |
@@ -1154,6 +1154,7 @@ complete: | |||
1154 | return 0; | 1154 | return 0; |
1155 | } | 1155 | } |
1156 | 1156 | ||
1157 | static DEF_SCSI_QCMD(dc395x_queue_command) | ||
1157 | 1158 | ||
1158 | /* | 1159 | /* |
1159 | * Return the disk geometry for the given SCSI device. | 1160 | * Return the disk geometry for the given SCSI device. |
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 23dec0063385..cffcb108ac96 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c | |||
@@ -423,7 +423,7 @@ static int adpt_slave_configure(struct scsi_device * device) | |||
423 | return 0; | 423 | return 0; |
424 | } | 424 | } |
425 | 425 | ||
426 | static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) | 426 | static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) |
427 | { | 427 | { |
428 | adpt_hba* pHba = NULL; | 428 | adpt_hba* pHba = NULL; |
429 | struct adpt_device* pDev = NULL; /* dpt per device information */ | 429 | struct adpt_device* pDev = NULL; /* dpt per device information */ |
@@ -491,6 +491,8 @@ static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) | |||
491 | return adpt_scsi_to_i2o(pHba, cmd, pDev); | 491 | return adpt_scsi_to_i2o(pHba, cmd, pDev); |
492 | } | 492 | } |
493 | 493 | ||
494 | static DEF_SCSI_QCMD(adpt_queue) | ||
495 | |||
494 | static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev, | 496 | static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev, |
495 | sector_t capacity, int geom[]) | 497 | sector_t capacity, int geom[]) |
496 | { | 498 | { |
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h index 337746d46043..beded716f93f 100644 --- a/drivers/scsi/dpti.h +++ b/drivers/scsi/dpti.h | |||
@@ -29,7 +29,7 @@ | |||
29 | */ | 29 | */ |
30 | 30 | ||
31 | static int adpt_detect(struct scsi_host_template * sht); | 31 | static int adpt_detect(struct scsi_host_template * sht); |
32 | static int adpt_queue(struct scsi_cmnd * cmd, void (*cmdcomplete) (struct scsi_cmnd *)); | 32 | static int adpt_queue(struct Scsi_Host *h, struct scsi_cmnd * cmd); |
33 | static int adpt_abort(struct scsi_cmnd * cmd); | 33 | static int adpt_abort(struct scsi_cmnd * cmd); |
34 | static int adpt_reset(struct scsi_cmnd* cmd); | 34 | static int adpt_reset(struct scsi_cmnd* cmd); |
35 | static int adpt_release(struct Scsi_Host *host); | 35 | static int adpt_release(struct Scsi_Host *host); |
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h index 0b205f8c7326..cdc621204b66 100644 --- a/drivers/scsi/dtc.h +++ b/drivers/scsi/dtc.h | |||
@@ -36,7 +36,7 @@ static int dtc_abort(Scsi_Cmnd *); | |||
36 | static int dtc_biosparam(struct scsi_device *, struct block_device *, | 36 | static int dtc_biosparam(struct scsi_device *, struct block_device *, |
37 | sector_t, int*); | 37 | sector_t, int*); |
38 | static int dtc_detect(struct scsi_host_template *); | 38 | static int dtc_detect(struct scsi_host_template *); |
39 | static int dtc_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); | 39 | static int dtc_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
40 | static int dtc_bus_reset(Scsi_Cmnd *); | 40 | static int dtc_bus_reset(Scsi_Cmnd *); |
41 | 41 | ||
42 | #ifndef CMD_PER_LUN | 42 | #ifndef CMD_PER_LUN |
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index d1c31378f6da..53925ac178fd 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c | |||
@@ -505,8 +505,7 @@ | |||
505 | 505 | ||
506 | static int eata2x_detect(struct scsi_host_template *); | 506 | static int eata2x_detect(struct scsi_host_template *); |
507 | static int eata2x_release(struct Scsi_Host *); | 507 | static int eata2x_release(struct Scsi_Host *); |
508 | static int eata2x_queuecommand(struct scsi_cmnd *, | 508 | static int eata2x_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
509 | void (*done) (struct scsi_cmnd *)); | ||
510 | static int eata2x_eh_abort(struct scsi_cmnd *); | 509 | static int eata2x_eh_abort(struct scsi_cmnd *); |
511 | static int eata2x_eh_host_reset(struct scsi_cmnd *); | 510 | static int eata2x_eh_host_reset(struct scsi_cmnd *); |
512 | static int eata2x_bios_param(struct scsi_device *, struct block_device *, | 511 | static int eata2x_bios_param(struct scsi_device *, struct block_device *, |
@@ -1758,7 +1757,7 @@ static void scsi_to_dev_dir(unsigned int i, struct hostdata *ha) | |||
1758 | 1757 | ||
1759 | } | 1758 | } |
1760 | 1759 | ||
1761 | static int eata2x_queuecommand(struct scsi_cmnd *SCpnt, | 1760 | static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt, |
1762 | void (*done) (struct scsi_cmnd *)) | 1761 | void (*done) (struct scsi_cmnd *)) |
1763 | { | 1762 | { |
1764 | struct Scsi_Host *shost = SCpnt->device->host; | 1763 | struct Scsi_Host *shost = SCpnt->device->host; |
@@ -1843,6 +1842,8 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt, | |||
1843 | return 0; | 1842 | return 0; |
1844 | } | 1843 | } |
1845 | 1844 | ||
1845 | static DEF_SCSI_QCMD(eata2x_queuecommand) | ||
1846 | |||
1846 | static int eata2x_eh_abort(struct scsi_cmnd *SCarg) | 1847 | static int eata2x_eh_abort(struct scsi_cmnd *SCarg) |
1847 | { | 1848 | { |
1848 | struct Scsi_Host *shost = SCarg->device->host; | 1849 | struct Scsi_Host *shost = SCarg->device->host; |
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c index 60886c19065e..4a9641e69f54 100644 --- a/drivers/scsi/eata_pio.c +++ b/drivers/scsi/eata_pio.c | |||
@@ -335,7 +335,7 @@ static inline unsigned int eata_pio_send_command(unsigned long base, unsigned ch | |||
335 | return 0; | 335 | return 0; |
336 | } | 336 | } |
337 | 337 | ||
338 | static int eata_pio_queue(struct scsi_cmnd *cmd, | 338 | static int eata_pio_queue_lck(struct scsi_cmnd *cmd, |
339 | void (*done)(struct scsi_cmnd *)) | 339 | void (*done)(struct scsi_cmnd *)) |
340 | { | 340 | { |
341 | unsigned int x, y; | 341 | unsigned int x, y; |
@@ -438,6 +438,8 @@ static int eata_pio_queue(struct scsi_cmnd *cmd, | |||
438 | return 0; | 438 | return 0; |
439 | } | 439 | } |
440 | 440 | ||
441 | static DEF_SCSI_QCMD(eata_pio_queue) | ||
442 | |||
441 | static int eata_pio_abort(struct scsi_cmnd *cmd) | 443 | static int eata_pio_abort(struct scsi_cmnd *cmd) |
442 | { | 444 | { |
443 | unsigned int loop = 100; | 445 | unsigned int loop = 100; |
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index e2bc779f86c1..57558523c1b8 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c | |||
@@ -916,7 +916,7 @@ static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent) | |||
916 | scsi_track_queue_full(dev, lp->num_tagged - 1); | 916 | scsi_track_queue_full(dev, lp->num_tagged - 1); |
917 | } | 917 | } |
918 | 918 | ||
919 | static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 919 | static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
920 | { | 920 | { |
921 | struct scsi_device *dev = cmd->device; | 921 | struct scsi_device *dev = cmd->device; |
922 | struct esp *esp = shost_priv(dev->host); | 922 | struct esp *esp = shost_priv(dev->host); |
@@ -941,6 +941,8 @@ static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd | |||
941 | return 0; | 941 | return 0; |
942 | } | 942 | } |
943 | 943 | ||
944 | static DEF_SCSI_QCMD(esp_queuecommand) | ||
945 | |||
944 | static int esp_check_gross_error(struct esp *esp) | 946 | static int esp_check_gross_error(struct esp *esp) |
945 | { | 947 | { |
946 | if (esp->sreg & ESP_STAT_SPAM) { | 948 | if (esp->sreg & ESP_STAT_SPAM) { |
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c index 2ad95aa8f585..a2c6135d337e 100644 --- a/drivers/scsi/fd_mcs.c +++ b/drivers/scsi/fd_mcs.c | |||
@@ -1072,7 +1072,7 @@ static int fd_mcs_release(struct Scsi_Host *shpnt) | |||
1072 | return 0; | 1072 | return 0; |
1073 | } | 1073 | } |
1074 | 1074 | ||
1075 | static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | 1075 | static int fd_mcs_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) |
1076 | { | 1076 | { |
1077 | struct Scsi_Host *shpnt = SCpnt->device->host; | 1077 | struct Scsi_Host *shpnt = SCpnt->device->host; |
1078 | 1078 | ||
@@ -1122,6 +1122,8 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | |||
1122 | return 0; | 1122 | return 0; |
1123 | } | 1123 | } |
1124 | 1124 | ||
1125 | static DEF_SCSI_QCMD(fd_mcs_queue) | ||
1126 | |||
1125 | #if DEBUG_ABORT || DEBUG_RESET | 1127 | #if DEBUG_ABORT || DEBUG_RESET |
1126 | static void fd_mcs_print_info(Scsi_Cmnd * SCpnt) | 1128 | static void fd_mcs_print_info(Scsi_Cmnd * SCpnt) |
1127 | { | 1129 | { |
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c index e296bcc57d5c..69b7aa54f43f 100644 --- a/drivers/scsi/fdomain.c +++ b/drivers/scsi/fdomain.c | |||
@@ -1419,7 +1419,7 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id) | |||
1419 | return IRQ_HANDLED; | 1419 | return IRQ_HANDLED; |
1420 | } | 1420 | } |
1421 | 1421 | ||
1422 | static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt, | 1422 | static int fdomain_16x0_queue_lck(struct scsi_cmnd *SCpnt, |
1423 | void (*done)(struct scsi_cmnd *)) | 1423 | void (*done)(struct scsi_cmnd *)) |
1424 | { | 1424 | { |
1425 | if (in_command) { | 1425 | if (in_command) { |
@@ -1469,6 +1469,8 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt, | |||
1469 | return 0; | 1469 | return 0; |
1470 | } | 1470 | } |
1471 | 1471 | ||
1472 | static DEF_SCSI_QCMD(fdomain_16x0_queue) | ||
1473 | |||
1472 | #if DEBUG_ABORT | 1474 | #if DEBUG_ABORT |
1473 | static void print_info(struct scsi_cmnd *SCpnt) | 1475 | static void print_info(struct scsi_cmnd *SCpnt) |
1474 | { | 1476 | { |
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index cbb20b13b228..92f185081e62 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h | |||
@@ -246,7 +246,7 @@ void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *); | |||
246 | void fnic_update_mac(struct fc_lport *, u8 *new); | 246 | void fnic_update_mac(struct fc_lport *, u8 *new); |
247 | void fnic_update_mac_locked(struct fnic *, u8 *new); | 247 | void fnic_update_mac_locked(struct fnic *, u8 *new); |
248 | 248 | ||
249 | int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); | 249 | int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
250 | int fnic_abort_cmd(struct scsi_cmnd *); | 250 | int fnic_abort_cmd(struct scsi_cmnd *); |
251 | int fnic_device_reset(struct scsi_cmnd *); | 251 | int fnic_device_reset(struct scsi_cmnd *); |
252 | int fnic_host_reset(struct scsi_cmnd *); | 252 | int fnic_host_reset(struct scsi_cmnd *); |
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 198cbab3e894..22d02404d15f 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c | |||
@@ -349,7 +349,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, | |||
349 | * Routine to send a scsi cdb | 349 | * Routine to send a scsi cdb |
350 | * Called with host_lock held and interrupts disabled. | 350 | * Called with host_lock held and interrupts disabled. |
351 | */ | 351 | */ |
352 | int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | 352 | static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) |
353 | { | 353 | { |
354 | struct fc_lport *lp; | 354 | struct fc_lport *lp; |
355 | struct fc_rport *rport; | 355 | struct fc_rport *rport; |
@@ -457,6 +457,8 @@ out: | |||
457 | return ret; | 457 | return ret; |
458 | } | 458 | } |
459 | 459 | ||
460 | DEF_SCSI_QCMD(fnic_queuecommand) | ||
461 | |||
460 | /* | 462 | /* |
461 | * fnic_fcpio_fw_reset_cmpl_handler | 463 | * fnic_fcpio_fw_reset_cmpl_handler |
462 | * Routine to handle fw reset completion | 464 | * Routine to handle fw reset completion |
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index 921764c9ab24..1bcdb7beb77b 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h | |||
@@ -46,7 +46,7 @@ | |||
46 | static int generic_NCR5380_abort(Scsi_Cmnd *); | 46 | static int generic_NCR5380_abort(Scsi_Cmnd *); |
47 | static int generic_NCR5380_detect(struct scsi_host_template *); | 47 | static int generic_NCR5380_detect(struct scsi_host_template *); |
48 | static int generic_NCR5380_release_resources(struct Scsi_Host *); | 48 | static int generic_NCR5380_release_resources(struct Scsi_Host *); |
49 | static int generic_NCR5380_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); | 49 | static int generic_NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
50 | static int generic_NCR5380_bus_reset(Scsi_Cmnd *); | 50 | static int generic_NCR5380_bus_reset(Scsi_Cmnd *); |
51 | static const char* generic_NCR5380_info(struct Scsi_Host *); | 51 | static const char* generic_NCR5380_info(struct Scsi_Host *); |
52 | 52 | ||
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 841101846b88..76365700e2d5 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c | |||
@@ -185,7 +185,7 @@ static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd, | |||
185 | unsigned long arg); | 185 | unsigned long arg); |
186 | 186 | ||
187 | static void gdth_flush(gdth_ha_str *ha); | 187 | static void gdth_flush(gdth_ha_str *ha); |
188 | static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); | 188 | static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
189 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, | 189 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, |
190 | struct gdth_cmndinfo *cmndinfo); | 190 | struct gdth_cmndinfo *cmndinfo); |
191 | static void gdth_scsi_done(struct scsi_cmnd *scp); | 191 | static void gdth_scsi_done(struct scsi_cmnd *scp); |
@@ -4004,7 +4004,7 @@ static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,se | |||
4004 | } | 4004 | } |
4005 | 4005 | ||
4006 | 4006 | ||
4007 | static int gdth_queuecommand(struct scsi_cmnd *scp, | 4007 | static int gdth_queuecommand_lck(struct scsi_cmnd *scp, |
4008 | void (*done)(struct scsi_cmnd *)) | 4008 | void (*done)(struct scsi_cmnd *)) |
4009 | { | 4009 | { |
4010 | gdth_ha_str *ha = shost_priv(scp->device->host); | 4010 | gdth_ha_str *ha = shost_priv(scp->device->host); |
@@ -4022,6 +4022,8 @@ static int gdth_queuecommand(struct scsi_cmnd *scp, | |||
4022 | return __gdth_queuecommand(ha, scp, cmndinfo); | 4022 | return __gdth_queuecommand(ha, scp, cmndinfo); |
4023 | } | 4023 | } |
4024 | 4024 | ||
4025 | static DEF_SCSI_QCMD(gdth_queuecommand) | ||
4026 | |||
4025 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, | 4027 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, |
4026 | struct gdth_cmndinfo *cmndinfo) | 4028 | struct gdth_cmndinfo *cmndinfo) |
4027 | { | 4029 | { |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index c5d0606ad097..b2fb2b2a6e70 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
34 | #include <linux/smp_lock.h> | ||
35 | #include <linux/compat.h> | 34 | #include <linux/compat.h> |
36 | #include <linux/blktrace_api.h> | 35 | #include <linux/blktrace_api.h> |
37 | #include <linux/uaccess.h> | 36 | #include <linux/uaccess.h> |
@@ -143,8 +142,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
143 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, | 142 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, |
144 | int cmd_type); | 143 | int cmd_type); |
145 | 144 | ||
146 | static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, | 145 | static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
147 | void (*done)(struct scsi_cmnd *)); | ||
148 | static void hpsa_scan_start(struct Scsi_Host *); | 146 | static void hpsa_scan_start(struct Scsi_Host *); |
149 | static int hpsa_scan_finished(struct Scsi_Host *sh, | 147 | static int hpsa_scan_finished(struct Scsi_Host *sh, |
150 | unsigned long elapsed_time); | 148 | unsigned long elapsed_time); |
@@ -1926,7 +1924,7 @@ sglist_finished: | |||
1926 | } | 1924 | } |
1927 | 1925 | ||
1928 | 1926 | ||
1929 | static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, | 1927 | static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, |
1930 | void (*done)(struct scsi_cmnd *)) | 1928 | void (*done)(struct scsi_cmnd *)) |
1931 | { | 1929 | { |
1932 | struct ctlr_info *h; | 1930 | struct ctlr_info *h; |
@@ -2020,6 +2018,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, | |||
2020 | return 0; | 2018 | return 0; |
2021 | } | 2019 | } |
2022 | 2020 | ||
2021 | static DEF_SCSI_QCMD(hpsa_scsi_queue_command) | ||
2022 | |||
2023 | static void hpsa_scan_start(struct Scsi_Host *sh) | 2023 | static void hpsa_scan_start(struct Scsi_Host *sh) |
2024 | { | 2024 | { |
2025 | struct ctlr_info *h = shost_to_hba(sh); | 2025 | struct ctlr_info *h = shost_to_hba(sh); |
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index 0729f150b33a..10b65556937b 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c | |||
@@ -751,7 +751,7 @@ static void hptiop_post_req_mv(struct hptiop_hba *hba, | |||
751 | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba); | 751 | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba); |
752 | } | 752 | } |
753 | 753 | ||
754 | static int hptiop_queuecommand(struct scsi_cmnd *scp, | 754 | static int hptiop_queuecommand_lck(struct scsi_cmnd *scp, |
755 | void (*done)(struct scsi_cmnd *)) | 755 | void (*done)(struct scsi_cmnd *)) |
756 | { | 756 | { |
757 | struct Scsi_Host *host = scp->device->host; | 757 | struct Scsi_Host *host = scp->device->host; |
@@ -819,6 +819,8 @@ cmd_done: | |||
819 | return 0; | 819 | return 0; |
820 | } | 820 | } |
821 | 821 | ||
822 | static DEF_SCSI_QCMD(hptiop_queuecommand) | ||
823 | |||
822 | static const char *hptiop_info(struct Scsi_Host *host) | 824 | static const char *hptiop_info(struct Scsi_Host *host) |
823 | { | 825 | { |
824 | return driver_name_long; | 826 | return driver_name_long; |
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c index 9a4b69d4f4eb..67fc8ffd52e6 100644 --- a/drivers/scsi/ibmmca.c +++ b/drivers/scsi/ibmmca.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #include <scsi/scsi_host.h> | 39 | #include <scsi/scsi_host.h> |
40 | 40 | ||
41 | /* Common forward declarations for all Linux-versions: */ | 41 | /* Common forward declarations for all Linux-versions: */ |
42 | static int ibmmca_queuecommand (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *)); | 42 | static int ibmmca_queuecommand (struct Scsi_Host *, struct scsi_cmnd *); |
43 | static int ibmmca_abort (Scsi_Cmnd *); | 43 | static int ibmmca_abort (Scsi_Cmnd *); |
44 | static int ibmmca_host_reset (Scsi_Cmnd *); | 44 | static int ibmmca_host_reset (Scsi_Cmnd *); |
45 | static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *); | 45 | static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *); |
@@ -1691,7 +1691,7 @@ static int __devexit ibmmca_remove(struct device *dev) | |||
1691 | } | 1691 | } |
1692 | 1692 | ||
1693 | /* The following routine is the SCSI command queue for the midlevel driver */ | 1693 | /* The following routine is the SCSI command queue for the midlevel driver */ |
1694 | static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | 1694 | static int ibmmca_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) |
1695 | { | 1695 | { |
1696 | unsigned int ldn; | 1696 | unsigned int ldn; |
1697 | unsigned int scsi_cmd; | 1697 | unsigned int scsi_cmd; |
@@ -1996,6 +1996,8 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | |||
1996 | return 0; | 1996 | return 0; |
1997 | } | 1997 | } |
1998 | 1998 | ||
1999 | static DEF_SCSI_QCMD(ibmmca_queuecommand) | ||
2000 | |||
1999 | static int __ibmmca_abort(Scsi_Cmnd * cmd) | 2001 | static int __ibmmca_abort(Scsi_Cmnd * cmd) |
2000 | { | 2002 | { |
2001 | /* Abort does not work, as the adapter never generates an interrupt on | 2003 | /* Abort does not work, as the adapter never generates an interrupt on |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 00d08b25425f..57cad7e20caa 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
@@ -1606,7 +1606,7 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost) | |||
1606 | * Returns: | 1606 | * Returns: |
1607 | * 0 on success / other on failure | 1607 | * 0 on success / other on failure |
1608 | **/ | 1608 | **/ |
1609 | static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd, | 1609 | static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd, |
1610 | void (*done) (struct scsi_cmnd *)) | 1610 | void (*done) (struct scsi_cmnd *)) |
1611 | { | 1611 | { |
1612 | struct ibmvfc_host *vhost = shost_priv(cmnd->device->host); | 1612 | struct ibmvfc_host *vhost = shost_priv(cmnd->device->host); |
@@ -1672,6 +1672,8 @@ static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd, | |||
1672 | return 0; | 1672 | return 0; |
1673 | } | 1673 | } |
1674 | 1674 | ||
1675 | static DEF_SCSI_QCMD(ibmvfc_queuecommand) | ||
1676 | |||
1675 | /** | 1677 | /** |
1676 | * ibmvfc_sync_completion - Signal that a synchronous command has completed | 1678 | * ibmvfc_sync_completion - Signal that a synchronous command has completed |
1677 | * @evt: ibmvfc event struct | 1679 | * @evt: ibmvfc event struct |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 67f78a470f5f..041958453e2a 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -713,7 +713,7 @@ static inline u16 lun_from_dev(struct scsi_device *dev) | |||
713 | * @cmd: struct scsi_cmnd to be executed | 713 | * @cmd: struct scsi_cmnd to be executed |
714 | * @done: Callback function to be called when cmd is completed | 714 | * @done: Callback function to be called when cmd is completed |
715 | */ | 715 | */ |
716 | static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, | 716 | static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd, |
717 | void (*done) (struct scsi_cmnd *)) | 717 | void (*done) (struct scsi_cmnd *)) |
718 | { | 718 | { |
719 | struct srp_cmd *srp_cmd; | 719 | struct srp_cmd *srp_cmd; |
@@ -766,6 +766,8 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, | |||
766 | return ibmvscsi_send_srp_event(evt_struct, hostdata, 0); | 766 | return ibmvscsi_send_srp_event(evt_struct, hostdata, 0); |
767 | } | 767 | } |
768 | 768 | ||
769 | static DEF_SCSI_QCMD(ibmvscsi_queuecommand) | ||
770 | |||
769 | /* ------------------------------------------------------------ | 771 | /* ------------------------------------------------------------ |
770 | * Routines for driver initialization | 772 | * Routines for driver initialization |
771 | */ | 773 | */ |
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 4734ab0b3ff6..99aa0e5699bc 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c | |||
@@ -926,7 +926,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) | |||
926 | return 0; | 926 | return 0; |
927 | } | 927 | } |
928 | 928 | ||
929 | static int imm_queuecommand(struct scsi_cmnd *cmd, | 929 | static int imm_queuecommand_lck(struct scsi_cmnd *cmd, |
930 | void (*done)(struct scsi_cmnd *)) | 930 | void (*done)(struct scsi_cmnd *)) |
931 | { | 931 | { |
932 | imm_struct *dev = imm_dev(cmd->device->host); | 932 | imm_struct *dev = imm_dev(cmd->device->host); |
@@ -949,6 +949,8 @@ static int imm_queuecommand(struct scsi_cmnd *cmd, | |||
949 | return 0; | 949 | return 0; |
950 | } | 950 | } |
951 | 951 | ||
952 | static DEF_SCSI_QCMD(imm_queuecommand) | ||
953 | |||
952 | /* | 954 | /* |
953 | * Apparently the disk->capacity attribute is off by 1 sector | 955 | * Apparently the disk->capacity attribute is off by 1 sector |
954 | * for all disk drives. We add the one here, but it should really | 956 | * for all disk drives. We add the one here, but it should really |
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c index 52bdc6df6b92..6568aab745a0 100644 --- a/drivers/scsi/in2000.c +++ b/drivers/scsi/in2000.c | |||
@@ -334,7 +334,7 @@ static uchar calc_sync_xfer(unsigned int period, unsigned int offset) | |||
334 | 334 | ||
335 | static void in2000_execute(struct Scsi_Host *instance); | 335 | static void in2000_execute(struct Scsi_Host *instance); |
336 | 336 | ||
337 | static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | 337 | static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) |
338 | { | 338 | { |
339 | struct Scsi_Host *instance; | 339 | struct Scsi_Host *instance; |
340 | struct IN2000_hostdata *hostdata; | 340 | struct IN2000_hostdata *hostdata; |
@@ -431,6 +431,8 @@ static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | |||
431 | return 0; | 431 | return 0; |
432 | } | 432 | } |
433 | 433 | ||
434 | static DEF_SCSI_QCMD(in2000_queuecommand) | ||
435 | |||
434 | 436 | ||
435 | 437 | ||
436 | /* | 438 | /* |
diff --git a/drivers/scsi/in2000.h b/drivers/scsi/in2000.h index 0fb8b06b8392..5821e1fbce08 100644 --- a/drivers/scsi/in2000.h +++ b/drivers/scsi/in2000.h | |||
@@ -396,7 +396,7 @@ struct IN2000_hostdata { | |||
396 | flags) | 396 | flags) |
397 | 397 | ||
398 | static int in2000_detect(struct scsi_host_template *) in2000__INIT; | 398 | static int in2000_detect(struct scsi_host_template *) in2000__INIT; |
399 | static int in2000_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); | 399 | static int in2000_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
400 | static int in2000_abort(Scsi_Cmnd *); | 400 | static int in2000_abort(Scsi_Cmnd *); |
401 | static void in2000_setup(char *, int *) in2000__INIT; | 401 | static void in2000_setup(char *, int *) in2000__INIT; |
402 | static int in2000_biosparam(struct scsi_device *, struct block_device *, | 402 | static int in2000_biosparam(struct scsi_device *, struct block_device *, |
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 108797761b95..9627d062e16b 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c | |||
@@ -2639,7 +2639,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c | |||
2639 | * will cause the mid layer to call us again later with the command) | 2639 | * will cause the mid layer to call us again later with the command) |
2640 | */ | 2640 | */ |
2641 | 2641 | ||
2642 | static int i91u_queuecommand(struct scsi_cmnd *cmd, | 2642 | static int i91u_queuecommand_lck(struct scsi_cmnd *cmd, |
2643 | void (*done)(struct scsi_cmnd *)) | 2643 | void (*done)(struct scsi_cmnd *)) |
2644 | { | 2644 | { |
2645 | struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata; | 2645 | struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata; |
@@ -2656,6 +2656,8 @@ static int i91u_queuecommand(struct scsi_cmnd *cmd, | |||
2656 | return 0; | 2656 | return 0; |
2657 | } | 2657 | } |
2658 | 2658 | ||
2659 | static DEF_SCSI_QCMD(i91u_queuecommand) | ||
2660 | |||
2659 | /** | 2661 | /** |
2660 | * i91u_bus_reset - reset the SCSI bus | 2662 | * i91u_bus_reset - reset the SCSI bus |
2661 | * @cmnd: Command block we want to trigger the reset for | 2663 | * @cmnd: Command block we want to trigger the reset for |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index fa60d7df44be..5bbaee597e88 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -5709,7 +5709,7 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) | |||
5709 | * SCSI_MLQUEUE_DEVICE_BUSY if device is busy | 5709 | * SCSI_MLQUEUE_DEVICE_BUSY if device is busy |
5710 | * SCSI_MLQUEUE_HOST_BUSY if host is busy | 5710 | * SCSI_MLQUEUE_HOST_BUSY if host is busy |
5711 | **/ | 5711 | **/ |
5712 | static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, | 5712 | static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd, |
5713 | void (*done) (struct scsi_cmnd *)) | 5713 | void (*done) (struct scsi_cmnd *)) |
5714 | { | 5714 | { |
5715 | struct ipr_ioa_cfg *ioa_cfg; | 5715 | struct ipr_ioa_cfg *ioa_cfg; |
@@ -5792,6 +5792,8 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, | |||
5792 | return 0; | 5792 | return 0; |
5793 | } | 5793 | } |
5794 | 5794 | ||
5795 | static DEF_SCSI_QCMD(ipr_queuecommand) | ||
5796 | |||
5795 | /** | 5797 | /** |
5796 | * ipr_ioctl - IOCTL handler | 5798 | * ipr_ioctl - IOCTL handler |
5797 | * @sdev: scsi device struct | 5799 | * @sdev: scsi device struct |
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index f83a116955f2..b2511acd39bd 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c | |||
@@ -232,7 +232,7 @@ static int ips_detect(struct scsi_host_template *); | |||
232 | static int ips_release(struct Scsi_Host *); | 232 | static int ips_release(struct Scsi_Host *); |
233 | static int ips_eh_abort(struct scsi_cmnd *); | 233 | static int ips_eh_abort(struct scsi_cmnd *); |
234 | static int ips_eh_reset(struct scsi_cmnd *); | 234 | static int ips_eh_reset(struct scsi_cmnd *); |
235 | static int ips_queue(struct scsi_cmnd *, void (*)(struct scsi_cmnd *)); | 235 | static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *); |
236 | static const char *ips_info(struct Scsi_Host *); | 236 | static const char *ips_info(struct Scsi_Host *); |
237 | static irqreturn_t do_ipsintr(int, void *); | 237 | static irqreturn_t do_ipsintr(int, void *); |
238 | static int ips_hainit(ips_ha_t *); | 238 | static int ips_hainit(ips_ha_t *); |
@@ -1046,7 +1046,7 @@ static int ips_eh_reset(struct scsi_cmnd *SC) | |||
1046 | /* Linux obtains io_request_lock before calling this function */ | 1046 | /* Linux obtains io_request_lock before calling this function */ |
1047 | /* */ | 1047 | /* */ |
1048 | /****************************************************************************/ | 1048 | /****************************************************************************/ |
1049 | static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)) | 1049 | static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)) |
1050 | { | 1050 | { |
1051 | ips_ha_t *ha; | 1051 | ips_ha_t *ha; |
1052 | ips_passthru_t *pt; | 1052 | ips_passthru_t *pt; |
@@ -1137,6 +1137,8 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)) | |||
1137 | return (0); | 1137 | return (0); |
1138 | } | 1138 | } |
1139 | 1139 | ||
1140 | static DEF_SCSI_QCMD(ips_queue) | ||
1141 | |||
1140 | /****************************************************************************/ | 1142 | /****************************************************************************/ |
1141 | /* */ | 1143 | /* */ |
1142 | /* Routine Name: ips_biosparam */ | 1144 | /* Routine Name: ips_biosparam */ |
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index e340373b509b..2924363d142b 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -1753,7 +1753,7 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport) | |||
1753 | * This is the i/o strategy routine, called by the SCSI layer. This routine | 1753 | * This is the i/o strategy routine, called by the SCSI layer. This routine |
1754 | * is called with the host_lock held. | 1754 | * is called with the host_lock held. |
1755 | */ | 1755 | */ |
1756 | int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | 1756 | static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) |
1757 | { | 1757 | { |
1758 | struct fc_lport *lport; | 1758 | struct fc_lport *lport; |
1759 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); | 1759 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); |
@@ -1851,6 +1851,8 @@ out: | |||
1851 | spin_lock_irq(lport->host->host_lock); | 1851 | spin_lock_irq(lport->host->host_lock); |
1852 | return rc; | 1852 | return rc; |
1853 | } | 1853 | } |
1854 | |||
1855 | DEF_SCSI_QCMD(fc_queuecommand) | ||
1854 | EXPORT_SYMBOL(fc_queuecommand); | 1856 | EXPORT_SYMBOL(fc_queuecommand); |
1855 | 1857 | ||
1856 | /** | 1858 | /** |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 633e09036357..c15fde808c33 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1599,7 +1599,7 @@ enum { | |||
1599 | FAILURE_SESSION_NOT_READY, | 1599 | FAILURE_SESSION_NOT_READY, |
1600 | }; | 1600 | }; |
1601 | 1601 | ||
1602 | int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | 1602 | static int iscsi_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) |
1603 | { | 1603 | { |
1604 | struct iscsi_cls_session *cls_session; | 1604 | struct iscsi_cls_session *cls_session; |
1605 | struct Scsi_Host *host; | 1605 | struct Scsi_Host *host; |
@@ -1736,6 +1736,8 @@ fault: | |||
1736 | spin_lock(host->host_lock); | 1736 | spin_lock(host->host_lock); |
1737 | return 0; | 1737 | return 0; |
1738 | } | 1738 | } |
1739 | |||
1740 | DEF_SCSI_QCMD(iscsi_queuecommand) | ||
1739 | EXPORT_SYMBOL_GPL(iscsi_queuecommand); | 1741 | EXPORT_SYMBOL_GPL(iscsi_queuecommand); |
1740 | 1742 | ||
1741 | int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) | 1743 | int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) |
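In the transport libraries it is the wrapper that stays exported: fc_queuecommand and iscsi_queuecommand keep their EXPORT_SYMBOL()/EXPORT_SYMBOL_GPL() lines while the renamed _lck handlers become static, so low-level drivers whose templates point at the library routine need no changes of their own. A hypothetical LLD template, for illustration only, assuming the usual libiscsi helpers:

	static struct scsi_host_template example_iscsi_sht = {
		.module			= THIS_MODULE,
		.name			= "example-iscsi",
		.queuecommand		= iscsi_queuecommand,	/* exported wrapper */
		.change_queue_depth	= iscsi_change_queue_depth,
		.sg_tablesize		= SG_ALL,
		.this_id		= -1,
	};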
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 55f09e92ab59..29251fabecc6 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
@@ -189,7 +189,7 @@ int sas_queue_up(struct sas_task *task) | |||
189 | * Note: XXX: Remove the host unlock/lock pair when SCSI Core can | 189 | * Note: XXX: Remove the host unlock/lock pair when SCSI Core can |
190 | * call us without holding an IRQ spinlock... | 190 | * call us without holding an IRQ spinlock... |
191 | */ | 191 | */ |
192 | int sas_queuecommand(struct scsi_cmnd *cmd, | 192 | static int sas_queuecommand_lck(struct scsi_cmnd *cmd, |
193 | void (*scsi_done)(struct scsi_cmnd *)) | 193 | void (*scsi_done)(struct scsi_cmnd *)) |
194 | __releases(host->host_lock) | 194 | __releases(host->host_lock) |
195 | __acquires(dev->sata_dev.ap->lock) | 195 | __acquires(dev->sata_dev.ap->lock) |
@@ -254,6 +254,8 @@ out: | |||
254 | return res; | 254 | return res; |
255 | } | 255 | } |
256 | 256 | ||
257 | DEF_SCSI_QCMD(sas_queuecommand) | ||
258 | |||
257 | static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) | 259 | static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) |
258 | { | 260 | { |
259 | struct sas_task *task = TO_SAS_TASK(cmd); | 261 | struct sas_task *task = TO_SAS_TASK(cmd); |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index f64b65a770b8..581837b3c71a 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -2899,7 +2899,7 @@ void lpfc_poll_timeout(unsigned long ptr) | |||
2899 | * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. | 2899 | * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. |
2900 | **/ | 2900 | **/ |
2901 | static int | 2901 | static int |
2902 | lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | 2902 | lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) |
2903 | { | 2903 | { |
2904 | struct Scsi_Host *shost = cmnd->device->host; | 2904 | struct Scsi_Host *shost = cmnd->device->host; |
2905 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 2905 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
@@ -3060,6 +3060,8 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
3060 | return 0; | 3060 | return 0; |
3061 | } | 3061 | } |
3062 | 3062 | ||
3063 | static DEF_SCSI_QCMD(lpfc_queuecommand) | ||
3064 | |||
3063 | /** | 3065 | /** |
3064 | * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point | 3066 | * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point |
3065 | * @cmnd: Pointer to scsi_cmnd data structure. | 3067 | * @cmnd: Pointer to scsi_cmnd data structure. |
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c index 3ddb4dc62d5d..6c42dff0f4d3 100644 --- a/drivers/scsi/mac53c94.c +++ b/drivers/scsi/mac53c94.c | |||
@@ -66,7 +66,7 @@ static void cmd_done(struct fsc_state *, int result); | |||
66 | static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *); | 66 | static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *); |
67 | 67 | ||
68 | 68 | ||
69 | static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 69 | static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
70 | { | 70 | { |
71 | struct fsc_state *state; | 71 | struct fsc_state *state; |
72 | 72 | ||
@@ -99,6 +99,8 @@ static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd * | |||
99 | return 0; | 99 | return 0; |
100 | } | 100 | } |
101 | 101 | ||
102 | static DEF_SCSI_QCMD(mac53c94_queue) | ||
103 | |||
102 | static int mac53c94_host_reset(struct scsi_cmnd *cmd) | 104 | static int mac53c94_host_reset(struct scsi_cmnd *cmd) |
103 | { | 105 | { |
104 | struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata; | 106 | struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata; |
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 7ceb5cf12c6b..9aa048525eb2 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c | |||
@@ -366,7 +366,7 @@ mega_runpendq(adapter_t *adapter) | |||
366 | * The command queuing entry point for the mid-layer. | 366 | * The command queuing entry point for the mid-layer. |
367 | */ | 367 | */ |
368 | static int | 368 | static int |
369 | megaraid_queue(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *)) | 369 | megaraid_queue_lck(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *)) |
370 | { | 370 | { |
371 | adapter_t *adapter; | 371 | adapter_t *adapter; |
372 | scb_t *scb; | 372 | scb_t *scb; |
@@ -409,6 +409,8 @@ megaraid_queue(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *)) | |||
409 | return busy; | 409 | return busy; |
410 | } | 410 | } |
411 | 411 | ||
412 | static DEF_SCSI_QCMD(megaraid_queue) | ||
413 | |||
412 | /** | 414 | /** |
413 | * mega_allocate_scb() | 415 | * mega_allocate_scb() |
414 | * @adapter - pointer to our soft state | 416 | * @adapter - pointer to our soft state |
@@ -4456,7 +4458,7 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) | |||
4456 | 4458 | ||
4457 | scb->idx = CMDID_INT_CMDS; | 4459 | scb->idx = CMDID_INT_CMDS; |
4458 | 4460 | ||
4459 | megaraid_queue(scmd, mega_internal_done); | 4461 | megaraid_queue_lck(scmd, mega_internal_done); |
4460 | 4462 | ||
4461 | wait_for_completion(&adapter->int_waitq); | 4463 | wait_for_completion(&adapter->int_waitq); |
4462 | 4464 | ||
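A subtlety surfaces here: internal commands that need a private completion callback can no longer go through the public entry point, because the wrapper generated by DEF_SCSI_QCMD always completes through cmd->scsi_done. mega_internal_command therefore calls the _lck variant directly with its own done() routine and waits on a driver-private completion. A sketch of the idiom, with hypothetical names apart from megaraid_queue_lck (a real driver keeps the completion in its per-adapter state):

	/* Sketch only: drive an internal request through the lock-era handler
	 * with a private done() routine, bypassing the DEF_SCSI_QCMD wrapper. */
	static DECLARE_COMPLETION(example_int_waitq);

	static void example_internal_done(struct scsi_cmnd *scmd)
	{
		complete(&example_int_waitq);
	}

	static void example_issue_internal(struct scsi_cmnd *scmd)
	{
		megaraid_queue_lck(scmd, example_internal_done);
		wait_for_completion(&example_int_waitq);
	}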
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index 2b4a048cadf1..f5644745e24e 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h | |||
@@ -987,7 +987,7 @@ static int mega_query_adapter(adapter_t *); | |||
987 | static int issue_scb(adapter_t *, scb_t *); | 987 | static int issue_scb(adapter_t *, scb_t *); |
988 | static int mega_setup_mailbox(adapter_t *); | 988 | static int mega_setup_mailbox(adapter_t *); |
989 | 989 | ||
990 | static int megaraid_queue (Scsi_Cmnd *, void (*)(Scsi_Cmnd *)); | 990 | static int megaraid_queue (struct Scsi_Host *, struct scsi_cmnd *); |
991 | static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *); | 991 | static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *); |
992 | static void __mega_runpendq(adapter_t *); | 992 | static void __mega_runpendq(adapter_t *); |
993 | static int issue_scb_block(adapter_t *, u_char *); | 993 | static int issue_scb_block(adapter_t *, u_char *); |
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index a7810a106b37..5708cb27d078 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c | |||
@@ -113,8 +113,7 @@ static int megaraid_mbox_fire_sync_cmd(adapter_t *); | |||
113 | static void megaraid_mbox_display_scb(adapter_t *, scb_t *); | 113 | static void megaraid_mbox_display_scb(adapter_t *, scb_t *); |
114 | static void megaraid_mbox_setup_device_map(adapter_t *); | 114 | static void megaraid_mbox_setup_device_map(adapter_t *); |
115 | 115 | ||
116 | static int megaraid_queue_command(struct scsi_cmnd *, | 116 | static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
117 | void (*)(struct scsi_cmnd *)); | ||
118 | static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *); | 117 | static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *); |
119 | static void megaraid_mbox_runpendq(adapter_t *, scb_t *); | 118 | static void megaraid_mbox_runpendq(adapter_t *, scb_t *); |
120 | static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *, | 119 | static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *, |
@@ -1484,7 +1483,7 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb) | |||
1484 | * Queue entry point for mailbox based controllers. | 1483 | * Queue entry point for mailbox based controllers. |
1485 | */ | 1484 | */ |
1486 | static int | 1485 | static int |
1487 | megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) | 1486 | megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) |
1488 | { | 1487 | { |
1489 | adapter_t *adapter; | 1488 | adapter_t *adapter; |
1490 | scb_t *scb; | 1489 | scb_t *scb; |
@@ -1513,6 +1512,8 @@ megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) | |||
1513 | return if_busy; | 1512 | return if_busy; |
1514 | } | 1513 | } |
1515 | 1514 | ||
1515 | static DEF_SCSI_QCMD(megaraid_queue_command) | ||
1516 | |||
1516 | /** | 1517 | /** |
1517 | * megaraid_mbox_build_cmd - transform the mid-layer scsi commands | 1518 | * megaraid_mbox_build_cmd - transform the mid-layer scsi commands |
1518 | * @adapter : controller's soft state | 1519 | * @adapter : controller's soft state |
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index eb29d5085131..7451bc096a01 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c | |||
@@ -1334,7 +1334,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance) | |||
1334 | * @done: Callback entry point | 1334 | * @done: Callback entry point |
1335 | */ | 1335 | */ |
1336 | static int | 1336 | static int |
1337 | megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) | 1337 | megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) |
1338 | { | 1338 | { |
1339 | u32 frame_count; | 1339 | u32 frame_count; |
1340 | struct megasas_cmd *cmd; | 1340 | struct megasas_cmd *cmd; |
@@ -1417,6 +1417,8 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) | |||
1417 | return 0; | 1417 | return 0; |
1418 | } | 1418 | } |
1419 | 1419 | ||
1420 | static DEF_SCSI_QCMD(megasas_queue_command) | ||
1421 | |||
1420 | static struct megasas_instance *megasas_lookup_instance(u16 host_no) | 1422 | static struct megasas_instance *megasas_lookup_instance(u16 host_no) |
1421 | { | 1423 | { |
1422 | int i; | 1424 | int i; |
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 1f784fde2510..197aa1b3f0f3 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c | |||
@@ -1627,7 +1627,7 @@ static void cmd_complete(struct mesh_state *ms) | |||
1627 | * Called by midlayer with host locked to queue a new | 1627 | * Called by midlayer with host locked to queue a new |
1628 | * request | 1628 | * request |
1629 | */ | 1629 | */ |
1630 | static int mesh_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 1630 | static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
1631 | { | 1631 | { |
1632 | struct mesh_state *ms; | 1632 | struct mesh_state *ms; |
1633 | 1633 | ||
@@ -1648,6 +1648,8 @@ static int mesh_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | |||
1648 | return 0; | 1648 | return 0; |
1649 | } | 1649 | } |
1650 | 1650 | ||
1651 | static DEF_SCSI_QCMD(mesh_queue) | ||
1652 | |||
1651 | /* | 1653 | /* |
1652 | * Called to handle interrupts, either call by the interrupt | 1654 | * Called to handle interrupts, either call by the interrupt |
1653 | * handler (do_mesh_interrupt) or by other functions in | 1655 | * handler (do_mesh_interrupt) or by other functions in |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 16e99b686354..1a96a00418a4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | |||
@@ -3315,7 +3315,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) | |||
3315 | * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full | 3315 | * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full |
3316 | */ | 3316 | */ |
3317 | static int | 3317 | static int |
3318 | _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | 3318 | _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) |
3319 | { | 3319 | { |
3320 | struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); | 3320 | struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); |
3321 | struct MPT2SAS_DEVICE *sas_device_priv_data; | 3321 | struct MPT2SAS_DEVICE *sas_device_priv_data; |
@@ -3441,6 +3441,8 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | |||
3441 | return SCSI_MLQUEUE_HOST_BUSY; | 3441 | return SCSI_MLQUEUE_HOST_BUSY; |
3442 | } | 3442 | } |
3443 | 3443 | ||
3444 | static DEF_SCSI_QCMD(_scsih_qcmd) | ||
3445 | |||
3444 | /** | 3446 | /** |
3445 | * _scsih_normalize_sense - normalize descriptor and fixed format sense data | 3447 | * _scsih_normalize_sense - normalize descriptor and fixed format sense data |
3446 | * @sense_buffer: sense data returned by target | 3448 | * @sense_buffer: sense data returned by target |
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index d013a2aa2fd5..46cc3825638d 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c | |||
@@ -8029,7 +8029,7 @@ static int ncr53c8xx_slave_configure(struct scsi_device *device) | |||
8029 | return 0; | 8029 | return 0; |
8030 | } | 8030 | } |
8031 | 8031 | ||
8032 | static int ncr53c8xx_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | 8032 | static int ncr53c8xx_queue_command_lck (struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
8033 | { | 8033 | { |
8034 | struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb; | 8034 | struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb; |
8035 | unsigned long flags; | 8035 | unsigned long flags; |
@@ -8068,6 +8068,8 @@ printk("ncr53c8xx : command successfully queued\n"); | |||
8068 | return sts; | 8068 | return sts; |
8069 | } | 8069 | } |
8070 | 8070 | ||
8071 | static DEF_SCSI_QCMD(ncr53c8xx_queue_command) | ||
8072 | |||
8071 | irqreturn_t ncr53c8xx_intr(int irq, void *dev_id) | 8073 | irqreturn_t ncr53c8xx_intr(int irq, void *dev_id) |
8072 | { | 8074 | { |
8073 | unsigned long flags; | 8075 | unsigned long flags; |
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 4c1e54545200..6b8b021400f8 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c | |||
@@ -196,8 +196,7 @@ static void __exit exit_nsp32 (void); | |||
196 | static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int); | 196 | static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int); |
197 | 197 | ||
198 | static int nsp32_detect (struct pci_dev *pdev); | 198 | static int nsp32_detect (struct pci_dev *pdev); |
199 | static int nsp32_queuecommand(struct scsi_cmnd *, | 199 | static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
200 | void (*done)(struct scsi_cmnd *)); | ||
201 | static const char *nsp32_info (struct Scsi_Host *); | 200 | static const char *nsp32_info (struct Scsi_Host *); |
202 | static int nsp32_release (struct Scsi_Host *); | 201 | static int nsp32_release (struct Scsi_Host *); |
203 | 202 | ||
@@ -909,7 +908,7 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt) | |||
909 | return TRUE; | 908 | return TRUE; |
910 | } | 909 | } |
911 | 910 | ||
912 | static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 911 | static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
913 | { | 912 | { |
914 | nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; | 913 | nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; |
915 | nsp32_target *target; | 914 | nsp32_target *target; |
@@ -1050,6 +1049,8 @@ static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ | |||
1050 | return 0; | 1049 | return 0; |
1051 | } | 1050 | } |
1052 | 1051 | ||
1052 | static DEF_SCSI_QCMD(nsp32_queuecommand) | ||
1053 | |||
1053 | /* initialize asic */ | 1054 | /* initialize asic */ |
1054 | static int nsp32hw_init(nsp32_hw_data *data) | 1055 | static int nsp32hw_init(nsp32_hw_data *data) |
1055 | { | 1056 | { |
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h index 8dc5b1a5f5da..a04281cace2e 100644 --- a/drivers/scsi/pas16.h +++ b/drivers/scsi/pas16.h | |||
@@ -118,7 +118,7 @@ static int pas16_abort(Scsi_Cmnd *); | |||
118 | static int pas16_biosparam(struct scsi_device *, struct block_device *, | 118 | static int pas16_biosparam(struct scsi_device *, struct block_device *, |
119 | sector_t, int*); | 119 | sector_t, int*); |
120 | static int pas16_detect(struct scsi_host_template *); | 120 | static int pas16_detect(struct scsi_host_template *); |
121 | static int pas16_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); | 121 | static int pas16_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
122 | static int pas16_bus_reset(Scsi_Cmnd *); | 122 | static int pas16_bus_reset(Scsi_Cmnd *); |
123 | 123 | ||
124 | #ifndef CMD_PER_LUN | 124 | #ifndef CMD_PER_LUN |
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 9326c2c14880..be3f33d31a99 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c | |||
@@ -184,7 +184,7 @@ static void nsp_scsi_done(struct scsi_cmnd *SCpnt) | |||
184 | SCpnt->scsi_done(SCpnt); | 184 | SCpnt->scsi_done(SCpnt); |
185 | } | 185 | } |
186 | 186 | ||
187 | static int nsp_queuecommand(struct scsi_cmnd *SCpnt, | 187 | static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt, |
188 | void (*done)(struct scsi_cmnd *)) | 188 | void (*done)(struct scsi_cmnd *)) |
189 | { | 189 | { |
190 | #ifdef NSP_DEBUG | 190 | #ifdef NSP_DEBUG |
@@ -264,6 +264,8 @@ static int nsp_queuecommand(struct scsi_cmnd *SCpnt, | |||
264 | return 0; | 264 | return 0; |
265 | } | 265 | } |
266 | 266 | ||
267 | static DEF_SCSI_QCMD(nsp_queuecommand) | ||
268 | |||
267 | /* | 269 | /* |
268 | * setup PIO FIFO transfer mode and enable/disable to data out | 270 | * setup PIO FIFO transfer mode and enable/disable to data out |
269 | */ | 271 | */ |
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h index d68c9f267c5e..7fc9a9d0a448 100644 --- a/drivers/scsi/pcmcia/nsp_cs.h +++ b/drivers/scsi/pcmcia/nsp_cs.h | |||
@@ -299,8 +299,7 @@ static int nsp_proc_info ( | |||
299 | off_t offset, | 299 | off_t offset, |
300 | int length, | 300 | int length, |
301 | int inout); | 301 | int inout); |
302 | static int nsp_queuecommand(struct scsi_cmnd *SCpnt, | 302 | static int nsp_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); |
303 | void (* done)(struct scsi_cmnd *SCpnt)); | ||
304 | 303 | ||
305 | /* Error handler */ | 304 | /* Error handler */ |
306 | /*static int nsp_eh_abort (struct scsi_cmnd *SCpnt);*/ | 305 | /*static int nsp_eh_abort (struct scsi_cmnd *SCpnt);*/ |
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c index 0ae27cb5cd6f..8552296edaa1 100644 --- a/drivers/scsi/pcmcia/sym53c500_cs.c +++ b/drivers/scsi/pcmcia/sym53c500_cs.c | |||
@@ -547,7 +547,7 @@ SYM53C500_info(struct Scsi_Host *SChost) | |||
547 | } | 547 | } |
548 | 548 | ||
549 | static int | 549 | static int |
550 | SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | 550 | SYM53C500_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) |
551 | { | 551 | { |
552 | int i; | 552 | int i; |
553 | int port_base = SCpnt->device->host->io_port; | 553 | int port_base = SCpnt->device->host->io_port; |
@@ -583,6 +583,8 @@ SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
583 | return 0; | 583 | return 0; |
584 | } | 584 | } |
585 | 585 | ||
586 | static DEF_SCSI_QCMD(SYM53C500_queue) | ||
587 | |||
586 | static int | 588 | static int |
587 | SYM53C500_host_reset(struct scsi_cmnd *SCpnt) | 589 | SYM53C500_host_reset(struct scsi_cmnd *SCpnt) |
588 | { | 590 | { |
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 8e38ca8cd101..7f064f9ca828 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h | |||
@@ -50,7 +50,6 @@ | |||
50 | #include <linux/dma-mapping.h> | 50 | #include <linux/dma-mapping.h> |
51 | #include <linux/pci.h> | 51 | #include <linux/pci.h> |
52 | #include <linux/interrupt.h> | 52 | #include <linux/interrupt.h> |
53 | #include <linux/smp_lock.h> | ||
54 | #include <scsi/libsas.h> | 53 | #include <scsi/libsas.h> |
55 | #include <scsi/scsi_tcq.h> | 54 | #include <scsi/scsi_tcq.h> |
56 | #include <scsi/sas_ata.h> | 55 | #include <scsi/sas_ata.h> |
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index cf89091e4c3d..5e76a624cb08 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
@@ -3478,7 +3478,7 @@ static int pmcraid_copy_sglist( | |||
3478 | * SCSI_MLQUEUE_DEVICE_BUSY if device is busy | 3478 | * SCSI_MLQUEUE_DEVICE_BUSY if device is busy |
3479 | * SCSI_MLQUEUE_HOST_BUSY if host is busy | 3479 | * SCSI_MLQUEUE_HOST_BUSY if host is busy |
3480 | */ | 3480 | */ |
3481 | static int pmcraid_queuecommand( | 3481 | static int pmcraid_queuecommand_lck( |
3482 | struct scsi_cmnd *scsi_cmd, | 3482 | struct scsi_cmnd *scsi_cmd, |
3483 | void (*done) (struct scsi_cmnd *) | 3483 | void (*done) (struct scsi_cmnd *) |
3484 | ) | 3484 | ) |
@@ -3584,6 +3584,8 @@ static int pmcraid_queuecommand( | |||
3584 | return rc; | 3584 | return rc; |
3585 | } | 3585 | } |
3586 | 3586 | ||
3587 | static DEF_SCSI_QCMD(pmcraid_queuecommand) | ||
3588 | |||
3587 | /** | 3589 | /** |
3588 | * pmcraid_open -char node "open" entry, allowed only users with admin access | 3590 | * pmcraid_open -char node "open" entry, allowed only users with admin access |
3589 | */ | 3591 | */ |
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index 7bc2d796e403..d164c9639361 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c | |||
@@ -798,7 +798,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd) | |||
798 | return 0; | 798 | return 0; |
799 | } | 799 | } |
800 | 800 | ||
801 | static int ppa_queuecommand(struct scsi_cmnd *cmd, | 801 | static int ppa_queuecommand_lck(struct scsi_cmnd *cmd, |
802 | void (*done) (struct scsi_cmnd *)) | 802 | void (*done) (struct scsi_cmnd *)) |
803 | { | 803 | { |
804 | ppa_struct *dev = ppa_dev(cmd->device->host); | 804 | ppa_struct *dev = ppa_dev(cmd->device->host); |
@@ -821,6 +821,8 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd, | |||
821 | return 0; | 821 | return 0; |
822 | } | 822 | } |
823 | 823 | ||
824 | static DEF_SCSI_QCMD(ppa_queuecommand) | ||
825 | |||
824 | /* | 826 | /* |
825 | * Apparently the disk->capacity attribute is off by 1 sector | 827 | * Apparently the disk->capacity attribute is off by 1 sector |
826 | * for all disk drives. We add the one here, but it should really | 828 | * for all disk drives. We add the one here, but it should really |
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index 92ffbb510498..cd178b9e40cd 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c | |||
@@ -211,7 +211,7 @@ static int ps3rom_write_request(struct ps3_storage_device *dev, | |||
211 | return 0; | 211 | return 0; |
212 | } | 212 | } |
213 | 213 | ||
214 | static int ps3rom_queuecommand(struct scsi_cmnd *cmd, | 214 | static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd, |
215 | void (*done)(struct scsi_cmnd *)) | 215 | void (*done)(struct scsi_cmnd *)) |
216 | { | 216 | { |
217 | struct ps3rom_private *priv = shost_priv(cmd->device->host); | 217 | struct ps3rom_private *priv = shost_priv(cmd->device->host); |
@@ -260,6 +260,8 @@ static int ps3rom_queuecommand(struct scsi_cmnd *cmd, | |||
260 | return 0; | 260 | return 0; |
261 | } | 261 | } |
262 | 262 | ||
263 | static DEF_SCSI_QCMD(ps3rom_queuecommand) | ||
264 | |||
263 | static int decode_lv1_status(u64 status, unsigned char *sense_key, | 265 | static int decode_lv1_status(u64 status, unsigned char *sense_key, |
264 | unsigned char *asc, unsigned char *ascq) | 266 | unsigned char *asc, unsigned char *ascq) |
265 | { | 267 | { |
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index b8166ecfd0e3..5dec684bf010 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c | |||
@@ -727,7 +727,7 @@ qla1280_info(struct Scsi_Host *host) | |||
727 | * context which is a big NO! NO!. | 727 | * context which is a big NO! NO!. |
728 | **************************************************************************/ | 728 | **************************************************************************/ |
729 | static int | 729 | static int |
730 | qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) | 730 | qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) |
731 | { | 731 | { |
732 | struct Scsi_Host *host = cmd->device->host; | 732 | struct Scsi_Host *host = cmd->device->host; |
733 | struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; | 733 | struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; |
@@ -756,6 +756,8 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) | |||
756 | return status; | 756 | return status; |
757 | } | 757 | } |
758 | 758 | ||
759 | static DEF_SCSI_QCMD(qla1280_queuecommand) | ||
760 | |||
759 | enum action { | 761 | enum action { |
760 | ABORT_COMMAND, | 762 | ABORT_COMMAND, |
761 | DEVICE_RESET, | 763 | DEVICE_RESET, |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1830e6e97315..1644eabaafeb 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -179,8 +179,7 @@ static int qla2xxx_slave_alloc(struct scsi_device *); | |||
179 | static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time); | 179 | static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time); |
180 | static void qla2xxx_scan_start(struct Scsi_Host *); | 180 | static void qla2xxx_scan_start(struct Scsi_Host *); |
181 | static void qla2xxx_slave_destroy(struct scsi_device *); | 181 | static void qla2xxx_slave_destroy(struct scsi_device *); |
182 | static int qla2xxx_queuecommand(struct scsi_cmnd *cmd, | 182 | static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
183 | void (*fn)(struct scsi_cmnd *)); | ||
184 | static int qla2xxx_eh_abort(struct scsi_cmnd *); | 183 | static int qla2xxx_eh_abort(struct scsi_cmnd *); |
185 | static int qla2xxx_eh_device_reset(struct scsi_cmnd *); | 184 | static int qla2xxx_eh_device_reset(struct scsi_cmnd *); |
186 | static int qla2xxx_eh_target_reset(struct scsi_cmnd *); | 185 | static int qla2xxx_eh_target_reset(struct scsi_cmnd *); |
@@ -535,7 +534,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
535 | } | 534 | } |
536 | 535 | ||
537 | static int | 536 | static int |
538 | qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 537 | qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
539 | { | 538 | { |
540 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); | 539 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); |
541 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | 540 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; |
@@ -609,6 +608,8 @@ qc24_fail_command: | |||
609 | return 0; | 608 | return 0; |
610 | } | 609 | } |
611 | 610 | ||
611 | static DEF_SCSI_QCMD(qla2xxx_queuecommand) | ||
612 | |||
612 | 613 | ||
613 | /* | 614 | /* |
614 | * qla2x00_eh_wait_on_command | 615 | * qla2x00_eh_wait_on_command |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index f4cd846abf6d..0d48fb4d1044 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -79,8 +79,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); | |||
79 | /* | 79 | /* |
80 | * SCSI host template entry points | 80 | * SCSI host template entry points |
81 | */ | 81 | */ |
82 | static int qla4xxx_queuecommand(struct scsi_cmnd *cmd, | 82 | static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
83 | void (*done) (struct scsi_cmnd *)); | ||
84 | static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); | 83 | static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); |
85 | static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); | 84 | static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); |
86 | static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); | 85 | static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); |
@@ -464,7 +463,7 @@ void qla4xxx_srb_compl(struct kref *ref) | |||
464 | * completion handling). Unfortunely, it sometimes calls the scheduler | 463 | * completion handling). Unfortunely, it sometimes calls the scheduler |
465 | * in interrupt context which is a big NO! NO!. | 464 | * in interrupt context which is a big NO! NO!. |
466 | **/ | 465 | **/ |
467 | static int qla4xxx_queuecommand(struct scsi_cmnd *cmd, | 466 | static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd, |
468 | void (*done)(struct scsi_cmnd *)) | 467 | void (*done)(struct scsi_cmnd *)) |
469 | { | 468 | { |
470 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); | 469 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); |
@@ -538,6 +537,8 @@ qc_fail_command: | |||
538 | return 0; | 537 | return 0; |
539 | } | 538 | } |
540 | 539 | ||
540 | static DEF_SCSI_QCMD(qla4xxx_queuecommand) | ||
541 | |||
541 | /** | 542 | /** |
542 | * qla4xxx_mem_free - frees memory allocated to adapter | 543 | * qla4xxx_mem_free - frees memory allocated to adapter |
543 | * @ha: Pointer to host adapter structure. | 544 | * @ha: Pointer to host adapter structure. |
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c index 1ad51552d6b1..c3a9151ca823 100644 --- a/drivers/scsi/qlogicfas408.c +++ b/drivers/scsi/qlogicfas408.c | |||
@@ -439,7 +439,7 @@ irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id) | |||
439 | * Queued command | 439 | * Queued command |
440 | */ | 440 | */ |
441 | 441 | ||
442 | int qlogicfas408_queuecommand(struct scsi_cmnd *cmd, | 442 | static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd, |
443 | void (*done) (struct scsi_cmnd *)) | 443 | void (*done) (struct scsi_cmnd *)) |
444 | { | 444 | { |
445 | struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); | 445 | struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); |
@@ -459,6 +459,8 @@ int qlogicfas408_queuecommand(struct scsi_cmnd *cmd, | |||
459 | return 0; | 459 | return 0; |
460 | } | 460 | } |
461 | 461 | ||
462 | DEF_SCSI_QCMD(qlogicfas408_queuecommand) | ||
463 | |||
462 | /* | 464 | /* |
463 | * Return bios parameters | 465 | * Return bios parameters |
464 | */ | 466 | */ |
diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h index 260626427a32..2f6c0a166200 100644 --- a/drivers/scsi/qlogicfas408.h +++ b/drivers/scsi/qlogicfas408.h | |||
@@ -103,8 +103,7 @@ struct qlogicfas408_priv { | |||
103 | #define get_priv_by_host(x) (struct qlogicfas408_priv *)&((x)->hostdata[0]) | 103 | #define get_priv_by_host(x) (struct qlogicfas408_priv *)&((x)->hostdata[0]) |
104 | 104 | ||
105 | irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id); | 105 | irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id); |
106 | int qlogicfas408_queuecommand(struct scsi_cmnd * cmd, | 106 | int qlogicfas408_queuecommand(struct Scsi_Host *h, struct scsi_cmnd * cmd); |
107 | void (*done) (struct scsi_cmnd *)); | ||
108 | int qlogicfas408_biosparam(struct scsi_device * disk, | 107 | int qlogicfas408_biosparam(struct scsi_device * disk, |
109 | struct block_device *dev, | 108 | struct block_device *dev, |
110 | sector_t capacity, int ip[]); | 109 | sector_t capacity, int ip[]); |
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index f8c561cf751e..664c9572d0c9 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -1003,7 +1003,7 @@ static int qlogicpti_slave_configure(struct scsi_device *sdev) | |||
1003 | * | 1003 | * |
1004 | * "This code must fly." -davem | 1004 | * "This code must fly." -davem |
1005 | */ | 1005 | */ |
1006 | static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *)) | 1006 | static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *)) |
1007 | { | 1007 | { |
1008 | struct Scsi_Host *host = Cmnd->device->host; | 1008 | struct Scsi_Host *host = Cmnd->device->host; |
1009 | struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; | 1009 | struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; |
@@ -1052,6 +1052,8 @@ toss_command: | |||
1052 | return 1; | 1052 | return 1; |
1053 | } | 1053 | } |
1054 | 1054 | ||
1055 | static DEF_SCSI_QCMD(qlogicpti_queuecommand) | ||
1056 | |||
1055 | static int qlogicpti_return_status(struct Status_Entry *sts, int id) | 1057 | static int qlogicpti_return_status(struct Status_Entry *sts, int id) |
1056 | { | 1058 | { |
1057 | int host_status = DID_ERROR; | 1059 | int host_status = DID_ERROR; |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 348fba0a8976..2aeb2e9c4d3b 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -634,12 +634,13 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) | |||
634 | * Description: a serial number identifies a request for error recovery | 634 | * Description: a serial number identifies a request for error recovery |
635 | * and debugging purposes. Protected by the Host_Lock of host. | 635 | * and debugging purposes. Protected by the Host_Lock of host. |
636 | */ | 636 | */ |
637 | static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) | 637 | void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) |
638 | { | 638 | { |
639 | cmd->serial_number = host->cmd_serial_number++; | 639 | cmd->serial_number = host->cmd_serial_number++; |
640 | if (cmd->serial_number == 0) | 640 | if (cmd->serial_number == 0) |
641 | cmd->serial_number = host->cmd_serial_number++; | 641 | cmd->serial_number = host->cmd_serial_number++; |
642 | } | 642 | } |
643 | EXPORT_SYMBOL(scsi_cmd_get_serial); | ||
643 | 644 | ||
644 | /** | 645 | /** |
645 | * scsi_dispatch_command - Dispatch a command to the low-level driver. | 646 | * scsi_dispatch_command - Dispatch a command to the low-level driver. |
@@ -651,7 +652,6 @@ static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd | |||
651 | int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | 652 | int scsi_dispatch_cmd(struct scsi_cmnd *cmd) |
652 | { | 653 | { |
653 | struct Scsi_Host *host = cmd->device->host; | 654 | struct Scsi_Host *host = cmd->device->host; |
654 | unsigned long flags = 0; | ||
655 | unsigned long timeout; | 655 | unsigned long timeout; |
656 | int rtn = 0; | 656 | int rtn = 0; |
657 | 657 | ||
@@ -737,23 +737,15 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
737 | goto out; | 737 | goto out; |
738 | } | 738 | } |
739 | 739 | ||
740 | spin_lock_irqsave(host->host_lock, flags); | ||
741 | /* | ||
742 | * AK: unlikely race here: for some reason the timer could | ||
743 | * expire before the serial number is set up below. | ||
744 | * | ||
745 | * TODO: kill serial or move to blk layer | ||
746 | */ | ||
747 | scsi_cmd_get_serial(host, cmd); | ||
748 | |||
749 | if (unlikely(host->shost_state == SHOST_DEL)) { | 740 | if (unlikely(host->shost_state == SHOST_DEL)) { |
750 | cmd->result = (DID_NO_CONNECT << 16); | 741 | cmd->result = (DID_NO_CONNECT << 16); |
751 | scsi_done(cmd); | 742 | scsi_done(cmd); |
752 | } else { | 743 | } else { |
753 | trace_scsi_dispatch_cmd_start(cmd); | 744 | trace_scsi_dispatch_cmd_start(cmd); |
754 | rtn = host->hostt->queuecommand(cmd, scsi_done); | 745 | cmd->scsi_done = scsi_done; |
746 | rtn = host->hostt->queuecommand(host, cmd); | ||
755 | } | 747 | } |
756 | spin_unlock_irqrestore(host->host_lock, flags); | 748 | |
757 | if (rtn) { | 749 | if (rtn) { |
758 | trace_scsi_dispatch_cmd_error(cmd, rtn); | 750 | trace_scsi_dispatch_cmd_error(cmd, rtn); |
759 | if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && | 751 | if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && |
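The scsi.c hunks above are the midlayer half of the lock push-down: scsi_cmd_get_serial() is un-inlined and exported so the generated DEF_SCSI_QCMD() wrappers can call it, and scsi_dispatch_cmd() no longer takes host_lock around the driver call; it stores the completion callback in cmd->scsi_done and invokes hostt->queuecommand(host, cmd). A fully converted driver can therefore implement the new prototype directly and take only the locks it really needs. A hedged sketch, where struct foo_host, foo_hw_ready() and foo_hw_submit() are invented placeholders rather than any real driver API:

/*
 * Sketch of a lock-free queuecommand under the new prototype.
 * struct foo_host, foo_hw_ready() and foo_hw_submit() stand in for the
 * driver's own private state and hardware submission routines.
 */
static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct foo_host *fh = shost_priv(shost);	/* driver private data */

	if (!foo_hw_ready(fh))
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer will retry later */

	foo_hw_submit(fh, cmd);			/* hand the command to hardware */
	return 0;				/* cmd->scsi_done(cmd) runs at completion */
}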
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 2c36bae3bd4b..2f1f9b079b10 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -3538,7 +3538,7 @@ static void sdebug_remove_adapter(void) | |||
3538 | } | 3538 | } |
3539 | 3539 | ||
3540 | static | 3540 | static |
3541 | int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done) | 3541 | int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) |
3542 | { | 3542 | { |
3543 | unsigned char *cmd = (unsigned char *) SCpnt->cmnd; | 3543 | unsigned char *cmd = (unsigned char *) SCpnt->cmnd; |
3544 | int len, k; | 3544 | int len, k; |
@@ -3884,6 +3884,8 @@ write: | |||
3884 | (delay_override ? 0 : scsi_debug_delay)); | 3884 | (delay_override ? 0 : scsi_debug_delay)); |
3885 | } | 3885 | } |
3886 | 3886 | ||
3887 | static DEF_SCSI_QCMD(scsi_debug_queuecommand) | ||
3888 | |||
3887 | static struct scsi_host_template sdebug_driver_template = { | 3889 | static struct scsi_host_template sdebug_driver_template = { |
3888 | .proc_info = scsi_debug_proc_info, | 3890 | .proc_info = scsi_debug_proc_info, |
3889 | .proc_name = sdebug_proc_name, | 3891 | .proc_name = sdebug_proc_name, |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index f3cf924a2cd9..824b8fc03ce5 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -773,17 +773,15 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, | |||
773 | struct Scsi_Host *shost = sdev->host; | 773 | struct Scsi_Host *shost = sdev->host; |
774 | DECLARE_COMPLETION_ONSTACK(done); | 774 | DECLARE_COMPLETION_ONSTACK(done); |
775 | unsigned long timeleft; | 775 | unsigned long timeleft; |
776 | unsigned long flags; | ||
777 | struct scsi_eh_save ses; | 776 | struct scsi_eh_save ses; |
778 | int rtn; | 777 | int rtn; |
779 | 778 | ||
780 | scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); | 779 | scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); |
781 | shost->eh_action = &done; | 780 | shost->eh_action = &done; |
782 | 781 | ||
783 | spin_lock_irqsave(shost->host_lock, flags); | ||
784 | scsi_log_send(scmd); | 782 | scsi_log_send(scmd); |
785 | shost->hostt->queuecommand(scmd, scsi_eh_done); | 783 | scmd->scsi_done = scsi_eh_done; |
786 | spin_unlock_irqrestore(shost->host_lock, flags); | 784 | shost->hostt->queuecommand(shost, scmd); |
787 | 785 | ||
788 | timeleft = wait_for_completion_timeout(&done, timeout); | 786 | timeleft = wait_for_completion_timeout(&done, timeout); |
789 | 787 | ||
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b9ab3a590e4b..956496182c80 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -46,7 +46,6 @@ | |||
46 | #include <linux/blkdev.h> | 46 | #include <linux/blkdev.h> |
47 | #include <linux/blkpg.h> | 47 | #include <linux/blkpg.h> |
48 | #include <linux/delay.h> | 48 | #include <linux/delay.h> |
49 | #include <linux/smp_lock.h> | ||
50 | #include <linux/mutex.h> | 49 | #include <linux/mutex.h> |
51 | #include <linux/string_helpers.h> | 50 | #include <linux/string_helpers.h> |
52 | #include <linux/async.h> | 51 | #include <linux/async.h> |
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 9c73dbda3bbb..606215e54b88 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c | |||
@@ -572,7 +572,7 @@ stex_slave_destroy(struct scsi_device *sdev) | |||
572 | } | 572 | } |
573 | 573 | ||
574 | static int | 574 | static int |
575 | stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | 575 | stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
576 | { | 576 | { |
577 | struct st_hba *hba; | 577 | struct st_hba *hba; |
578 | struct Scsi_Host *host; | 578 | struct Scsi_Host *host; |
@@ -698,6 +698,8 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
698 | return 0; | 698 | return 0; |
699 | } | 699 | } |
700 | 700 | ||
701 | static DEF_SCSI_QCMD(stex_queuecommand) | ||
702 | |||
701 | static void stex_scsi_done(struct st_ccb *ccb) | 703 | static void stex_scsi_done(struct st_ccb *ccb) |
702 | { | 704 | { |
703 | struct scsi_cmnd *cmd = ccb->cmd; | 705 | struct scsi_cmnd *cmd = ccb->cmd; |
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index 713620ed70d9..4f0e5485ffde 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c | |||
@@ -908,7 +908,7 @@ static int NCR5380_init (struct Scsi_Host *instance, int flags) | |||
908 | */ | 908 | */ |
909 | 909 | ||
910 | /* Only make static if a wrapper function is used */ | 910 | /* Only make static if a wrapper function is used */ |
911 | static int NCR5380_queue_command(struct scsi_cmnd *cmd, | 911 | static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, |
912 | void (*done)(struct scsi_cmnd *)) | 912 | void (*done)(struct scsi_cmnd *)) |
913 | { | 913 | { |
914 | SETUP_HOSTDATA(cmd->device->host); | 914 | SETUP_HOSTDATA(cmd->device->host); |
@@ -1019,6 +1019,8 @@ static int NCR5380_queue_command(struct scsi_cmnd *cmd, | |||
1019 | return 0; | 1019 | return 0; |
1020 | } | 1020 | } |
1021 | 1021 | ||
1022 | static DEF_SCSI_QCMD(NCR5380_queue_command) | ||
1023 | |||
1022 | /* | 1024 | /* |
1023 | * Function : NCR5380_main (void) | 1025 | * Function : NCR5380_main (void) |
1024 | * | 1026 | * |
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h index b29a9d661ca4..bcefd8458e65 100644 --- a/drivers/scsi/sun3_scsi.h +++ b/drivers/scsi/sun3_scsi.h | |||
@@ -51,8 +51,7 @@ static int sun3scsi_abort(struct scsi_cmnd *); | |||
51 | static int sun3scsi_detect (struct scsi_host_template *); | 51 | static int sun3scsi_detect (struct scsi_host_template *); |
52 | static const char *sun3scsi_info (struct Scsi_Host *); | 52 | static const char *sun3scsi_info (struct Scsi_Host *); |
53 | static int sun3scsi_bus_reset(struct scsi_cmnd *); | 53 | static int sun3scsi_bus_reset(struct scsi_cmnd *); |
54 | static int sun3scsi_queue_command(struct scsi_cmnd *, | 54 | static int sun3scsi_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
55 | void (*done)(struct scsi_cmnd *)); | ||
56 | static int sun3scsi_release (struct Scsi_Host *); | 55 | static int sun3scsi_release (struct Scsi_Host *); |
57 | 56 | ||
58 | #ifndef CMD_PER_LUN | 57 | #ifndef CMD_PER_LUN |
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c index e5c369bb568f..190107ae120b 100644 --- a/drivers/scsi/sym53c416.c +++ b/drivers/scsi/sym53c416.c | |||
@@ -734,7 +734,7 @@ const char *sym53c416_info(struct Scsi_Host *SChost) | |||
734 | return info; | 734 | return info; |
735 | } | 735 | } |
736 | 736 | ||
737 | int sym53c416_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) | 737 | static int sym53c416_queuecommand_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) |
738 | { | 738 | { |
739 | int base; | 739 | int base; |
740 | unsigned long flags = 0; | 740 | unsigned long flags = 0; |
@@ -761,6 +761,8 @@ int sym53c416_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) | |||
761 | return 0; | 761 | return 0; |
762 | } | 762 | } |
763 | 763 | ||
764 | DEF_SCSI_QCMD(sym53c416_queuecommand) | ||
765 | |||
764 | static int sym53c416_host_reset(Scsi_Cmnd *SCpnt) | 766 | static int sym53c416_host_reset(Scsi_Cmnd *SCpnt) |
765 | { | 767 | { |
766 | int base; | 768 | int base; |
diff --git a/drivers/scsi/sym53c416.h b/drivers/scsi/sym53c416.h index 77860d0748ff..387de5d80a70 100644 --- a/drivers/scsi/sym53c416.h +++ b/drivers/scsi/sym53c416.h | |||
@@ -25,7 +25,7 @@ | |||
25 | static int sym53c416_detect(struct scsi_host_template *); | 25 | static int sym53c416_detect(struct scsi_host_template *); |
26 | static const char *sym53c416_info(struct Scsi_Host *); | 26 | static const char *sym53c416_info(struct Scsi_Host *); |
27 | static int sym53c416_release(struct Scsi_Host *); | 27 | static int sym53c416_release(struct Scsi_Host *); |
28 | static int sym53c416_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); | 28 | static int sym53c416_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
29 | static int sym53c416_host_reset(Scsi_Cmnd *); | 29 | static int sym53c416_host_reset(Scsi_Cmnd *); |
30 | static int sym53c416_bios_param(struct scsi_device *, struct block_device *, | 30 | static int sym53c416_bios_param(struct scsi_device *, struct block_device *, |
31 | sector_t, int *); | 31 | sector_t, int *); |
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 8b955b534a36..6b97ded9d45d 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c | |||
@@ -505,7 +505,7 @@ void sym_log_bus_error(struct Scsi_Host *shost) | |||
505 | * queuecommand method. Entered with the host adapter lock held and | 505 | * queuecommand method. Entered with the host adapter lock held and |
506 | * interrupts disabled. | 506 | * interrupts disabled. |
507 | */ | 507 | */ |
508 | static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, | 508 | static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd, |
509 | void (*done)(struct scsi_cmnd *)) | 509 | void (*done)(struct scsi_cmnd *)) |
510 | { | 510 | { |
511 | struct sym_hcb *np = SYM_SOFTC_PTR(cmd); | 511 | struct sym_hcb *np = SYM_SOFTC_PTR(cmd); |
@@ -536,6 +536,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, | |||
536 | return 0; | 536 | return 0; |
537 | } | 537 | } |
538 | 538 | ||
539 | static DEF_SCSI_QCMD(sym53c8xx_queue_command) | ||
540 | |||
539 | /* | 541 | /* |
540 | * Linux entry point of the interrupt handler. | 542 | * Linux entry point of the interrupt handler. |
541 | */ | 543 | */ |
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h index 76a069b7ac0b..ada1115079c9 100644 --- a/drivers/scsi/t128.h +++ b/drivers/scsi/t128.h | |||
@@ -96,8 +96,7 @@ static int t128_abort(struct scsi_cmnd *); | |||
96 | static int t128_biosparam(struct scsi_device *, struct block_device *, | 96 | static int t128_biosparam(struct scsi_device *, struct block_device *, |
97 | sector_t, int*); | 97 | sector_t, int*); |
98 | static int t128_detect(struct scsi_host_template *); | 98 | static int t128_detect(struct scsi_host_template *); |
99 | static int t128_queue_command(struct scsi_cmnd *, | 99 | static int t128_queue_command(struct Scsi_Host *, struct scsi_cmnd *); |
100 | void (*done)(struct scsi_cmnd *)); | ||
101 | static int t128_bus_reset(struct scsi_cmnd *); | 100 | static int t128_bus_reset(struct scsi_cmnd *); |
102 | 101 | ||
103 | #ifndef CMD_PER_LUN | 102 | #ifndef CMD_PER_LUN |
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c index 27866b0adfeb..a124a28f2ccb 100644 --- a/drivers/scsi/tmscsim.c +++ b/drivers/scsi/tmscsim.c | |||
@@ -1883,7 +1883,7 @@ dc390_ScsiRstDetect( struct dc390_acb* pACB ) | |||
1883 | return; | 1883 | return; |
1884 | } | 1884 | } |
1885 | 1885 | ||
1886 | static int DC390_queuecommand(struct scsi_cmnd *cmd, | 1886 | static int DC390_queuecommand_lck(struct scsi_cmnd *cmd, |
1887 | void (*done)(struct scsi_cmnd *)) | 1887 | void (*done)(struct scsi_cmnd *)) |
1888 | { | 1888 | { |
1889 | struct scsi_device *sdev = cmd->device; | 1889 | struct scsi_device *sdev = cmd->device; |
@@ -1944,6 +1944,8 @@ static int DC390_queuecommand(struct scsi_cmnd *cmd, | |||
1944 | return SCSI_MLQUEUE_DEVICE_BUSY; | 1944 | return SCSI_MLQUEUE_DEVICE_BUSY; |
1945 | } | 1945 | } |
1946 | 1946 | ||
1947 | static DEF_SCSI_QCMD(DC390_queuecommand) | ||
1948 | |||
1947 | static void dc390_dumpinfo (struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB) | 1949 | static void dc390_dumpinfo (struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB) |
1948 | { | 1950 | { |
1949 | struct pci_dev *pdev; | 1951 | struct pci_dev *pdev; |
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c index 5d9fdeeb2315..edfc5da8be4c 100644 --- a/drivers/scsi/u14-34f.c +++ b/drivers/scsi/u14-34f.c | |||
@@ -433,7 +433,7 @@ | |||
433 | 433 | ||
434 | static int u14_34f_detect(struct scsi_host_template *); | 434 | static int u14_34f_detect(struct scsi_host_template *); |
435 | static int u14_34f_release(struct Scsi_Host *); | 435 | static int u14_34f_release(struct Scsi_Host *); |
436 | static int u14_34f_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); | 436 | static int u14_34f_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
437 | static int u14_34f_eh_abort(struct scsi_cmnd *); | 437 | static int u14_34f_eh_abort(struct scsi_cmnd *); |
438 | static int u14_34f_eh_host_reset(struct scsi_cmnd *); | 438 | static int u14_34f_eh_host_reset(struct scsi_cmnd *); |
439 | static int u14_34f_bios_param(struct scsi_device *, struct block_device *, | 439 | static int u14_34f_bios_param(struct scsi_device *, struct block_device *, |
@@ -1248,7 +1248,7 @@ static void scsi_to_dev_dir(unsigned int i, unsigned int j) { | |||
1248 | 1248 | ||
1249 | } | 1249 | } |
1250 | 1250 | ||
1251 | static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { | 1251 | static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { |
1252 | unsigned int i, j, k; | 1252 | unsigned int i, j, k; |
1253 | struct mscp *cpp; | 1253 | struct mscp *cpp; |
1254 | 1254 | ||
@@ -1329,6 +1329,8 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs | |||
1329 | return 0; | 1329 | return 0; |
1330 | } | 1330 | } |
1331 | 1331 | ||
1332 | static DEF_SCSI_QCMD(u14_34f_queuecommand) | ||
1333 | |||
1332 | static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) { | 1334 | static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) { |
1333 | unsigned int i, j; | 1335 | unsigned int i, j; |
1334 | 1336 | ||
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c index 27aa40f3980e..0571ef9639cb 100644 --- a/drivers/scsi/ultrastor.c +++ b/drivers/scsi/ultrastor.c | |||
@@ -700,7 +700,7 @@ static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt) | |||
700 | mscp->transfer_data_length = transfer_length; | 700 | mscp->transfer_data_length = transfer_length; |
701 | } | 701 | } |
702 | 702 | ||
703 | static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt, | 703 | static int ultrastor_queuecommand_lck(struct scsi_cmnd *SCpnt, |
704 | void (*done) (struct scsi_cmnd *)) | 704 | void (*done) (struct scsi_cmnd *)) |
705 | { | 705 | { |
706 | struct mscp *my_mscp; | 706 | struct mscp *my_mscp; |
@@ -825,6 +825,8 @@ retry: | |||
825 | return 0; | 825 | return 0; |
826 | } | 826 | } |
827 | 827 | ||
828 | static DEF_SCSI_QCMD(ultrastor_queuecommand) | ||
829 | |||
828 | /* This code must deal with 2 cases: | 830 | /* This code must deal with 2 cases: |
829 | 831 | ||
830 | 1. The command has not been written to the OGM. In this case, set | 832 | 1. The command has not been written to the OGM. In this case, set |
diff --git a/drivers/scsi/ultrastor.h b/drivers/scsi/ultrastor.h index a692905f95f7..165c18b5cf5f 100644 --- a/drivers/scsi/ultrastor.h +++ b/drivers/scsi/ultrastor.h | |||
@@ -15,8 +15,7 @@ | |||
15 | 15 | ||
16 | static int ultrastor_detect(struct scsi_host_template *); | 16 | static int ultrastor_detect(struct scsi_host_template *); |
17 | static const char *ultrastor_info(struct Scsi_Host *shpnt); | 17 | static const char *ultrastor_info(struct Scsi_Host *shpnt); |
18 | static int ultrastor_queuecommand(struct scsi_cmnd *, | 18 | static int ultrastor_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
19 | void (*done)(struct scsi_cmnd *)); | ||
20 | static int ultrastor_abort(struct scsi_cmnd *); | 19 | static int ultrastor_abort(struct scsi_cmnd *); |
21 | static int ultrastor_host_reset(struct scsi_cmnd *); | 20 | static int ultrastor_host_reset(struct scsi_cmnd *); |
22 | static int ultrastor_biosparam(struct scsi_device *, struct block_device *, | 21 | static int ultrastor_biosparam(struct scsi_device *, struct block_device *, |
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 26894459c37f..a18996d24466 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c | |||
@@ -690,7 +690,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, | |||
690 | return 0; | 690 | return 0; |
691 | } | 691 | } |
692 | 692 | ||
693 | static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | 693 | static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) |
694 | { | 694 | { |
695 | struct Scsi_Host *host = cmd->device->host; | 695 | struct Scsi_Host *host = cmd->device->host; |
696 | struct pvscsi_adapter *adapter = shost_priv(host); | 696 | struct pvscsi_adapter *adapter = shost_priv(host); |
@@ -719,6 +719,8 @@ static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | |||
719 | return 0; | 719 | return 0; |
720 | } | 720 | } |
721 | 721 | ||
722 | static DEF_SCSI_QCMD(pvscsi_queue) | ||
723 | |||
722 | static int pvscsi_abort(struct scsi_cmnd *cmd) | 724 | static int pvscsi_abort(struct scsi_cmnd *cmd) |
723 | { | 725 | { |
724 | struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); | 726 | struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); |
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index b701bf2cc187..5f697e0bd009 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c | |||
@@ -371,8 +371,8 @@ calc_sync_msg(unsigned int period, unsigned int offset, unsigned int fast, | |||
371 | msg[1] = offset; | 371 | msg[1] = offset; |
372 | } | 372 | } |
373 | 373 | ||
374 | int | 374 | static int |
375 | wd33c93_queuecommand(struct scsi_cmnd *cmd, | 375 | wd33c93_queuecommand_lck(struct scsi_cmnd *cmd, |
376 | void (*done)(struct scsi_cmnd *)) | 376 | void (*done)(struct scsi_cmnd *)) |
377 | { | 377 | { |
378 | struct WD33C93_hostdata *hostdata; | 378 | struct WD33C93_hostdata *hostdata; |
@@ -468,6 +468,8 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd, | |||
468 | return 0; | 468 | return 0; |
469 | } | 469 | } |
470 | 470 | ||
471 | DEF_SCSI_QCMD(wd33c93_queuecommand) | ||
472 | |||
471 | /* | 473 | /* |
472 | * This routine attempts to start a scsi command. If the host_card is | 474 | * This routine attempts to start a scsi command. If the host_card is |
473 | * already connected, we give up immediately. Otherwise, look through | 475 | * already connected, we give up immediately. Otherwise, look through |
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h index 1ed5f3bf388e..3b463d7304dc 100644 --- a/drivers/scsi/wd33c93.h +++ b/drivers/scsi/wd33c93.h | |||
@@ -343,8 +343,7 @@ struct WD33C93_hostdata { | |||
343 | void wd33c93_init (struct Scsi_Host *instance, const wd33c93_regs regs, | 343 | void wd33c93_init (struct Scsi_Host *instance, const wd33c93_regs regs, |
344 | dma_setup_t setup, dma_stop_t stop, int clock_freq); | 344 | dma_setup_t setup, dma_stop_t stop, int clock_freq); |
345 | int wd33c93_abort (struct scsi_cmnd *cmd); | 345 | int wd33c93_abort (struct scsi_cmnd *cmd); |
346 | int wd33c93_queuecommand (struct scsi_cmnd *cmd, | 346 | int wd33c93_queuecommand (struct Scsi_Host *h, struct scsi_cmnd *cmd); |
347 | void (*done)(struct scsi_cmnd *)); | ||
348 | void wd33c93_intr (struct Scsi_Host *instance); | 347 | void wd33c93_intr (struct Scsi_Host *instance); |
349 | int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int); | 348 | int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int); |
350 | int wd33c93_host_reset (struct scsi_cmnd *); | 349 | int wd33c93_host_reset (struct scsi_cmnd *); |
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c index 333580bf37c5..db451ae0a368 100644 --- a/drivers/scsi/wd7000.c +++ b/drivers/scsi/wd7000.c | |||
@@ -1082,7 +1082,7 @@ static irqreturn_t wd7000_intr(int irq, void *dev_id) | |||
1082 | return IRQ_HANDLED; | 1082 | return IRQ_HANDLED; |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | static int wd7000_queuecommand(struct scsi_cmnd *SCpnt, | 1085 | static int wd7000_queuecommand_lck(struct scsi_cmnd *SCpnt, |
1086 | void (*done)(struct scsi_cmnd *)) | 1086 | void (*done)(struct scsi_cmnd *)) |
1087 | { | 1087 | { |
1088 | Scb *scb; | 1088 | Scb *scb; |
@@ -1139,6 +1139,8 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt, | |||
1139 | return 0; | 1139 | return 0; |
1140 | } | 1140 | } |
1141 | 1141 | ||
1142 | static DEF_SCSI_QCMD(wd7000_queuecommand) | ||
1143 | |||
1142 | static int wd7000_diagnostics(Adapter * host, int code) | 1144 | static int wd7000_diagnostics(Adapter * host, int code) |
1143 | { | 1145 | { |
1144 | static IcbDiag icb = { ICB_OP_DIAGNOSTICS }; | 1146 | static IcbDiag icb = { ICB_OP_DIAGNOSTICS }; |
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c index fa62578fcd20..bcc31f2140ac 100644 --- a/drivers/serial/crisv10.c +++ b/drivers/serial/crisv10.c | |||
@@ -18,7 +18,6 @@ static char *serial_version = "$Revision: 1.25 $"; | |||
18 | #include <linux/tty.h> | 18 | #include <linux/tty.h> |
19 | #include <linux/tty_flip.h> | 19 | #include <linux/tty_flip.h> |
20 | #include <linux/major.h> | 20 | #include <linux/major.h> |
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/string.h> | 21 | #include <linux/string.h> |
23 | #include <linux/fcntl.h> | 22 | #include <linux/fcntl.h> |
24 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index c4ea14670d44..9ffa5bee44ab 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/console.h> | 29 | #include <linux/console.h> |
30 | #include <linux/proc_fs.h> | 30 | #include <linux/proc_fs.h> |
31 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
32 | #include <linux/smp_lock.h> | ||
33 | #include <linux/device.h> | 32 | #include <linux/device.h> |
34 | #include <linux/serial.h> /* for serial_state and serial_icounter_struct */ | 33 | #include <linux/serial.h> /* for serial_state and serial_icounter_struct */ |
35 | #include <linux/serial_core.h> | 34 | #include <linux/serial_core.h> |
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index 09615b51d591..3f5e387ed564 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c | |||
@@ -418,8 +418,11 @@ int clk_register(struct clk *clk) | |||
418 | list_add(&clk->sibling, &root_clks); | 418 | list_add(&clk->sibling, &root_clks); |
419 | 419 | ||
420 | list_add(&clk->node, &clock_list); | 420 | list_add(&clk->node, &clock_list); |
421 | |||
422 | #ifdef CONFIG_SH_CLK_CPG_LEGACY | ||
421 | if (clk->ops && clk->ops->init) | 423 | if (clk->ops && clk->ops->init) |
422 | clk->ops->init(clk); | 424 | clk->ops->init(clk); |
425 | #endif | ||
423 | 426 | ||
424 | out_unlock: | 427 | out_unlock: |
425 | mutex_unlock(&clock_list_sem); | 428 | mutex_unlock(&clock_list_sem); |
@@ -455,19 +458,13 @@ EXPORT_SYMBOL_GPL(clk_get_rate); | |||
455 | 458 | ||
456 | int clk_set_rate(struct clk *clk, unsigned long rate) | 459 | int clk_set_rate(struct clk *clk, unsigned long rate) |
457 | { | 460 | { |
458 | return clk_set_rate_ex(clk, rate, 0); | ||
459 | } | ||
460 | EXPORT_SYMBOL_GPL(clk_set_rate); | ||
461 | |||
462 | int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) | ||
463 | { | ||
464 | int ret = -EOPNOTSUPP; | 461 | int ret = -EOPNOTSUPP; |
465 | unsigned long flags; | 462 | unsigned long flags; |
466 | 463 | ||
467 | spin_lock_irqsave(&clock_lock, flags); | 464 | spin_lock_irqsave(&clock_lock, flags); |
468 | 465 | ||
469 | if (likely(clk->ops && clk->ops->set_rate)) { | 466 | if (likely(clk->ops && clk->ops->set_rate)) { |
470 | ret = clk->ops->set_rate(clk, rate, algo_id); | 467 | ret = clk->ops->set_rate(clk, rate); |
471 | if (ret != 0) | 468 | if (ret != 0) |
472 | goto out_unlock; | 469 | goto out_unlock; |
473 | } else { | 470 | } else { |
@@ -485,7 +482,7 @@ out_unlock: | |||
485 | 482 | ||
486 | return ret; | 483 | return ret; |
487 | } | 484 | } |
488 | EXPORT_SYMBOL_GPL(clk_set_rate_ex); | 485 | EXPORT_SYMBOL_GPL(clk_set_rate); |
489 | 486 | ||
490 | int clk_set_parent(struct clk *clk, struct clk *parent) | 487 | int clk_set_parent(struct clk *clk, struct clk *parent) |
491 | { | 488 | { |
@@ -571,7 +568,7 @@ long clk_round_parent(struct clk *clk, unsigned long target, | |||
571 | *best_freq = freq_max; | 568 | *best_freq = freq_max; |
572 | } | 569 | } |
573 | 570 | ||
574 | pr_debug("too low freq %lu, error %lu\n", freq->frequency, | 571 | pr_debug("too low freq %u, error %lu\n", freq->frequency, |
575 | target - freq_max); | 572 | target - freq_max); |
576 | 573 | ||
577 | if (!error) | 574 | if (!error) |
@@ -591,7 +588,7 @@ long clk_round_parent(struct clk *clk, unsigned long target, | |||
591 | *best_freq = freq_min; | 588 | *best_freq = freq_min; |
592 | } | 589 | } |
593 | 590 | ||
594 | pr_debug("too high freq %lu, error %lu\n", freq->frequency, | 591 | pr_debug("too high freq %u, error %lu\n", freq->frequency, |
595 | freq_min - target); | 592 | freq_min - target); |
596 | 593 | ||
597 | if (!error) | 594 | if (!error) |
@@ -653,8 +650,7 @@ static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state) | |||
653 | clkp->ops->set_parent(clkp, | 650 | clkp->ops->set_parent(clkp, |
654 | clkp->parent); | 651 | clkp->parent); |
655 | if (likely(clkp->ops->set_rate)) | 652 | if (likely(clkp->ops->set_rate)) |
656 | clkp->ops->set_rate(clkp, | 653 | clkp->ops->set_rate(clkp, rate); |
657 | rate, NO_CHANGE); | ||
658 | else if (likely(clkp->ops->recalc)) | 654 | else if (likely(clkp->ops->recalc)) |
659 | clkp->rate = clkp->ops->recalc(clkp); | 655 | clkp->rate = clkp->ops->recalc(clkp); |
660 | } | 656 | } |
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c index 3aea5f0ceb09..6172335ae323 100644 --- a/drivers/sh/clk/cpg.c +++ b/drivers/sh/clk/cpg.c | |||
@@ -110,8 +110,7 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent) | |||
110 | return 0; | 110 | return 0; |
111 | } | 111 | } |
112 | 112 | ||
113 | static int sh_clk_div6_set_rate(struct clk *clk, | 113 | static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate) |
114 | unsigned long rate, int algo_id) | ||
115 | { | 114 | { |
116 | unsigned long value; | 115 | unsigned long value; |
117 | int idx; | 116 | int idx; |
@@ -132,7 +131,7 @@ static int sh_clk_div6_enable(struct clk *clk) | |||
132 | unsigned long value; | 131 | unsigned long value; |
133 | int ret; | 132 | int ret; |
134 | 133 | ||
135 | ret = sh_clk_div6_set_rate(clk, clk->rate, 0); | 134 | ret = sh_clk_div6_set_rate(clk, clk->rate); |
136 | if (ret == 0) { | 135 | if (ret == 0) { |
137 | value = __raw_readl(clk->enable_reg); | 136 | value = __raw_readl(clk->enable_reg); |
138 | value &= ~0x100; /* clear stop bit to enable clock */ | 137 | value &= ~0x100; /* clear stop bit to enable clock */ |
@@ -253,7 +252,7 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent) | |||
253 | return 0; | 252 | return 0; |
254 | } | 253 | } |
255 | 254 | ||
256 | static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id) | 255 | static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate) |
257 | { | 256 | { |
258 | struct clk_div4_table *d4t = clk->priv; | 257 | struct clk_div4_table *d4t = clk->priv; |
259 | unsigned long value; | 258 | unsigned long value; |
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c index e5bf5d3c698e..4e0ff7181164 100644 --- a/drivers/sh/intc/virq.c +++ b/drivers/sh/intc/virq.c | |||
@@ -215,7 +215,7 @@ restart: | |||
215 | entry = radix_tree_deref_slot((void **)entries[i]); | 215 | entry = radix_tree_deref_slot((void **)entries[i]); |
216 | if (unlikely(!entry)) | 216 | if (unlikely(!entry)) |
217 | continue; | 217 | continue; |
218 | if (unlikely(entry == RADIX_TREE_RETRY)) | 218 | if (radix_tree_deref_retry(entry)) |
219 | goto restart; | 219 | goto restart; |
220 | 220 | ||
221 | irq = create_irq(); | 221 | irq = create_irq(); |
diff --git a/drivers/staging/easycap/easycap.h b/drivers/staging/easycap/easycap.h index f3c827eb0abe..25961c23dc0f 100644 --- a/drivers/staging/easycap/easycap.h +++ b/drivers/staging/easycap/easycap.h | |||
@@ -77,7 +77,6 @@ | |||
77 | #include <linux/slab.h> | 77 | #include <linux/slab.h> |
78 | #include <linux/module.h> | 78 | #include <linux/module.h> |
79 | #include <linux/kref.h> | 79 | #include <linux/kref.h> |
80 | #include <linux/smp_lock.h> | ||
81 | #include <linux/usb.h> | 80 | #include <linux/usb.h> |
82 | #include <linux/uaccess.h> | 81 | #include <linux/uaccess.h> |
83 | 82 | ||
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c index 41d9acf4cd61..6f8d67d0d64f 100644 --- a/drivers/staging/hv/storvsc_drv.c +++ b/drivers/staging/hv/storvsc_drv.c | |||
@@ -72,8 +72,7 @@ struct storvsc_driver_context { | |||
72 | 72 | ||
73 | /* Static decl */ | 73 | /* Static decl */ |
74 | static int storvsc_probe(struct device *dev); | 74 | static int storvsc_probe(struct device *dev); |
75 | static int storvsc_queuecommand(struct scsi_cmnd *scmnd, | 75 | static int storvsc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd); |
76 | void (*done)(struct scsi_cmnd *)); | ||
77 | static int storvsc_device_alloc(struct scsi_device *); | 76 | static int storvsc_device_alloc(struct scsi_device *); |
78 | static int storvsc_device_configure(struct scsi_device *); | 77 | static int storvsc_device_configure(struct scsi_device *); |
79 | static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd); | 78 | static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd); |
@@ -595,7 +594,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, | |||
595 | /* | 594 | /* |
596 | * storvsc_queuecommand - Initiate command processing | 595 | * storvsc_queuecommand - Initiate command processing |
597 | */ | 596 | */ |
598 | static int storvsc_queuecommand(struct scsi_cmnd *scmnd, | 597 | static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd, |
599 | void (*done)(struct scsi_cmnd *)) | 598 | void (*done)(struct scsi_cmnd *)) |
600 | { | 599 | { |
601 | int ret; | 600 | int ret; |
@@ -783,6 +782,8 @@ retry_request: | |||
783 | return ret; | 782 | return ret; |
784 | } | 783 | } |
785 | 784 | ||
785 | static DEF_SCSI_QCMD(storvsc_queuecommand) | ||
786 | |||
786 | static int storvsc_merge_bvec(struct request_queue *q, | 787 | static int storvsc_merge_bvec(struct request_queue *q, |
787 | struct bvec_merge_data *bmd, struct bio_vec *bvec) | 788 | struct bvec_merge_data *bmd, struct bio_vec *bvec) |
788 | { | 789 | { |
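
The storvsc change above follows the tree-wide queuecommand convention switch: the old lock-held entry point is renamed with an _lck suffix, and DEF_SCSI_QCMD() generates a new-style wrapper that takes the Scsi_Host and acquires the host lock itself. Below is a compilable user-space analogue of that wrapper pattern (build with -pthread); the pthread mutex merely stands in for shost->host_lock and all names are placeholders, not the kernel macro's literal expansion.

    #include <pthread.h>
    #include <stdio.h>

    /* Placeholder stand-ins for the SCSI types; the pthread mutex merely
     * plays the role of shost->host_lock. */
    struct host { pthread_mutex_t lock; };
    struct cmnd { const char *name; };

    /* Old-style entry point: expects the host lock to be held already,
     * like the renamed *_lck() functions above. */
    static int queuecommand_lck(struct cmnd *c)
    {
        printf("queueing %s with the host lock held\n", c->name);
        return 0;
    }

    /* New-style entry point: takes the host, acquires the lock itself and
     * then calls the legacy body -- the job DEF_SCSI_QCMD() does. */
    static int queuecommand(struct host *h, struct cmnd *c)
    {
        int rc;

        pthread_mutex_lock(&h->lock);
        rc = queuecommand_lck(c);
        pthread_mutex_unlock(&h->lock);
        return rc;
    }

    int main(void)
    {
        struct host h = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct cmnd c = { .name = "TEST UNIT READY" };

        return queuecommand(&h, &c);
    }
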
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c index 9618c7997461..991440015e92 100644 --- a/drivers/staging/intel_sst/intel_sst_app_interface.c +++ b/drivers/staging/intel_sst/intel_sst_app_interface.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/uaccess.h> | 34 | #include <linux/uaccess.h> |
35 | #include <linux/firmware.h> | 35 | #include <linux/firmware.h> |
36 | #include <linux/ioctl.h> | 36 | #include <linux/ioctl.h> |
37 | #include <linux/smp_lock.h> | ||
38 | #ifdef CONFIG_MRST_RAR_HANDLER | 37 | #ifdef CONFIG_MRST_RAR_HANDLER |
39 | #include <linux/rar_register.h> | 38 | #include <linux/rar_register.h> |
40 | #include "../../../drivers/staging/memrar/memrar.h" | 39 | #include "../../../drivers/staging/memrar/memrar.h" |
diff --git a/drivers/staging/keucr/scsiglue.c b/drivers/staging/keucr/scsiglue.c index a2671404f7ac..da4f42af3838 100644 --- a/drivers/staging/keucr/scsiglue.c +++ b/drivers/staging/keucr/scsiglue.c | |||
@@ -87,7 +87,7 @@ static int slave_configure(struct scsi_device *sdev) | |||
87 | 87 | ||
88 | /* This is always called with scsi_lock(host) held */ | 88 | /* This is always called with scsi_lock(host) held */ |
89 | //----- queuecommand() --------------------- | 89 | //----- queuecommand() --------------------- |
90 | static int queuecommand(struct scsi_cmnd *srb, void (*done)(struct scsi_cmnd *)) | 90 | static int queuecommand_lck(struct scsi_cmnd *srb, void (*done)(struct scsi_cmnd *)) |
91 | { | 91 | { |
92 | struct us_data *us = host_to_us(srb->device->host); | 92 | struct us_data *us = host_to_us(srb->device->host); |
93 | 93 | ||
@@ -117,6 +117,8 @@ static int queuecommand(struct scsi_cmnd *srb, void (*done)(struct scsi_cmnd *)) | |||
117 | return 0; | 117 | return 0; |
118 | } | 118 | } |
119 | 119 | ||
120 | static DEF_SCSI_QCMD(queuecommand) | ||
121 | |||
120 | /*********************************************************************** | 122 | /*********************************************************************** |
121 | * Error handling functions | 123 | * Error handling functions |
122 | ***********************************************************************/ | 124 | ***********************************************************************/ |
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c index 75aa7a36307d..4ca45ec7fd84 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon.c +++ b/drivers/staging/olpc_dcon/olpc_dcon.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/console.h> | 17 | #include <linux/console.h> |
18 | #include <linux/i2c.h> | 18 | #include <linux/i2c.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/i2c-id.h> | ||
21 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
22 | #include <linux/pci_ids.h> | 21 | #include <linux/pci_ids.h> |
23 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
@@ -733,7 +732,6 @@ static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
733 | edev: | 732 | edev: |
734 | platform_device_unregister(dcon_device); | 733 | platform_device_unregister(dcon_device); |
735 | dcon_device = NULL; | 734 | dcon_device = NULL; |
736 | i2c_set_clientdata(client, NULL); | ||
737 | eirq: | 735 | eirq: |
738 | free_irq(DCON_IRQ, &dcon_driver); | 736 | free_irq(DCON_IRQ, &dcon_driver); |
739 | einit: | 737 | einit: |
@@ -757,8 +755,6 @@ static int dcon_remove(struct i2c_client *client) | |||
757 | platform_device_unregister(dcon_device); | 755 | platform_device_unregister(dcon_device); |
758 | cancel_work_sync(&dcon_work); | 756 | cancel_work_sync(&dcon_work); |
759 | 757 | ||
760 | i2c_set_clientdata(client, NULL); | ||
761 | |||
762 | return 0; | 758 | return 0; |
763 | } | 759 | } |
764 | 760 | ||
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h index d1674cd282dc..831d81e0e429 100644 --- a/drivers/staging/rtl8712/osdep_service.h +++ b/drivers/staging/rtl8712/osdep_service.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
24 | #include <linux/kref.h> | 24 | #include <linux/kref.h> |
25 | #include <linux/smp_lock.h> | ||
26 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
27 | #include <linux/skbuff.h> | 26 | #include <linux/skbuff.h> |
28 | #include <linux/usb.h> | 27 | #include <linux/usb.h> |
diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c index b7b60d5e8660..a2db956edd54 100644 --- a/drivers/staging/speakup/buffers.c +++ b/drivers/staging/speakup/buffers.c | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <linux/console.h> | 1 | #include <linux/console.h> |
2 | #include <linux/smp_lock.h> | ||
3 | #include <linux/types.h> | 2 | #include <linux/types.h> |
4 | #include <linux/wait.h> | 3 | #include <linux/wait.h> |
5 | 4 | ||
diff --git a/drivers/staging/stradis/Kconfig b/drivers/staging/stradis/Kconfig index 92e891141896..02f0fc504cf5 100644 --- a/drivers/staging/stradis/Kconfig +++ b/drivers/staging/stradis/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config VIDEO_STRADIS | 1 | config VIDEO_STRADIS |
2 | tristate "Stradis 4:2:2 MPEG-2 video driver (DEPRECATED)" | 2 | tristate "Stradis 4:2:2 MPEG-2 video driver (DEPRECATED)" |
3 | depends on EXPERIMENTAL && PCI && VIDEO_V4L1 && VIRT_TO_BUS | 3 | depends on EXPERIMENTAL && PCI && VIDEO_V4L1 && VIRT_TO_BUS && BKL |
4 | help | 4 | help |
5 | Say Y here to enable support for the Stradis 4:2:2 MPEG-2 video | 5 | Say Y here to enable support for the Stradis 4:2:2 MPEG-2 video |
6 | driver for PCI. There is a product page at | 6 | driver for PCI. There is a product page at |
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index eaa5d3efa79d..c556ed9db13d 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c | |||
@@ -554,7 +554,7 @@ EXPORT_SYMBOL(handle_sysrq); | |||
554 | #ifdef CONFIG_INPUT | 554 | #ifdef CONFIG_INPUT |
555 | 555 | ||
556 | /* Simple translation table for the SysRq keys */ | 556 | /* Simple translation table for the SysRq keys */ |
557 | static const unsigned char sysrq_xlate[KEY_MAX + 1] = | 557 | static const unsigned char sysrq_xlate[KEY_CNT] = |
558 | "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */ | 558 | "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */ |
559 | "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */ | 559 | "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */ |
560 | "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */ | 560 | "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */ |
@@ -563,53 +563,129 @@ static const unsigned char sysrq_xlate[KEY_MAX + 1] = | |||
563 | "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */ | 563 | "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */ |
564 | "\r\000/"; /* 0x60 - 0x6f */ | 564 | "\r\000/"; /* 0x60 - 0x6f */ |
565 | 565 | ||
566 | static bool sysrq_down; | 566 | struct sysrq_state { |
567 | static int sysrq_alt_use; | 567 | struct input_handle handle; |
568 | static int sysrq_alt; | 568 | struct work_struct reinject_work; |
569 | static DEFINE_SPINLOCK(sysrq_event_lock); | 569 | unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; |
570 | unsigned int alt; | ||
571 | unsigned int alt_use; | ||
572 | bool active; | ||
573 | bool need_reinject; | ||
574 | }; | ||
575 | |||
576 | static void sysrq_reinject_alt_sysrq(struct work_struct *work) | ||
577 | { | ||
578 | struct sysrq_state *sysrq = | ||
579 | container_of(work, struct sysrq_state, reinject_work); | ||
580 | struct input_handle *handle = &sysrq->handle; | ||
581 | unsigned int alt_code = sysrq->alt_use; | ||
582 | |||
583 | if (sysrq->need_reinject) { | ||
584 | /* Simulate press and release of Alt + SysRq */ | ||
585 | input_inject_event(handle, EV_KEY, alt_code, 1); | ||
586 | input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1); | ||
587 | input_inject_event(handle, EV_SYN, SYN_REPORT, 1); | ||
588 | |||
589 | input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0); | ||
590 | input_inject_event(handle, EV_KEY, alt_code, 0); | ||
591 | input_inject_event(handle, EV_SYN, SYN_REPORT, 1); | ||
592 | } | ||
593 | } | ||
570 | 594 | ||
571 | static bool sysrq_filter(struct input_handle *handle, unsigned int type, | 595 | static bool sysrq_filter(struct input_handle *handle, |
572 | unsigned int code, int value) | 596 | unsigned int type, unsigned int code, int value) |
573 | { | 597 | { |
598 | struct sysrq_state *sysrq = handle->private; | ||
599 | bool was_active = sysrq->active; | ||
574 | bool suppress; | 600 | bool suppress; |
575 | 601 | ||
576 | /* We are called with interrupts disabled, just take the lock */ | 602 | switch (type) { |
577 | spin_lock(&sysrq_event_lock); | ||
578 | 603 | ||
579 | if (type != EV_KEY) | 604 | case EV_SYN: |
580 | goto out; | 605 | suppress = false; |
606 | break; | ||
581 | 607 | ||
582 | switch (code) { | 608 | case EV_KEY: |
609 | switch (code) { | ||
583 | 610 | ||
584 | case KEY_LEFTALT: | 611 | case KEY_LEFTALT: |
585 | case KEY_RIGHTALT: | 612 | case KEY_RIGHTALT: |
586 | if (value) | 613 | if (!value) { |
587 | sysrq_alt = code; | 614 | /* One of ALTs is being released */ |
588 | else { | 615 | if (sysrq->active && code == sysrq->alt_use) |
589 | if (sysrq_down && code == sysrq_alt_use) | 616 | sysrq->active = false; |
590 | sysrq_down = false; | ||
591 | 617 | ||
592 | sysrq_alt = 0; | 618 | sysrq->alt = KEY_RESERVED; |
619 | |||
620 | } else if (value != 2) { | ||
621 | sysrq->alt = code; | ||
622 | sysrq->need_reinject = false; | ||
623 | } | ||
624 | break; | ||
625 | |||
626 | case KEY_SYSRQ: | ||
627 | if (value == 1 && sysrq->alt != KEY_RESERVED) { | ||
628 | sysrq->active = true; | ||
629 | sysrq->alt_use = sysrq->alt; | ||
630 | /* | ||
631 | * If nothing else will be pressed, we'll need | ||
632 | * to re-inject the Alt-SysRq keystroke. | ||
633 | */ | ||
634 | sysrq->need_reinject = true; | ||
635 | } | ||
636 | |||
637 | /* | ||
638 | * Pretend that sysrq was never pressed at all. This | ||
639 | * is needed to properly handle KGDB, which will try | ||
640 | * to release all keys after exiting the debugger. If we | ||
641 | * do not clear the key bit, KGDB will end up sending | ||
642 | * release events for Alt and SysRq, potentially | ||
643 | * triggering the print screen function. | ||
644 | */ | ||
645 | if (sysrq->active) | ||
646 | clear_bit(KEY_SYSRQ, handle->dev->key); | ||
647 | |||
648 | break; | ||
649 | |||
650 | default: | ||
651 | if (sysrq->active && value && value != 2) { | ||
652 | sysrq->need_reinject = false; | ||
653 | __handle_sysrq(sysrq_xlate[code], true); | ||
654 | } | ||
655 | break; | ||
593 | } | 656 | } |
594 | break; | ||
595 | 657 | ||
596 | case KEY_SYSRQ: | 658 | suppress = sysrq->active; |
597 | if (value == 1 && sysrq_alt) { | 659 | |
598 | sysrq_down = true; | 660 | if (!sysrq->active) { |
599 | sysrq_alt_use = sysrq_alt; | 661 | /* |
662 | * If we are not suppressing key presses, keep track of | ||
663 | * keyboard state so we can release keys that have been | ||
664 | * pressed before entering SysRq mode. | ||
665 | */ | ||
666 | if (value) | ||
667 | set_bit(code, sysrq->key_down); | ||
668 | else | ||
669 | clear_bit(code, sysrq->key_down); | ||
670 | |||
671 | if (was_active) | ||
672 | schedule_work(&sysrq->reinject_work); | ||
673 | |||
674 | } else if (value == 0 && | ||
675 | test_and_clear_bit(code, sysrq->key_down)) { | ||
676 | /* | ||
677 | * Pass on release events for keys that were pressed before | ||
678 | * entering SysRq mode. | ||
679 | */ | ||
680 | suppress = false; | ||
600 | } | 681 | } |
601 | break; | 682 | break; |
602 | 683 | ||
603 | default: | 684 | default: |
604 | if (sysrq_down && value && value != 2) | 685 | suppress = sysrq->active; |
605 | __handle_sysrq(sysrq_xlate[code], true); | ||
606 | break; | 686 | break; |
607 | } | 687 | } |
608 | 688 | ||
609 | out: | ||
610 | suppress = sysrq_down; | ||
611 | spin_unlock(&sysrq_event_lock); | ||
612 | |||
613 | return suppress; | 689 | return suppress; |
614 | } | 690 | } |
615 | 691 | ||
@@ -617,28 +693,28 @@ static int sysrq_connect(struct input_handler *handler, | |||
617 | struct input_dev *dev, | 693 | struct input_dev *dev, |
618 | const struct input_device_id *id) | 694 | const struct input_device_id *id) |
619 | { | 695 | { |
620 | struct input_handle *handle; | 696 | struct sysrq_state *sysrq; |
621 | int error; | 697 | int error; |
622 | 698 | ||
623 | sysrq_down = false; | 699 | sysrq = kzalloc(sizeof(struct sysrq_state), GFP_KERNEL); |
624 | sysrq_alt = 0; | 700 | if (!sysrq) |
625 | |||
626 | handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); | ||
627 | if (!handle) | ||
628 | return -ENOMEM; | 701 | return -ENOMEM; |
629 | 702 | ||
630 | handle->dev = dev; | 703 | INIT_WORK(&sysrq->reinject_work, sysrq_reinject_alt_sysrq); |
631 | handle->handler = handler; | 704 | |
632 | handle->name = "sysrq"; | 705 | sysrq->handle.dev = dev; |
706 | sysrq->handle.handler = handler; | ||
707 | sysrq->handle.name = "sysrq"; | ||
708 | sysrq->handle.private = sysrq; | ||
633 | 709 | ||
634 | error = input_register_handle(handle); | 710 | error = input_register_handle(&sysrq->handle); |
635 | if (error) { | 711 | if (error) { |
636 | pr_err("Failed to register input sysrq handler, error %d\n", | 712 | pr_err("Failed to register input sysrq handler, error %d\n", |
637 | error); | 713 | error); |
638 | goto err_free; | 714 | goto err_free; |
639 | } | 715 | } |
640 | 716 | ||
641 | error = input_open_device(handle); | 717 | error = input_open_device(&sysrq->handle); |
642 | if (error) { | 718 | if (error) { |
643 | pr_err("Failed to open input device, error %d\n", error); | 719 | pr_err("Failed to open input device, error %d\n", error); |
644 | goto err_unregister; | 720 | goto err_unregister; |
@@ -647,17 +723,20 @@ static int sysrq_connect(struct input_handler *handler, | |||
647 | return 0; | 723 | return 0; |
648 | 724 | ||
649 | err_unregister: | 725 | err_unregister: |
650 | input_unregister_handle(handle); | 726 | input_unregister_handle(&sysrq->handle); |
651 | err_free: | 727 | err_free: |
652 | kfree(handle); | 728 | kfree(sysrq); |
653 | return error; | 729 | return error; |
654 | } | 730 | } |
655 | 731 | ||
656 | static void sysrq_disconnect(struct input_handle *handle) | 732 | static void sysrq_disconnect(struct input_handle *handle) |
657 | { | 733 | { |
734 | struct sysrq_state *sysrq = handle->private; | ||
735 | |||
658 | input_close_device(handle); | 736 | input_close_device(handle); |
737 | cancel_work_sync(&sysrq->reinject_work); | ||
659 | input_unregister_handle(handle); | 738 | input_unregister_handle(handle); |
660 | kfree(handle); | 739 | kfree(sysrq); |
661 | } | 740 | } |
662 | 741 | ||
663 | /* | 742 | /* |
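
The sysrq.c rework above replaces the file-scope sysrq_down/sysrq_alt globals and their spinlock with a per-keyboard struct sysrq_state reached through handle->private. The stand-alone sketch below shows just that "globals become per-instance state behind a private pointer" shape; the structs and the filter logic are invented and heavily compressed, and 0x38/0x63 are the Linux keycodes for KEY_LEFTALT and KEY_SYSRQ.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Per-device state instead of file-scope globals; the real code hangs
     * its struct sysrq_state off input_handle->private.  Everything here
     * is invented for the illustration. */
    struct handle;

    struct sysrq_state_sketch {
        struct handle *handle;
        unsigned int alt;      /* which Alt key is down, 0 if none */
        bool active;           /* inside an Alt+SysRq sequence */
    };

    struct handle {
        const char *name;
        void *private;         /* mirrors input_handle->private */
    };

    /* 0x38 and 0x63 are the Linux keycodes for KEY_LEFTALT and KEY_SYSRQ. */
    static bool filter(struct handle *h, unsigned int code, int value)
    {
        struct sysrq_state_sketch *s = h->private;

        if (code == 0x38) {                 /* Alt press or release */
            s->alt = value ? code : 0;
            return false;
        }
        if (code == 0x63 && value && s->alt) {
            s->active = true;               /* this handle enters SysRq mode */
            return true;                    /* suppress the event */
        }
        return s->active;
    }

    int main(void)
    {
        struct handle kbd = { .name = "kbd0" };
        struct sysrq_state_sketch *s = calloc(1, sizeof(*s));

        if (!s)
            return 1;
        s->handle = &kbd;
        kbd.private = s;

        filter(&kbd, 0x38, 1);                            /* Alt down   */
        printf("suppress = %d\n", filter(&kbd, 0x63, 1)); /* SysRq down */
        free(s);
        return 0;
    }
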
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index ea071a5b6eee..44447f54942f 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c | |||
@@ -2301,7 +2301,7 @@ out: | |||
2301 | return ret; | 2301 | return ret; |
2302 | } | 2302 | } |
2303 | 2303 | ||
2304 | static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot); | 2304 | static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot); |
2305 | 2305 | ||
2306 | static ssize_t read_human_status(struct device *dev, | 2306 | static ssize_t read_human_status(struct device *dev, |
2307 | struct device_attribute *attr, char *buf) | 2307 | struct device_attribute *attr, char *buf) |
@@ -2364,8 +2364,7 @@ out: | |||
2364 | return ret; | 2364 | return ret; |
2365 | } | 2365 | } |
2366 | 2366 | ||
2367 | static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO, | 2367 | static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL); |
2368 | read_human_status, NULL); | ||
2369 | 2368 | ||
2370 | static ssize_t read_delin(struct device *dev, struct device_attribute *attr, | 2369 | static ssize_t read_delin(struct device *dev, struct device_attribute *attr, |
2371 | char *buf) | 2370 | char *buf) |
@@ -2397,7 +2396,7 @@ out: | |||
2397 | return ret; | 2396 | return ret; |
2398 | } | 2397 | } |
2399 | 2398 | ||
2400 | static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL); | 2399 | static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL); |
2401 | 2400 | ||
2402 | #define UEA_ATTR(name, reset) \ | 2401 | #define UEA_ATTR(name, reset) \ |
2403 | \ | 2402 | \ |
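
The DEVICE_ATTR edits above (and the similar ones later in this series) drop group/other write permission from sysfs attributes: S_IWUGO becomes S_IWUSR, and attributes without a store method keep only S_IRUGO. The small program below just evaluates those mode combinations; S_IWUGO and S_IRUGO are kernel-only shorthands, so they are spelled out locally for the example.

    #include <stdio.h>
    #include <sys/stat.h>

    /* S_IWUGO and S_IRUGO are kernel-only shorthands, so spell them out. */
    #define SKETCH_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)
    #define SKETCH_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)

    int main(void)
    {
        printf("S_IWUGO | S_IRUGO = %04o (world-writable)\n",
               (unsigned int)(SKETCH_IWUGO | SKETCH_IRUGO));   /* 0666 */
        printf("S_IWUSR | S_IRUGO = %04o (owner-writable)\n",
               (unsigned int)(S_IWUSR | SKETCH_IRUGO));        /* 0644 */
        printf("S_IRUGO           = %04o (read-only)\n",
               (unsigned int)SKETCH_IRUGO);                    /* 0444 */
        return 0;
    }
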
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index ddb4dc980923..a3d2e2399655 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c | |||
@@ -54,7 +54,6 @@ | |||
54 | #include <linux/gfp.h> | 54 | #include <linux/gfp.h> |
55 | #include <linux/poll.h> | 55 | #include <linux/poll.h> |
56 | #include <linux/usb.h> | 56 | #include <linux/usb.h> |
57 | #include <linux/smp_lock.h> | ||
58 | #include <linux/usbdevice_fs.h> | 57 | #include <linux/usbdevice_fs.h> |
59 | #include <linux/usb/hcd.h> | 58 | #include <linux/usb/hcd.h> |
60 | #include <linux/mutex.h> | 59 | #include <linux/mutex.h> |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 045bb4b823e1..a7131ad630f9 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/fs.h> | 37 | #include <linux/fs.h> |
38 | #include <linux/mm.h> | 38 | #include <linux/mm.h> |
39 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
40 | #include <linux/smp_lock.h> | ||
41 | #include <linux/signal.h> | 40 | #include <linux/signal.h> |
42 | #include <linux/poll.h> | 41 | #include <linux/poll.h> |
43 | #include <linux/module.h> | 42 | #include <linux/module.h> |
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index 9fe34fb78ef1..cf6a5423de09 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
20 | #include <linux/rwsem.h> | 20 | #include <linux/rwsem.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/smp_lock.h> | ||
23 | #include <linux/usb.h> | 22 | #include <linux/usb.h> |
24 | 23 | ||
25 | #include "usb.h" | 24 | #include "usb.h" |
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index 9819a4cc3b26..b690aa35df9a 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c | |||
@@ -39,7 +39,6 @@ | |||
39 | #include <linux/parser.h> | 39 | #include <linux/parser.h> |
40 | #include <linux/notifier.h> | 40 | #include <linux/notifier.h> |
41 | #include <linux/seq_file.h> | 41 | #include <linux/seq_file.h> |
42 | #include <linux/smp_lock.h> | ||
43 | #include <linux/usb/hcd.h> | 42 | #include <linux/usb/hcd.h> |
44 | #include <asm/byteorder.h> | 43 | #include <asm/byteorder.h> |
45 | #include "usb.h" | 44 | #include "usb.h" |
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c index b5e20e873cba..717ff653fa23 100644 --- a/drivers/usb/gadget/atmel_usba_udc.c +++ b/drivers/usb/gadget/atmel_usba_udc.c | |||
@@ -2017,7 +2017,7 @@ static int __init usba_udc_probe(struct platform_device *pdev) | |||
2017 | } | 2017 | } |
2018 | } else { | 2018 | } else { |
2019 | /* gpio_request fail so use -EINVAL for gpio_is_valid */ | 2019 | /* gpio_request fail so use -EINVAL for gpio_is_valid */ |
2020 | ubc->vbus_pin = -EINVAL; | 2020 | udc->vbus_pin = -EINVAL; |
2021 | } | 2021 | } |
2022 | } | 2022 | } |
2023 | 2023 | ||
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index 4a830df4fc31..484c5ba5450e 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/blkdev.h> | 30 | #include <linux/blkdev.h> |
31 | #include <linux/pagemap.h> | 31 | #include <linux/pagemap.h> |
32 | #include <asm/unaligned.h> | 32 | #include <asm/unaligned.h> |
33 | #include <linux/smp_lock.h> | ||
34 | 33 | ||
35 | #include <linux/usb/composite.h> | 34 | #include <linux/usb/composite.h> |
36 | #include <linux/usb/functionfs.h> | 35 | #include <linux/usb/functionfs.h> |
diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c index 4f891eddd060..598e7e2ab80c 100644 --- a/drivers/usb/gadget/f_hid.c +++ b/drivers/usb/gadget/f_hid.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/cdev.h> | 25 | #include <linux/cdev.h> |
26 | #include <linux/mutex.h> | 26 | #include <linux/mutex.h> |
27 | #include <linux/poll.h> | 27 | #include <linux/poll.h> |
28 | #include <linux/smp_lock.h> | ||
29 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
30 | #include <linux/wait.h> | 29 | #include <linux/wait.h> |
31 | #include <linux/usb/g_hid.h> | 30 | #include <linux/usb/g_hid.h> |
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c index 86afdc73322f..6e2599661b5b 100644 --- a/drivers/usb/host/ehci-dbg.c +++ b/drivers/usb/host/ehci-dbg.c | |||
@@ -1067,7 +1067,7 @@ static inline void create_debug_files (struct ehci_hcd *ehci) | |||
1067 | &debug_registers_fops)) | 1067 | &debug_registers_fops)) |
1068 | goto file_error; | 1068 | goto file_error; |
1069 | 1069 | ||
1070 | if (!debugfs_create_file("lpm", S_IRUGO|S_IWUGO, ehci->debug_dir, bus, | 1070 | if (!debugfs_create_file("lpm", S_IRUGO|S_IWUSR, ehci->debug_dir, bus, |
1071 | &debug_lpm_fops)) | 1071 | &debug_lpm_fops)) |
1072 | goto file_error; | 1072 | goto file_error; |
1073 | 1073 | ||
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 502a7e6fef42..e9062806d4a2 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -1063,10 +1063,11 @@ rescan: | |||
1063 | tmp && tmp != qh; | 1063 | tmp && tmp != qh; |
1064 | tmp = tmp->qh_next.qh) | 1064 | tmp = tmp->qh_next.qh) |
1065 | continue; | 1065 | continue; |
1066 | /* periodic qh self-unlinks on empty */ | 1066 | /* periodic qh self-unlinks on empty, and a COMPLETING qh |
1067 | if (!tmp) | 1067 | * may already be unlinked. |
1068 | goto nogood; | 1068 | */ |
1069 | unlink_async (ehci, qh); | 1069 | if (tmp) |
1070 | unlink_async(ehci, qh); | ||
1070 | /* FALL THROUGH */ | 1071 | /* FALL THROUGH */ |
1071 | case QH_STATE_UNLINK: /* wait for hw to finish? */ | 1072 | case QH_STATE_UNLINK: /* wait for hw to finish? */ |
1072 | case QH_STATE_UNLINK_WAIT: | 1073 | case QH_STATE_UNLINK_WAIT: |
@@ -1083,7 +1084,6 @@ idle_timeout: | |||
1083 | } | 1084 | } |
1084 | /* else FALL THROUGH */ | 1085 | /* else FALL THROUGH */ |
1085 | default: | 1086 | default: |
1086 | nogood: | ||
1087 | /* caller was supposed to have unlinked any requests; | 1087 | /* caller was supposed to have unlinked any requests; |
1088 | * that's not our job. just leak this memory. | 1088 | * that's not our job. just leak this memory. |
1089 | */ | 1089 | */ |
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index d36e4e75e08d..12f70c302b0b 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c | |||
@@ -141,6 +141,10 @@ static void ehci_mem_cleanup (struct ehci_hcd *ehci) | |||
141 | qh_put (ehci->async); | 141 | qh_put (ehci->async); |
142 | ehci->async = NULL; | 142 | ehci->async = NULL; |
143 | 143 | ||
144 | if (ehci->dummy) | ||
145 | qh_put(ehci->dummy); | ||
146 | ehci->dummy = NULL; | ||
147 | |||
144 | /* DMA consistent memory and pools */ | 148 | /* DMA consistent memory and pools */ |
145 | if (ehci->qtd_pool) | 149 | if (ehci->qtd_pool) |
146 | dma_pool_destroy (ehci->qtd_pool); | 150 | dma_pool_destroy (ehci->qtd_pool); |
@@ -227,8 +231,26 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags) | |||
227 | if (ehci->periodic == NULL) { | 231 | if (ehci->periodic == NULL) { |
228 | goto fail; | 232 | goto fail; |
229 | } | 233 | } |
230 | for (i = 0; i < ehci->periodic_size; i++) | 234 | |
231 | ehci->periodic [i] = EHCI_LIST_END(ehci); | 235 | if (ehci->use_dummy_qh) { |
236 | struct ehci_qh_hw *hw; | ||
237 | ehci->dummy = ehci_qh_alloc(ehci, flags); | ||
238 | if (!ehci->dummy) | ||
239 | goto fail; | ||
240 | |||
241 | hw = ehci->dummy->hw; | ||
242 | hw->hw_next = EHCI_LIST_END(ehci); | ||
243 | hw->hw_qtd_next = EHCI_LIST_END(ehci); | ||
244 | hw->hw_alt_next = EHCI_LIST_END(ehci); | ||
245 | hw->hw_token &= ~QTD_STS_ACTIVE; | ||
246 | ehci->dummy->hw = hw; | ||
247 | |||
248 | for (i = 0; i < ehci->periodic_size; i++) | ||
249 | ehci->periodic[i] = ehci->dummy->qh_dma; | ||
250 | } else { | ||
251 | for (i = 0; i < ehci->periodic_size; i++) | ||
252 | ehci->periodic[i] = EHCI_LIST_END(ehci); | ||
253 | } | ||
232 | 254 | ||
233 | /* software shadow of hardware table */ | 255 | /* software shadow of hardware table */ |
234 | ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags); | 256 | ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags); |
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index a1e8d273103f..01bb72b71832 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c | |||
@@ -103,6 +103,19 @@ static int ehci_pci_setup(struct usb_hcd *hcd) | |||
103 | if (retval) | 103 | if (retval) |
104 | return retval; | 104 | return retval; |
105 | 105 | ||
106 | if ((pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x7808) || | ||
107 | (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x4396)) { | ||
108 | /* EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may | ||
109 | * read/write memory space which does not belong to it when | ||
110 | * there is a NULL pointer with the T-bit set to 1 in the frame list | ||
111 | * table. To avoid the issue, the frame list link pointer | ||
112 | * should always contain a valid pointer to an inactive qh. | ||
113 | */ | ||
114 | ehci->use_dummy_qh = 1; | ||
115 | ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI " | ||
116 | "dummy qh workaround\n"); | ||
117 | } | ||
118 | |||
106 | /* data structure init */ | 119 | /* data structure init */ |
107 | retval = ehci_init(hcd); | 120 | retval = ehci_init(hcd); |
108 | if (retval) | 121 | if (retval) |
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index a92526d6e5ae..d9f78eb26572 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -98,7 +98,14 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) | |||
98 | */ | 98 | */ |
99 | *prev_p = *periodic_next_shadow(ehci, &here, | 99 | *prev_p = *periodic_next_shadow(ehci, &here, |
100 | Q_NEXT_TYPE(ehci, *hw_p)); | 100 | Q_NEXT_TYPE(ehci, *hw_p)); |
101 | *hw_p = *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)); | 101 | |
102 | if (!ehci->use_dummy_qh || | ||
103 | *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)) | ||
104 | != EHCI_LIST_END(ehci)) | ||
105 | *hw_p = *shadow_next_periodic(ehci, &here, | ||
106 | Q_NEXT_TYPE(ehci, *hw_p)); | ||
107 | else | ||
108 | *hw_p = ehci->dummy->qh_dma; | ||
102 | } | 109 | } |
103 | 110 | ||
104 | /* how many of the uframe's 125 usecs are allocated? */ | 111 | /* how many of the uframe's 125 usecs are allocated? */ |
@@ -2335,7 +2342,11 @@ restart: | |||
2335 | * pointer for much longer, if at all. | 2342 | * pointer for much longer, if at all. |
2336 | */ | 2343 | */ |
2337 | *q_p = q.itd->itd_next; | 2344 | *q_p = q.itd->itd_next; |
2338 | *hw_p = q.itd->hw_next; | 2345 | if (!ehci->use_dummy_qh || |
2346 | q.itd->hw_next != EHCI_LIST_END(ehci)) | ||
2347 | *hw_p = q.itd->hw_next; | ||
2348 | else | ||
2349 | *hw_p = ehci->dummy->qh_dma; | ||
2339 | type = Q_NEXT_TYPE(ehci, q.itd->hw_next); | 2350 | type = Q_NEXT_TYPE(ehci, q.itd->hw_next); |
2340 | wmb(); | 2351 | wmb(); |
2341 | modified = itd_complete (ehci, q.itd); | 2352 | modified = itd_complete (ehci, q.itd); |
@@ -2368,7 +2379,11 @@ restart: | |||
2368 | * URB completion. | 2379 | * URB completion. |
2369 | */ | 2380 | */ |
2370 | *q_p = q.sitd->sitd_next; | 2381 | *q_p = q.sitd->sitd_next; |
2371 | *hw_p = q.sitd->hw_next; | 2382 | if (!ehci->use_dummy_qh || |
2383 | q.sitd->hw_next != EHCI_LIST_END(ehci)) | ||
2384 | *hw_p = q.sitd->hw_next; | ||
2385 | else | ||
2386 | *hw_p = ehci->dummy->qh_dma; | ||
2372 | type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); | 2387 | type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); |
2373 | wmb(); | 2388 | wmb(); |
2374 | modified = sitd_complete (ehci, q.sitd); | 2389 | modified = sitd_complete (ehci, q.sitd); |
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index bde823f704e9..ba8eab366b82 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h | |||
@@ -73,6 +73,7 @@ struct ehci_hcd { /* one per controller */ | |||
73 | 73 | ||
74 | /* async schedule support */ | 74 | /* async schedule support */ |
75 | struct ehci_qh *async; | 75 | struct ehci_qh *async; |
76 | struct ehci_qh *dummy; /* For AMD quirk use */ | ||
76 | struct ehci_qh *reclaim; | 77 | struct ehci_qh *reclaim; |
77 | unsigned scanning : 1; | 78 | unsigned scanning : 1; |
78 | 79 | ||
@@ -131,6 +132,7 @@ struct ehci_hcd { /* one per controller */ | |||
131 | unsigned need_io_watchdog:1; | 132 | unsigned need_io_watchdog:1; |
132 | unsigned broken_periodic:1; | 133 | unsigned broken_periodic:1; |
133 | unsigned fs_i_thresh:1; /* Intel iso scheduling */ | 134 | unsigned fs_i_thresh:1; /* Intel iso scheduling */ |
135 | unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/ | ||
134 | 136 | ||
135 | /* required for usb32 quirk */ | 137 | /* required for usb32 quirk */ |
136 | #define OHCI_CTRL_HCFS (3 << 6) | 138 | #define OHCI_CTRL_HCFS (3 << 6) |
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 8196fa11fec4..43a39eb56cc6 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c | |||
@@ -70,7 +70,6 @@ | |||
70 | #include <linux/ioport.h> | 70 | #include <linux/ioport.h> |
71 | #include <linux/sched.h> | 71 | #include <linux/sched.h> |
72 | #include <linux/slab.h> | 72 | #include <linux/slab.h> |
73 | #include <linux/smp_lock.h> | ||
74 | #include <linux/errno.h> | 73 | #include <linux/errno.h> |
75 | #include <linux/init.h> | 74 | #include <linux/init.h> |
76 | #include <linux/list.h> | 75 | #include <linux/list.h> |
@@ -2684,7 +2683,7 @@ static int __devexit isp1362_remove(struct platform_device *pdev) | |||
2684 | return 0; | 2683 | return 0; |
2685 | } | 2684 | } |
2686 | 2685 | ||
2687 | static int __init isp1362_probe(struct platform_device *pdev) | 2686 | static int __devinit isp1362_probe(struct platform_device *pdev) |
2688 | { | 2687 | { |
2689 | struct usb_hcd *hcd; | 2688 | struct usb_hcd *hcd; |
2690 | struct isp1362_hcd *isp1362_hcd; | 2689 | struct isp1362_hcd *isp1362_hcd; |
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c index 6e7fb5f38db6..ee60cd3ea642 100644 --- a/drivers/usb/host/uhci-debug.c +++ b/drivers/usb/host/uhci-debug.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/debugfs.h> | 14 | #include <linux/debugfs.h> |
15 | #include <linux/smp_lock.h> | ||
16 | #include <asm/io.h> | 15 | #include <asm/io.h> |
17 | 16 | ||
18 | #include "uhci-hcd.h" | 17 | #include "uhci-hcd.h" |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 202770676da3..d178761c3981 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -1045,7 +1045,7 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, | |||
1045 | if (udev->speed == USB_SPEED_SUPER) | 1045 | if (udev->speed == USB_SPEED_SUPER) |
1046 | return ep->ss_ep_comp.wBytesPerInterval; | 1046 | return ep->ss_ep_comp.wBytesPerInterval; |
1047 | 1047 | ||
1048 | max_packet = ep->desc.wMaxPacketSize & 0x3ff; | 1048 | max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); |
1049 | max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; | 1049 | max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; |
1050 | /* A 0 in max burst means 1 transfer per ESIT */ | 1050 | /* A 0 in max burst means 1 transfer per ESIT */ |
1051 | return max_packet * (max_burst + 1); | 1051 | return max_packet * (max_burst + 1); |
@@ -1135,7 +1135,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
1135 | /* Fall through */ | 1135 | /* Fall through */ |
1136 | case USB_SPEED_FULL: | 1136 | case USB_SPEED_FULL: |
1137 | case USB_SPEED_LOW: | 1137 | case USB_SPEED_LOW: |
1138 | max_packet = ep->desc.wMaxPacketSize & 0x3ff; | 1138 | max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); |
1139 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | 1139 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); |
1140 | break; | 1140 | break; |
1141 | default: | 1141 | default: |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 9f3115e729b1..df558f6f84e3 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -2104,7 +2104,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) | |||
2104 | 2104 | ||
2105 | if (!(status & STS_EINT)) { | 2105 | if (!(status & STS_EINT)) { |
2106 | spin_unlock(&xhci->lock); | 2106 | spin_unlock(&xhci->lock); |
2107 | xhci_warn(xhci, "Spurious interrupt.\n"); | ||
2108 | return IRQ_NONE; | 2107 | return IRQ_NONE; |
2109 | } | 2108 | } |
2110 | xhci_dbg(xhci, "op reg status = %08x\n", status); | 2109 | xhci_dbg(xhci, "op reg status = %08x\n", status); |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 5d7d4e951ea4..06fca0835b52 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -577,6 +577,65 @@ static void xhci_restore_registers(struct xhci_hcd *xhci) | |||
577 | xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); | 577 | xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); |
578 | } | 578 | } |
579 | 579 | ||
580 | static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) | ||
581 | { | ||
582 | u64 val_64; | ||
583 | |||
584 | /* step 2: initialize command ring buffer */ | ||
585 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); | ||
586 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | | ||
587 | (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | ||
588 | xhci->cmd_ring->dequeue) & | ||
589 | (u64) ~CMD_RING_RSVD_BITS) | | ||
590 | xhci->cmd_ring->cycle_state; | ||
591 | xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", | ||
592 | (long unsigned long) val_64); | ||
593 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); | ||
594 | } | ||
595 | |||
596 | /* | ||
597 | * The whole command ring must be cleared to zero when we suspend the host. | ||
598 | * | ||
599 | * The host doesn't save the command ring pointer in the suspend well, so we | ||
600 | * need to re-program it on resume. Unfortunately, the pointer must be 64-byte | ||
601 | * aligned, because of the reserved bits in the command ring dequeue pointer | ||
602 | * register. Therefore, we can't just set the dequeue pointer back in the | ||
603 | * middle of the ring (TRBs are 16-byte aligned). | ||
604 | */ | ||
605 | static void xhci_clear_command_ring(struct xhci_hcd *xhci) | ||
606 | { | ||
607 | struct xhci_ring *ring; | ||
608 | struct xhci_segment *seg; | ||
609 | |||
610 | ring = xhci->cmd_ring; | ||
611 | seg = ring->deq_seg; | ||
612 | do { | ||
613 | memset(seg->trbs, 0, SEGMENT_SIZE); | ||
614 | seg = seg->next; | ||
615 | } while (seg != ring->deq_seg); | ||
616 | |||
617 | /* Reset the software enqueue and dequeue pointers */ | ||
618 | ring->deq_seg = ring->first_seg; | ||
619 | ring->dequeue = ring->first_seg->trbs; | ||
620 | ring->enq_seg = ring->deq_seg; | ||
621 | ring->enqueue = ring->dequeue; | ||
622 | |||
623 | /* | ||
624 | * Ring is now zeroed, so the HW should look for change of ownership | ||
625 | * when the cycle bit is set to 1. | ||
626 | */ | ||
627 | ring->cycle_state = 1; | ||
628 | |||
629 | /* | ||
630 | * Reset the hardware dequeue pointer. | ||
631 | * Yes, this will need to be re-written after resume, but we're paranoid | ||
632 | * and want to make sure the hardware doesn't access bogus memory | ||
633 | * because, say, the BIOS or an SMI started the host without changing | ||
634 | * the command ring pointers. | ||
635 | */ | ||
636 | xhci_set_cmd_ring_deq(xhci); | ||
637 | } | ||
638 | |||
580 | /* | 639 | /* |
581 | * Stop HC (not bus-specific) | 640 | * Stop HC (not bus-specific) |
582 | * | 641 | * |
@@ -604,6 +663,7 @@ int xhci_suspend(struct xhci_hcd *xhci) | |||
604 | spin_unlock_irq(&xhci->lock); | 663 | spin_unlock_irq(&xhci->lock); |
605 | return -ETIMEDOUT; | 664 | return -ETIMEDOUT; |
606 | } | 665 | } |
666 | xhci_clear_command_ring(xhci); | ||
607 | 667 | ||
608 | /* step 3: save registers */ | 668 | /* step 3: save registers */ |
609 | xhci_save_registers(xhci); | 669 | xhci_save_registers(xhci); |
@@ -635,7 +695,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
635 | u32 command, temp = 0; | 695 | u32 command, temp = 0; |
636 | struct usb_hcd *hcd = xhci_to_hcd(xhci); | 696 | struct usb_hcd *hcd = xhci_to_hcd(xhci); |
637 | struct pci_dev *pdev = to_pci_dev(hcd->self.controller); | 697 | struct pci_dev *pdev = to_pci_dev(hcd->self.controller); |
638 | u64 val_64; | ||
639 | int old_state, retval; | 698 | int old_state, retval; |
640 | 699 | ||
641 | old_state = hcd->state; | 700 | old_state = hcd->state; |
@@ -648,15 +707,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
648 | /* step 1: restore register */ | 707 | /* step 1: restore register */ |
649 | xhci_restore_registers(xhci); | 708 | xhci_restore_registers(xhci); |
650 | /* step 2: initialize command ring buffer */ | 709 | /* step 2: initialize command ring buffer */ |
651 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); | 710 | xhci_set_cmd_ring_deq(xhci); |
652 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | | ||
653 | (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | ||
654 | xhci->cmd_ring->dequeue) & | ||
655 | (u64) ~CMD_RING_RSVD_BITS) | | ||
656 | xhci->cmd_ring->cycle_state; | ||
657 | xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", | ||
658 | (long unsigned long) val_64); | ||
659 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); | ||
660 | /* step 3: restore state and start state*/ | 711 | /* step 3: restore state and start state*/ |
661 | /* step 3: set CRS flag */ | 712 | /* step 3: set CRS flag */ |
662 | command = xhci_readl(xhci, &xhci->op_regs->command); | 713 | command = xhci_readl(xhci, &xhci->op_regs->command); |
@@ -714,6 +765,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
714 | return retval; | 765 | return retval; |
715 | } | 766 | } |
716 | 767 | ||
768 | spin_unlock_irq(&xhci->lock); | ||
717 | /* Re-setup MSI-X */ | 769 | /* Re-setup MSI-X */ |
718 | if (hcd->irq) | 770 | if (hcd->irq) |
719 | free_irq(hcd->irq, hcd); | 771 | free_irq(hcd->irq, hcd); |
@@ -736,6 +788,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
736 | hcd->irq = pdev->irq; | 788 | hcd->irq = pdev->irq; |
737 | } | 789 | } |
738 | 790 | ||
791 | spin_lock_irq(&xhci->lock); | ||
739 | /* step 4: set Run/Stop bit */ | 792 | /* step 4: set Run/Stop bit */ |
740 | command = xhci_readl(xhci, &xhci->op_regs->command); | 793 | command = xhci_readl(xhci, &xhci->op_regs->command); |
741 | command |= CMD_RUN; | 794 | command |= CMD_RUN; |
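
The xhci.c suspend path above zeroes the whole command ring and later reprograms the dequeue register, because the controller does not preserve the command ring pointer across suspend and the register's alignment and reserved bits rule out pointing back into the middle of the ring. The stand-alone sketch below mirrors only the software half of that reset (single segment, invented type names): wipe the TRBs, rewind enqueue/dequeue, and restart with cycle state 1.

    #include <stdio.h>
    #include <string.h>

    #define TRBS_PER_SEG 16

    struct trb { unsigned int field[4]; };

    /* Single-segment stand-in for the command ring; names are invented. */
    struct ring_sketch {
        struct trb trbs[TRBS_PER_SEG];
        struct trb *enqueue, *dequeue;
        int cycle_state;
    };

    /* Mirrors the software half of xhci_clear_command_ring(): wipe the
     * TRBs, rewind the enqueue/dequeue pointers to the first segment and
     * restart with cycle state 1 so ownership is detected from the top. */
    static void clear_ring(struct ring_sketch *r)
    {
        memset(r->trbs, 0, sizeof(r->trbs));
        r->enqueue = r->dequeue = &r->trbs[0];
        r->cycle_state = 1;
    }

    int main(void)
    {
        struct ring_sketch r = { .cycle_state = 0 };

        r.enqueue = &r.trbs[7];     /* pretend we suspended mid-ring */
        r.dequeue = &r.trbs[5];
        clear_ring(&r);
        printf("dequeue index %ld, cycle state %d\n",
               (long)(r.dequeue - r.trbs), r.cycle_state);
        return 0;
    }
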
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 93d3bf4d213c..85e65647d445 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -621,6 +621,11 @@ struct xhci_ep_ctx { | |||
621 | #define MAX_PACKET_MASK (0xffff << 16) | 621 | #define MAX_PACKET_MASK (0xffff << 16) |
622 | #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) | 622 | #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) |
623 | 623 | ||
624 | /* Get max packet size from ep desc. Bits 10..0 specify the max packet size. | ||
625 | * USB2.0 spec 9.6.6. | ||
626 | */ | ||
627 | #define GET_MAX_PACKET(p) ((p) & 0x7ff) | ||
628 | |||
624 | /* tx_info bitmasks */ | 629 | /* tx_info bitmasks */ |
625 | #define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) | 630 | #define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) |
626 | #define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) | 631 | #define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) |
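
GET_MAX_PACKET() above exists because wMaxPacketSize carries the maximum packet size in bits 10..0; the open-coded 0x3ff mask it replaces silently dropped bit 10. A quick worked example of the difference (0x0400 is a plausible value for a high-speed isochronous endpoint):

    #include <stdio.h>

    /* Bits 10..0 of wMaxPacketSize hold the max packet size (USB 2.0,
     * section 9.6.6); bits 12..11 encode extra transactions per microframe. */
    #define GET_MAX_PACKET(p)   ((p) & 0x7ff)

    int main(void)
    {
        unsigned int w = 0x0400;    /* 1024-byte max packet */

        printf("old 0x3ff mask: %u\n", w & 0x3ff);          /* 0 -- bit 10 lost */
        printf("GET_MAX_PACKET: %u\n", GET_MAX_PACKET(w));  /* 1024 */
        return 0;
    }
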
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c index 5a47805d9580..c90c89dc0003 100644 --- a/drivers/usb/image/microtek.c +++ b/drivers/usb/image/microtek.c | |||
@@ -364,7 +364,7 @@ static int mts_scsi_host_reset(struct scsi_cmnd *srb) | |||
364 | } | 364 | } |
365 | 365 | ||
366 | static int | 366 | static int |
367 | mts_scsi_queuecommand(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback); | 367 | mts_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *srb); |
368 | 368 | ||
369 | static void mts_transfer_cleanup( struct urb *transfer ); | 369 | static void mts_transfer_cleanup( struct urb *transfer ); |
370 | static void mts_do_sg(struct urb * transfer); | 370 | static void mts_do_sg(struct urb * transfer); |
@@ -573,7 +573,7 @@ mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc) | |||
573 | 573 | ||
574 | 574 | ||
575 | static int | 575 | static int |
576 | mts_scsi_queuecommand(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback) | 576 | mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback) |
577 | { | 577 | { |
578 | struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); | 578 | struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); |
579 | int err = 0; | 579 | int err = 0; |
@@ -626,6 +626,8 @@ out: | |||
626 | return err; | 626 | return err; |
627 | } | 627 | } |
628 | 628 | ||
629 | static DEF_SCSI_QCMD(mts_scsi_queuecommand) | ||
630 | |||
629 | static struct scsi_host_template mts_scsi_host_template = { | 631 | static struct scsi_host_template mts_scsi_host_template = { |
630 | .module = THIS_MODULE, | 632 | .module = THIS_MODULE, |
631 | .name = "microtekX6", | 633 | .name = "microtekX6", |
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c index 2f43c57743c9..9251773ecef4 100644 --- a/drivers/usb/misc/cypress_cy7c63.c +++ b/drivers/usb/misc/cypress_cy7c63.c | |||
@@ -196,11 +196,9 @@ static ssize_t get_port1_handler(struct device *dev, | |||
196 | return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1); | 196 | return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1); |
197 | } | 197 | } |
198 | 198 | ||
199 | static DEVICE_ATTR(port0, S_IWUGO | S_IRUGO, | 199 | static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR, get_port0_handler, set_port0_handler); |
200 | get_port0_handler, set_port0_handler); | ||
201 | 200 | ||
202 | static DEVICE_ATTR(port1, S_IWUGO | S_IRUGO, | 201 | static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR, get_port1_handler, set_port1_handler); |
203 | get_port1_handler, set_port1_handler); | ||
204 | 202 | ||
205 | 203 | ||
206 | static int cypress_probe(struct usb_interface *interface, | 204 | static int cypress_probe(struct usb_interface *interface, |
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c index d77aba46ae85..f63776a48e2a 100644 --- a/drivers/usb/misc/trancevibrator.c +++ b/drivers/usb/misc/trancevibrator.c | |||
@@ -86,7 +86,7 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr, | |||
86 | return count; | 86 | return count; |
87 | } | 87 | } |
88 | 88 | ||
89 | static DEVICE_ATTR(speed, S_IWUGO | S_IRUGO, show_speed, set_speed); | 89 | static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed); |
90 | 90 | ||
91 | static int tv_probe(struct usb_interface *interface, | 91 | static int tv_probe(struct usb_interface *interface, |
92 | const struct usb_device_id *id) | 92 | const struct usb_device_id *id) |
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c index 63da2c3c838f..c96f51de1696 100644 --- a/drivers/usb/misc/usbled.c +++ b/drivers/usb/misc/usbled.c | |||
@@ -94,7 +94,7 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co | |||
94 | change_color(led); \ | 94 | change_color(led); \ |
95 | return count; \ | 95 | return count; \ |
96 | } \ | 96 | } \ |
97 | static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value); | 97 | static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, show_##value, set_##value); |
98 | show_set(blue); | 98 | show_set(blue); |
99 | show_set(red); | 99 | show_set(red); |
100 | show_set(green); | 100 | show_set(green); |
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c index de8ef945b536..417b8f207e8b 100644 --- a/drivers/usb/misc/usbsevseg.c +++ b/drivers/usb/misc/usbsevseg.c | |||
@@ -192,7 +192,7 @@ static ssize_t set_attr_##name(struct device *dev, \ | |||
192 | \ | 192 | \ |
193 | return count; \ | 193 | return count; \ |
194 | } \ | 194 | } \ |
195 | static DEVICE_ATTR(name, S_IWUGO | S_IRUGO, show_attr_##name, set_attr_##name); | 195 | static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_attr_##name, set_attr_##name); |
196 | 196 | ||
197 | static ssize_t show_attr_text(struct device *dev, | 197 | static ssize_t show_attr_text(struct device *dev, |
198 | struct device_attribute *attr, char *buf) | 198 | struct device_attribute *attr, char *buf) |
@@ -223,7 +223,7 @@ static ssize_t set_attr_text(struct device *dev, | |||
223 | return count; | 223 | return count; |
224 | } | 224 | } |
225 | 225 | ||
226 | static DEVICE_ATTR(text, S_IWUGO | S_IRUGO, show_attr_text, set_attr_text); | 226 | static DEVICE_ATTR(text, S_IRUGO | S_IWUSR, show_attr_text, set_attr_text); |
227 | 227 | ||
228 | static ssize_t show_attr_decimals(struct device *dev, | 228 | static ssize_t show_attr_decimals(struct device *dev, |
229 | struct device_attribute *attr, char *buf) | 229 | struct device_attribute *attr, char *buf) |
@@ -272,8 +272,7 @@ static ssize_t set_attr_decimals(struct device *dev, | |||
272 | return count; | 272 | return count; |
273 | } | 273 | } |
274 | 274 | ||
275 | static DEVICE_ATTR(decimals, S_IWUGO | S_IRUGO, | 275 | static DEVICE_ATTR(decimals, S_IRUGO | S_IWUSR, show_attr_decimals, set_attr_decimals); |
276 | show_attr_decimals, set_attr_decimals); | ||
277 | 276 | ||
278 | static ssize_t show_attr_textmode(struct device *dev, | 277 | static ssize_t show_attr_textmode(struct device *dev, |
279 | struct device_attribute *attr, char *buf) | 278 | struct device_attribute *attr, char *buf) |
@@ -319,8 +318,7 @@ static ssize_t set_attr_textmode(struct device *dev, | |||
319 | return -EINVAL; | 318 | return -EINVAL; |
320 | } | 319 | } |
321 | 320 | ||
322 | static DEVICE_ATTR(textmode, S_IWUGO | S_IRUGO, | 321 | static DEVICE_ATTR(textmode, S_IRUGO | S_IWUSR, show_attr_textmode, set_attr_textmode); |
323 | show_attr_textmode, set_attr_textmode); | ||
324 | 322 | ||
325 | 323 | ||
326 | MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered); | 324 | MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered); |
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 44cb37b5a4dc..c436e1e2c3b6 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/poll.h> | 15 | #include <linux/poll.h> |
16 | #include <linux/compat.h> | 16 | #include <linux/compat.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/smp_lock.h> | ||
19 | #include <linux/scatterlist.h> | 18 | #include <linux/scatterlist.h> |
20 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
21 | 20 | ||
diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c index 8ec94f15a738..e5ce42bd316e 100644 --- a/drivers/usb/mon/mon_stat.c +++ b/drivers/usb/mon/mon_stat.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/usb.h> | 12 | #include <linux/usb.h> |
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
14 | #include <linux/smp_lock.h> | ||
15 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
16 | 15 | ||
17 | #include "usb_mon.h" | 16 | #include "usb_mon.h" |
diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c index bdc3ea66be69..9fea48264fa2 100644 --- a/drivers/usb/otg/langwell_otg.c +++ b/drivers/usb/otg/langwell_otg.c | |||
@@ -1896,7 +1896,7 @@ set_a_bus_req(struct device *dev, struct device_attribute *attr, | |||
1896 | } | 1896 | } |
1897 | return count; | 1897 | return count; |
1898 | } | 1898 | } |
1899 | static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req); | 1899 | static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req, set_a_bus_req); |
1900 | 1900 | ||
1901 | static ssize_t | 1901 | static ssize_t |
1902 | get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf) | 1902 | get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf) |
@@ -1942,8 +1942,7 @@ set_a_bus_drop(struct device *dev, struct device_attribute *attr, | |||
1942 | } | 1942 | } |
1943 | return count; | 1943 | return count; |
1944 | } | 1944 | } |
1945 | static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO, | 1945 | static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR, get_a_bus_drop, set_a_bus_drop); |
1946 | get_a_bus_drop, set_a_bus_drop); | ||
1947 | 1946 | ||
1948 | static ssize_t | 1947 | static ssize_t |
1949 | get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf) | 1948 | get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf) |
@@ -1988,7 +1987,7 @@ set_b_bus_req(struct device *dev, struct device_attribute *attr, | |||
1988 | } | 1987 | } |
1989 | return count; | 1988 | return count; |
1990 | } | 1989 | } |
1991 | static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req); | 1990 | static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUSR, get_b_bus_req, set_b_bus_req); |
1992 | 1991 | ||
1993 | static ssize_t | 1992 | static ssize_t |
1994 | set_a_clr_err(struct device *dev, struct device_attribute *attr, | 1993 | set_a_clr_err(struct device *dev, struct device_attribute *attr, |
@@ -2012,7 +2011,7 @@ set_a_clr_err(struct device *dev, struct device_attribute *attr, | |||
2012 | } | 2011 | } |
2013 | return count; | 2012 | return count; |
2014 | } | 2013 | } |
2015 | static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err); | 2014 | static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err); |
2016 | 2015 | ||
2017 | static struct attribute *inputs_attrs[] = { | 2016 | static struct attribute *inputs_attrs[] = { |
2018 | &dev_attr_a_bus_req.attr, | 2017 | &dev_attr_a_bus_req.attr, |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index e64da74bdcc5..861223f2af6e 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/smp_lock.h> | ||
25 | #include <linux/tty.h> | 24 | #include <linux/tty.h> |
26 | #include <linux/tty_driver.h> | 25 | #include <linux/tty_driver.h> |
27 | #include <linux/tty_flip.h> | 26 | #include <linux/tty_flip.h> |
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index a688b1e686ea..689ee1fb702a 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c | |||
@@ -285,7 +285,7 @@ static int slave_configure(struct scsi_device *sdev) | |||
285 | 285 | ||
286 | /* queue a command */ | 286 | /* queue a command */ |
287 | /* This is always called with scsi_lock(host) held */ | 287 | /* This is always called with scsi_lock(host) held */ |
288 | static int queuecommand(struct scsi_cmnd *srb, | 288 | static int queuecommand_lck(struct scsi_cmnd *srb, |
289 | void (*done)(struct scsi_cmnd *)) | 289 | void (*done)(struct scsi_cmnd *)) |
290 | { | 290 | { |
291 | struct us_data *us = host_to_us(srb->device->host); | 291 | struct us_data *us = host_to_us(srb->device->host); |
@@ -315,6 +315,8 @@ static int queuecommand(struct scsi_cmnd *srb, | |||
315 | return 0; | 315 | return 0; |
316 | } | 316 | } |
317 | 317 | ||
318 | static DEF_SCSI_QCMD(queuecommand) | ||
319 | |||
318 | /*********************************************************************** | 320 | /*********************************************************************** |
319 | * Error handling functions | 321 | * Error handling functions |
320 | ***********************************************************************/ | 322 | ***********************************************************************/ |
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c index 57fc2f532cab..ceba512f84d0 100644 --- a/drivers/usb/storage/sierra_ms.c +++ b/drivers/usb/storage/sierra_ms.c | |||
@@ -121,7 +121,7 @@ static ssize_t show_truinst(struct device *dev, struct device_attribute *attr, | |||
121 | } | 121 | } |
122 | return result; | 122 | return result; |
123 | } | 123 | } |
124 | static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL); | 124 | static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL); |
125 | 125 | ||
126 | int sierra_ms_init(struct us_data *us) | 126 | int sierra_ms_init(struct us_data *us) |
127 | { | 127 | { |
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index d1268191acbd..339fac3949df 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c | |||
@@ -430,7 +430,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, | |||
430 | return 0; | 430 | return 0; |
431 | } | 431 | } |
432 | 432 | ||
433 | static int uas_queuecommand(struct scsi_cmnd *cmnd, | 433 | static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, |
434 | void (*done)(struct scsi_cmnd *)) | 434 | void (*done)(struct scsi_cmnd *)) |
435 | { | 435 | { |
436 | struct scsi_device *sdev = cmnd->device; | 436 | struct scsi_device *sdev = cmnd->device; |
@@ -488,6 +488,8 @@ static int uas_queuecommand(struct scsi_cmnd *cmnd, | |||
488 | return 0; | 488 | return 0; |
489 | } | 489 | } |
490 | 490 | ||
491 | static DEF_SCSI_QCMD(uas_queuecommand) | ||
492 | |||
491 | static int uas_eh_abort_handler(struct scsi_cmnd *cmnd) | 493 | static int uas_eh_abort_handler(struct scsi_cmnd *cmnd) |
492 | { | 494 | { |
493 | struct scsi_device *sdev = cmnd->device; | 495 | struct scsi_device *sdev = cmnd->device; |
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index e207810bba3c..08703299ef61 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c | |||
@@ -197,12 +197,12 @@ static int backlight_suspend(struct device *dev, pm_message_t state) | |||
197 | { | 197 | { |
198 | struct backlight_device *bd = to_backlight_device(dev); | 198 | struct backlight_device *bd = to_backlight_device(dev); |
199 | 199 | ||
200 | if (bd->ops->options & BL_CORE_SUSPENDRESUME) { | 200 | mutex_lock(&bd->ops_lock); |
201 | mutex_lock(&bd->ops_lock); | 201 | if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) { |
202 | bd->props.state |= BL_CORE_SUSPENDED; | 202 | bd->props.state |= BL_CORE_SUSPENDED; |
203 | backlight_update_status(bd); | 203 | backlight_update_status(bd); |
204 | mutex_unlock(&bd->ops_lock); | ||
205 | } | 204 | } |
205 | mutex_unlock(&bd->ops_lock); | ||
206 | 206 | ||
207 | return 0; | 207 | return 0; |
208 | } | 208 | } |
@@ -211,12 +211,12 @@ static int backlight_resume(struct device *dev) | |||
211 | { | 211 | { |
212 | struct backlight_device *bd = to_backlight_device(dev); | 212 | struct backlight_device *bd = to_backlight_device(dev); |
213 | 213 | ||
214 | if (bd->ops->options & BL_CORE_SUSPENDRESUME) { | 214 | mutex_lock(&bd->ops_lock); |
215 | mutex_lock(&bd->ops_lock); | 215 | if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) { |
216 | bd->props.state &= ~BL_CORE_SUSPENDED; | 216 | bd->props.state &= ~BL_CORE_SUSPENDED; |
217 | backlight_update_status(bd); | 217 | backlight_update_status(bd); |
218 | mutex_unlock(&bd->ops_lock); | ||
219 | } | 218 | } |
219 | mutex_unlock(&bd->ops_lock); | ||
220 | 220 | ||
221 | return 0; | 221 | return 0; |
222 | } | 222 | } |
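The backlight suspend/resume hunks widen the locking so that bd->ops is only tested and dereferenced while ops_lock is held, and they add a NULL check, because the teardown path can clear the ops pointer concurrently. A sketch of the other side of that race, based on how backlight_device_unregister() drops the ops pointer:

    /* Teardown side (sketch): ops is cleared under the same mutex, so a
     * suspend/resume handler that checks 'ops' under ops_lock can never
     * dereference a pointer that is going away. */
    mutex_lock(&bd->ops_lock);
    bd->ops = NULL;
    mutex_unlock(&bd->ops_lock);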
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 54e32c513610..915448ec75bf 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c | |||
@@ -47,7 +47,6 @@ | |||
47 | #include <linux/ioport.h> | 47 | #include <linux/ioport.h> |
48 | #include <linux/init.h> | 48 | #include <linux/init.h> |
49 | #include <linux/screen_info.h> | 49 | #include <linux/screen_info.h> |
50 | #include <linux/smp_lock.h> | ||
51 | #include <video/vga.h> | 50 | #include <video/vga.h> |
52 | #include <asm/io.h> | 51 | #include <asm/io.h> |
53 | 52 | ||
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c index f53b9f1d6aba..affdf3e32cf3 100644 --- a/drivers/video/fbcmap.c +++ b/drivers/video/fbcmap.c | |||
@@ -88,34 +88,48 @@ static const struct fb_cmap default_16_colors = { | |||
88 | * | 88 | * |
89 | */ | 89 | */ |
90 | 90 | ||
91 | int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp) | 91 | int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags) |
92 | { | 92 | { |
93 | int size = len*sizeof(u16); | 93 | int size = len * sizeof(u16); |
94 | 94 | int ret = -ENOMEM; | |
95 | if (cmap->len != len) { | 95 | |
96 | fb_dealloc_cmap(cmap); | 96 | if (cmap->len != len) { |
97 | if (!len) | 97 | fb_dealloc_cmap(cmap); |
98 | return 0; | 98 | if (!len) |
99 | if (!(cmap->red = kmalloc(size, GFP_ATOMIC))) | 99 | return 0; |
100 | goto fail; | 100 | |
101 | if (!(cmap->green = kmalloc(size, GFP_ATOMIC))) | 101 | cmap->red = kmalloc(size, flags); |
102 | goto fail; | 102 | if (!cmap->red) |
103 | if (!(cmap->blue = kmalloc(size, GFP_ATOMIC))) | 103 | goto fail; |
104 | goto fail; | 104 | cmap->green = kmalloc(size, flags); |
105 | if (transp) { | 105 | if (!cmap->green) |
106 | if (!(cmap->transp = kmalloc(size, GFP_ATOMIC))) | 106 | goto fail; |
107 | cmap->blue = kmalloc(size, flags); | ||
108 | if (!cmap->blue) | ||
109 | goto fail; | ||
110 | if (transp) { | ||
111 | cmap->transp = kmalloc(size, flags); | ||
112 | if (!cmap->transp) | ||
113 | goto fail; | ||
114 | } else { | ||
115 | cmap->transp = NULL; | ||
116 | } | ||
117 | } | ||
118 | cmap->start = 0; | ||
119 | cmap->len = len; | ||
120 | ret = fb_copy_cmap(fb_default_cmap(len), cmap); | ||
121 | if (ret) | ||
107 | goto fail; | 122 | goto fail; |
108 | } else | 123 | return 0; |
109 | cmap->transp = NULL; | ||
110 | } | ||
111 | cmap->start = 0; | ||
112 | cmap->len = len; | ||
113 | fb_copy_cmap(fb_default_cmap(len), cmap); | ||
114 | return 0; | ||
115 | 124 | ||
116 | fail: | 125 | fail: |
117 | fb_dealloc_cmap(cmap); | 126 | fb_dealloc_cmap(cmap); |
118 | return -ENOMEM; | 127 | return ret; |
128 | } | ||
129 | |||
130 | int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp) | ||
131 | { | ||
132 | return fb_alloc_cmap_gfp(cmap, len, transp, GFP_ATOMIC); | ||
119 | } | 133 | } |
120 | 134 | ||
121 | /** | 135 | /** |
@@ -250,8 +264,12 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info) | |||
250 | int rc, size = cmap->len * sizeof(u16); | 264 | int rc, size = cmap->len * sizeof(u16); |
251 | struct fb_cmap umap; | 265 | struct fb_cmap umap; |
252 | 266 | ||
267 | if (size < 0 || size < cmap->len) | ||
268 | return -E2BIG; | ||
269 | |||
253 | memset(&umap, 0, sizeof(struct fb_cmap)); | 270 | memset(&umap, 0, sizeof(struct fb_cmap)); |
254 | rc = fb_alloc_cmap(&umap, cmap->len, cmap->transp != NULL); | 271 | rc = fb_alloc_cmap_gfp(&umap, cmap->len, cmap->transp != NULL, |
272 | GFP_KERNEL); | ||
255 | if (rc) | 273 | if (rc) |
256 | return rc; | 274 | return rc; |
257 | if (copy_from_user(umap.red, cmap->red, size) || | 275 | if (copy_from_user(umap.red, cmap->red, size) || |
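The fbcmap rework does two things: fb_alloc_cmap() gains a fb_alloc_cmap_gfp() variant so the ioctl path in fb_set_user_cmap(), which runs in process context, can allocate with GFP_KERNEL instead of the historical GFP_ATOMIC, and a size check rejects user-supplied lengths whose byte count would overflow. The overflow case the check guards against, assuming a 32-bit int:

    /* 'size' is an int, so a large user-supplied cmap->len can wrap it:
     * len = 0x40000001 makes size negative, len = 0x80000000 makes it 0,
     * and the byte count no longer matches the length that was requested.
     * Rejecting such lengths up front avoids under-sized allocations and
     * bogus copy_from_user() sizes. */
    if (size < 0 || size < cmap->len)
            return -E2BIG;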
diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c index bc35a95e59d4..85ec7f64c42a 100644 --- a/drivers/video/geode/lxfb_ops.c +++ b/drivers/video/geode/lxfb_ops.c | |||
@@ -276,10 +276,10 @@ static void lx_graphics_enable(struct fb_info *info) | |||
276 | write_fp(par, FP_PT1, 0); | 276 | write_fp(par, FP_PT1, 0); |
277 | temp = FP_PT2_SCRC; | 277 | temp = FP_PT2_SCRC; |
278 | 278 | ||
279 | if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) | 279 | if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT)) |
280 | temp |= FP_PT2_HSP; | 280 | temp |= FP_PT2_HSP; |
281 | 281 | ||
282 | if (info->var.sync & FB_SYNC_VERT_HIGH_ACT) | 282 | if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT)) |
283 | temp |= FP_PT2_VSP; | 283 | temp |= FP_PT2_VSP; |
284 | 284 | ||
285 | write_fp(par, FP_PT2, temp); | 285 | write_fp(par, FP_PT2, temp); |
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index 7cfc170bce19..ca0f6be9d12e 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/clk.h> | 27 | #include <linux/clk.h> |
28 | #include <linux/mutex.h> | 28 | #include <linux/mutex.h> |
29 | 29 | ||
30 | #include <mach/dma.h> | ||
30 | #include <mach/hardware.h> | 31 | #include <mach/hardware.h> |
31 | #include <mach/ipu.h> | 32 | #include <mach/ipu.h> |
32 | #include <mach/mx3fb.h> | 33 | #include <mach/mx3fb.h> |
@@ -1420,6 +1421,9 @@ static bool chan_filter(struct dma_chan *chan, void *arg) | |||
1420 | struct device *dev; | 1421 | struct device *dev; |
1421 | struct mx3fb_platform_data *mx3fb_pdata; | 1422 | struct mx3fb_platform_data *mx3fb_pdata; |
1422 | 1423 | ||
1424 | if (!imx_dma_is_ipu(chan)) | ||
1425 | return false; | ||
1426 | |||
1423 | if (!rq) | 1427 | if (!rq) |
1424 | return false; | 1428 | return false; |
1425 | 1429 | ||
diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c index fed2a72bc6b6..2fd7e5271be9 100644 --- a/drivers/video/omap2/vram.c +++ b/drivers/video/omap2/vram.c | |||
@@ -554,9 +554,15 @@ void __init omap_vram_reserve_sdram_memblock(void) | |||
554 | size = PAGE_ALIGN(size); | 554 | size = PAGE_ALIGN(size); |
555 | 555 | ||
556 | if (paddr) { | 556 | if (paddr) { |
557 | if ((paddr & ~PAGE_MASK) || | 557 | if (paddr & ~PAGE_MASK) { |
558 | !memblock_is_region_memory(paddr, size)) { | 558 | pr_err("VRAM start address 0x%08x not page aligned\n", |
559 | pr_err("Illegal SDRAM region for VRAM\n"); | 559 | paddr); |
560 | return; | ||
561 | } | ||
562 | |||
563 | if (!memblock_is_region_memory(paddr, size)) { | ||
564 | pr_err("Illegal SDRAM region 0x%08x..0x%08x for VRAM\n", | ||
565 | paddr, paddr + size - 1); | ||
560 | return; | 566 | return; |
561 | } | 567 | } |
562 | 568 | ||
@@ -570,9 +576,12 @@ void __init omap_vram_reserve_sdram_memblock(void) | |||
570 | return; | 576 | return; |
571 | } | 577 | } |
572 | } else { | 578 | } else { |
573 | paddr = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_REAL_LIMIT); | 579 | paddr = memblock_alloc(size, PAGE_SIZE); |
574 | } | 580 | } |
575 | 581 | ||
582 | memblock_free(paddr, size); | ||
583 | memblock_remove(paddr, size); | ||
584 | |||
576 | omap_vram_add_region(paddr, size); | 585 | omap_vram_add_region(paddr, size); |
577 | 586 | ||
578 | pr_info("Reserving %u bytes SDRAM for VRAM\n", size); | 587 | pr_info("Reserving %u bytes SDRAM for VRAM\n", size); |
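Besides the more descriptive error messages, the OMAP VRAM reservation now allocates with plain memblock_alloc() (the old MEMBLOCK_REAL_LIMIT argument is gone) and then immediately frees and removes the range, so the VRAM pages are taken out of the kernel's memory map altogether instead of merely sitting on the reserved list. Condensed, the steal-memory sequence used above is:

    /* Reserve a page-aligned chunk, then drop it from both the reserved
     * and the memory lists; the kernel never maps it and the driver owns
     * the physical range outright. */
    paddr = memblock_alloc(size, PAGE_SIZE);
    memblock_free(paddr, size);
    memblock_remove(paddr, size);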
diff --git a/drivers/video/riva/rivafb-i2c.c b/drivers/video/riva/rivafb-i2c.c index a0e22ac483a3..167400e2a182 100644 --- a/drivers/video/riva/rivafb-i2c.c +++ b/drivers/video/riva/rivafb-i2c.c | |||
@@ -94,7 +94,6 @@ static int __devinit riva_setup_i2c_bus(struct riva_i2c_chan *chan, | |||
94 | 94 | ||
95 | strcpy(chan->adapter.name, name); | 95 | strcpy(chan->adapter.name, name); |
96 | chan->adapter.owner = THIS_MODULE; | 96 | chan->adapter.owner = THIS_MODULE; |
97 | chan->adapter.id = I2C_HW_B_RIVA; | ||
98 | chan->adapter.class = i2c_class; | 97 | chan->adapter.class = i2c_class; |
99 | chan->adapter.algo_data = &chan->algo; | 98 | chan->adapter.algo_data = &chan->algo; |
100 | chan->adapter.dev.parent = &chan->par->pdev->dev; | 99 | chan->adapter.dev.parent = &chan->par->pdev->dev; |
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c index 55b3077ff6ff..d7df10315d8d 100644 --- a/drivers/video/sh_mobile_hdmi.c +++ b/drivers/video/sh_mobile_hdmi.c | |||
@@ -1071,6 +1071,10 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work) | |||
1071 | if (!hdmi->info) | 1071 | if (!hdmi->info) |
1072 | goto out; | 1072 | goto out; |
1073 | 1073 | ||
1074 | hdmi->monspec.modedb_len = 0; | ||
1075 | fb_destroy_modedb(hdmi->monspec.modedb); | ||
1076 | hdmi->monspec.modedb = NULL; | ||
1077 | |||
1074 | acquire_console_sem(); | 1078 | acquire_console_sem(); |
1075 | 1079 | ||
1076 | /* HDMI disconnect */ | 1080 | /* HDMI disconnect */ |
@@ -1078,7 +1082,6 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work) | |||
1078 | 1082 | ||
1079 | release_console_sem(); | 1083 | release_console_sem(); |
1080 | pm_runtime_put(hdmi->dev); | 1084 | pm_runtime_put(hdmi->dev); |
1081 | fb_destroy_modedb(hdmi->monspec.modedb); | ||
1082 | } | 1085 | } |
1083 | 1086 | ||
1084 | out: | 1087 | out: |
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 50963739a409..b02d97a879d6 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -115,15 +115,16 @@ static const struct fb_videomode default_720p = { | |||
115 | .xres = 1280, | 115 | .xres = 1280, |
116 | .yres = 720, | 116 | .yres = 720, |
117 | 117 | ||
118 | .left_margin = 200, | 118 | .left_margin = 220, |
119 | .right_margin = 88, | 119 | .right_margin = 110, |
120 | .hsync_len = 48, | 120 | .hsync_len = 40, |
121 | 121 | ||
122 | .upper_margin = 20, | 122 | .upper_margin = 20, |
123 | .lower_margin = 5, | 123 | .lower_margin = 5, |
124 | .vsync_len = 5, | 124 | .vsync_len = 5, |
125 | 125 | ||
126 | .pixclock = 13468, | 126 | .pixclock = 13468, |
127 | .refresh = 60, | ||
127 | .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT, | 128 | .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT, |
128 | }; | 129 | }; |
129 | 130 | ||
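The new default_720p margins are the standard CEA-861 1280x720@60 timing, which is also why the pixclock value did not need to change; the arithmetic (worked out here for reference, not part of the patch) is:

    /* 1280x720@60 per CEA-861:
     *   htotal = 1280 + 220 + 110 + 40 = 1650 pixels
     *   vtotal =  720 +  20 +   5 +  5 =  750 lines
     *   pixel clock = 1650 * 750 * 60 Hz = 74.25 MHz
     *   fb pixclock = picoseconds per pixel = 10^12 / 74,250,000 ~= 13468
     */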
@@ -859,7 +860,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info) | |||
859 | /* Couldn't reconfigure, hopefully, can continue as before */ | 860 | /* Couldn't reconfigure, hopefully, can continue as before */ |
860 | return; | 861 | return; |
861 | 862 | ||
862 | info->fix.line_length = mode2.xres * (ch->cfg.bpp / 8); | 863 | info->fix.line_length = mode1.xres * (ch->cfg.bpp / 8); |
863 | 864 | ||
864 | /* | 865 | /* |
865 | * fb_set_var() calls the notifier change internally, only if | 866 | * fb_set_var() calls the notifier change internally, only if |
@@ -867,7 +868,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info) | |||
867 | * user event, we have to call the chain ourselves. | 868 | * user event, we have to call the chain ourselves. |
868 | */ | 869 | */ |
869 | event.info = info; | 870 | event.info = info; |
870 | event.data = &mode2; | 871 | event.data = &mode1; |
871 | fb_notifier_call_chain(evnt, &event); | 872 | fb_notifier_call_chain(evnt, &event); |
872 | } | 873 | } |
873 | 874 | ||
@@ -1197,6 +1198,7 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
1197 | const struct fb_videomode *mode = cfg->lcd_cfg; | 1198 | const struct fb_videomode *mode = cfg->lcd_cfg; |
1198 | unsigned long max_size = 0; | 1199 | unsigned long max_size = 0; |
1199 | int k; | 1200 | int k; |
1201 | int num_cfg; | ||
1200 | 1202 | ||
1201 | ch->info = framebuffer_alloc(0, &pdev->dev); | 1203 | ch->info = framebuffer_alloc(0, &pdev->dev); |
1202 | if (!ch->info) { | 1204 | if (!ch->info) { |
@@ -1232,8 +1234,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
1232 | info->fix = sh_mobile_lcdc_fix; | 1234 | info->fix = sh_mobile_lcdc_fix; |
1233 | info->fix.smem_len = max_size * (cfg->bpp / 8) * 2; | 1235 | info->fix.smem_len = max_size * (cfg->bpp / 8) * 2; |
1234 | 1236 | ||
1235 | if (!mode) | 1237 | if (!mode) { |
1236 | mode = &default_720p; | 1238 | mode = &default_720p; |
1239 | num_cfg = 1; | ||
1240 | } else { | ||
1241 | num_cfg = ch->cfg.num_cfg; | ||
1242 | } | ||
1243 | |||
1244 | fb_videomode_to_modelist(mode, num_cfg, &info->modelist); | ||
1237 | 1245 | ||
1238 | fb_videomode_to_var(var, mode); | 1246 | fb_videomode_to_var(var, mode); |
1239 | /* Default Y virtual resolution is 2x panel size */ | 1247 | /* Default Y virtual resolution is 2x panel size */ |
@@ -1281,10 +1289,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
1281 | 1289 | ||
1282 | for (i = 0; i < j; i++) { | 1290 | for (i = 0; i < j; i++) { |
1283 | struct sh_mobile_lcdc_chan *ch = priv->ch + i; | 1291 | struct sh_mobile_lcdc_chan *ch = priv->ch + i; |
1284 | const struct fb_videomode *mode = ch->cfg.lcd_cfg; | ||
1285 | |||
1286 | if (!mode) | ||
1287 | mode = &default_720p; | ||
1288 | 1292 | ||
1289 | info = ch->info; | 1293 | info = ch->info; |
1290 | 1294 | ||
@@ -1297,7 +1301,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
1297 | } | 1301 | } |
1298 | } | 1302 | } |
1299 | 1303 | ||
1300 | fb_videomode_to_modelist(mode, ch->cfg.num_cfg, &info->modelist); | ||
1301 | error = register_framebuffer(info); | 1304 | error = register_framebuffer(info); |
1302 | if (error < 0) | 1305 | if (error < 0) |
1303 | goto err1; | 1306 | goto err1; |
diff --git a/drivers/video/sis/init.c b/drivers/video/sis/init.c index c311ad3c3687..31137adc8fba 100644 --- a/drivers/video/sis/init.c +++ b/drivers/video/sis/init.c | |||
@@ -62,11 +62,11 @@ | |||
62 | 62 | ||
63 | #include "init.h" | 63 | #include "init.h" |
64 | 64 | ||
65 | #ifdef SIS300 | 65 | #ifdef CONFIG_FB_SIS_300 |
66 | #include "300vtbl.h" | 66 | #include "300vtbl.h" |
67 | #endif | 67 | #endif |
68 | 68 | ||
69 | #ifdef SIS315H | 69 | #ifdef CONFIG_FB_SIS_315 |
70 | #include "310vtbl.h" | 70 | #include "310vtbl.h" |
71 | #endif | 71 | #endif |
72 | 72 | ||
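From here on, sis/init.c replaces its private SIS300/SIS315H build symbols with the Kconfig options CONFIG_FB_SIS_300 and CONFIG_FB_SIS_315, so which chip families get compiled in is decided by the framebuffer configuration rather than by a driver-local header. In summary form:

    /* One Kconfig symbol per chip family; a kernel configured without a
     * family simply does not contain its mode tables or code paths. */
    #ifdef CONFIG_FB_SIS_300        /* SiS 300/540/630/730 */
    #include "300vtbl.h"
    #endif
    #ifdef CONFIG_FB_SIS_315        /* SiS 315 and later, incl. XGI */
    #include "310vtbl.h"
    #endif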
@@ -78,7 +78,7 @@ | |||
78 | /* POINTER INITIALIZATION */ | 78 | /* POINTER INITIALIZATION */ |
79 | /*********************************************/ | 79 | /*********************************************/ |
80 | 80 | ||
81 | #if defined(SIS300) || defined(SIS315H) | 81 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
82 | static void | 82 | static void |
83 | InitCommonPointer(struct SiS_Private *SiS_Pr) | 83 | InitCommonPointer(struct SiS_Private *SiS_Pr) |
84 | { | 84 | { |
@@ -160,7 +160,7 @@ InitCommonPointer(struct SiS_Private *SiS_Pr) | |||
160 | } | 160 | } |
161 | #endif | 161 | #endif |
162 | 162 | ||
163 | #ifdef SIS300 | 163 | #ifdef CONFIG_FB_SIS_300 |
164 | static void | 164 | static void |
165 | InitTo300Pointer(struct SiS_Private *SiS_Pr) | 165 | InitTo300Pointer(struct SiS_Private *SiS_Pr) |
166 | { | 166 | { |
@@ -237,7 +237,7 @@ InitTo300Pointer(struct SiS_Private *SiS_Pr) | |||
237 | } | 237 | } |
238 | #endif | 238 | #endif |
239 | 239 | ||
240 | #ifdef SIS315H | 240 | #ifdef CONFIG_FB_SIS_315 |
241 | static void | 241 | static void |
242 | InitTo310Pointer(struct SiS_Private *SiS_Pr) | 242 | InitTo310Pointer(struct SiS_Private *SiS_Pr) |
243 | { | 243 | { |
@@ -321,13 +321,13 @@ bool | |||
321 | SiSInitPtr(struct SiS_Private *SiS_Pr) | 321 | SiSInitPtr(struct SiS_Private *SiS_Pr) |
322 | { | 322 | { |
323 | if(SiS_Pr->ChipType < SIS_315H) { | 323 | if(SiS_Pr->ChipType < SIS_315H) { |
324 | #ifdef SIS300 | 324 | #ifdef CONFIG_FB_SIS_300 |
325 | InitTo300Pointer(SiS_Pr); | 325 | InitTo300Pointer(SiS_Pr); |
326 | #else | 326 | #else |
327 | return false; | 327 | return false; |
328 | #endif | 328 | #endif |
329 | } else { | 329 | } else { |
330 | #ifdef SIS315H | 330 | #ifdef CONFIG_FB_SIS_315 |
331 | InitTo310Pointer(SiS_Pr); | 331 | InitTo310Pointer(SiS_Pr); |
332 | #else | 332 | #else |
333 | return false; | 333 | return false; |
@@ -340,9 +340,7 @@ SiSInitPtr(struct SiS_Private *SiS_Pr) | |||
340 | /* HELPER: Get ModeID */ | 340 | /* HELPER: Get ModeID */ |
341 | /*********************************************/ | 341 | /*********************************************/ |
342 | 342 | ||
343 | #ifndef SIS_XORG_XF86 | ||
344 | static | 343 | static |
345 | #endif | ||
346 | unsigned short | 344 | unsigned short |
347 | SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, | 345 | SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, |
348 | int Depth, bool FSTN, int LCDwidth, int LCDheight) | 346 | int Depth, bool FSTN, int LCDwidth, int LCDheight) |
@@ -884,51 +882,51 @@ SiS_GetModeID_VGA2(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDispl | |||
884 | void | 882 | void |
885 | SiS_SetReg(SISIOADDRESS port, unsigned short index, unsigned short data) | 883 | SiS_SetReg(SISIOADDRESS port, unsigned short index, unsigned short data) |
886 | { | 884 | { |
887 | OutPortByte(port, index); | 885 | outb((u8)index, port); |
888 | OutPortByte(port + 1, data); | 886 | outb((u8)data, port + 1); |
889 | } | 887 | } |
890 | 888 | ||
891 | void | 889 | void |
892 | SiS_SetRegByte(SISIOADDRESS port, unsigned short data) | 890 | SiS_SetRegByte(SISIOADDRESS port, unsigned short data) |
893 | { | 891 | { |
894 | OutPortByte(port, data); | 892 | outb((u8)data, port); |
895 | } | 893 | } |
896 | 894 | ||
897 | void | 895 | void |
898 | SiS_SetRegShort(SISIOADDRESS port, unsigned short data) | 896 | SiS_SetRegShort(SISIOADDRESS port, unsigned short data) |
899 | { | 897 | { |
900 | OutPortWord(port, data); | 898 | outw((u16)data, port); |
901 | } | 899 | } |
902 | 900 | ||
903 | void | 901 | void |
904 | SiS_SetRegLong(SISIOADDRESS port, unsigned int data) | 902 | SiS_SetRegLong(SISIOADDRESS port, unsigned int data) |
905 | { | 903 | { |
906 | OutPortLong(port, data); | 904 | outl((u32)data, port); |
907 | } | 905 | } |
908 | 906 | ||
909 | unsigned char | 907 | unsigned char |
910 | SiS_GetReg(SISIOADDRESS port, unsigned short index) | 908 | SiS_GetReg(SISIOADDRESS port, unsigned short index) |
911 | { | 909 | { |
912 | OutPortByte(port, index); | 910 | outb((u8)index, port); |
913 | return(InPortByte(port + 1)); | 911 | return inb(port + 1); |
914 | } | 912 | } |
915 | 913 | ||
916 | unsigned char | 914 | unsigned char |
917 | SiS_GetRegByte(SISIOADDRESS port) | 915 | SiS_GetRegByte(SISIOADDRESS port) |
918 | { | 916 | { |
919 | return(InPortByte(port)); | 917 | return inb(port); |
920 | } | 918 | } |
921 | 919 | ||
922 | unsigned short | 920 | unsigned short |
923 | SiS_GetRegShort(SISIOADDRESS port) | 921 | SiS_GetRegShort(SISIOADDRESS port) |
924 | { | 922 | { |
925 | return(InPortWord(port)); | 923 | return inw(port); |
926 | } | 924 | } |
927 | 925 | ||
928 | unsigned int | 926 | unsigned int |
929 | SiS_GetRegLong(SISIOADDRESS port) | 927 | SiS_GetRegLong(SISIOADDRESS port) |
930 | { | 928 | { |
931 | return(InPortLong(port)); | 929 | return inl(port); |
932 | } | 930 | } |
933 | 931 | ||
934 | void | 932 | void |
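With the X.org/XFree86 build of this code gone, the OutPortByte()/InPortByte() wrappers that papered over the difference between kernel and DDX I/O primitives are replaced by the kernel's own outb()/inb() family, as the hunk above shows. A minimal sketch of the indexed-register idiom these accessors implement (helper names here are illustrative, not from the driver):

    /* Indexed VGA-style register access: write the index to the base port,
     * then transfer the data at base + 1. */
    static void sis_write_indexed(unsigned long port, u8 index, u8 data)
    {
            outb(index, port);
            outb(data, port + 1);
    }

    static u8 sis_read_indexed(unsigned long port, u8 index)
    {
            outb(index, port);
            return inb(port + 1);
    }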
@@ -1089,7 +1087,7 @@ static void | |||
1089 | SiSInitPCIetc(struct SiS_Private *SiS_Pr) | 1087 | SiSInitPCIetc(struct SiS_Private *SiS_Pr) |
1090 | { | 1088 | { |
1091 | switch(SiS_Pr->ChipType) { | 1089 | switch(SiS_Pr->ChipType) { |
1092 | #ifdef SIS300 | 1090 | #ifdef CONFIG_FB_SIS_300 |
1093 | case SIS_300: | 1091 | case SIS_300: |
1094 | case SIS_540: | 1092 | case SIS_540: |
1095 | case SIS_630: | 1093 | case SIS_630: |
@@ -1108,7 +1106,7 @@ SiSInitPCIetc(struct SiS_Private *SiS_Pr) | |||
1108 | SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x5A); | 1106 | SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x5A); |
1109 | break; | 1107 | break; |
1110 | #endif | 1108 | #endif |
1111 | #ifdef SIS315H | 1109 | #ifdef CONFIG_FB_SIS_315 |
1112 | case SIS_315H: | 1110 | case SIS_315H: |
1113 | case SIS_315: | 1111 | case SIS_315: |
1114 | case SIS_315PRO: | 1112 | case SIS_315PRO: |
@@ -1152,9 +1150,7 @@ SiSInitPCIetc(struct SiS_Private *SiS_Pr) | |||
1152 | /* HELPER: SetLVDSetc */ | 1150 | /* HELPER: SetLVDSetc */ |
1153 | /*********************************************/ | 1151 | /*********************************************/ |
1154 | 1152 | ||
1155 | #ifdef SIS_LINUX_KERNEL | ||
1156 | static | 1153 | static |
1157 | #endif | ||
1158 | void | 1154 | void |
1159 | SiSSetLVDSetc(struct SiS_Private *SiS_Pr) | 1155 | SiSSetLVDSetc(struct SiS_Private *SiS_Pr) |
1160 | { | 1156 | { |
@@ -1174,7 +1170,7 @@ SiSSetLVDSetc(struct SiS_Private *SiS_Pr) | |||
1174 | if((temp == 1) || (temp == 2)) return; | 1170 | if((temp == 1) || (temp == 2)) return; |
1175 | 1171 | ||
1176 | switch(SiS_Pr->ChipType) { | 1172 | switch(SiS_Pr->ChipType) { |
1177 | #ifdef SIS300 | 1173 | #ifdef CONFIG_FB_SIS_300 |
1178 | case SIS_540: | 1174 | case SIS_540: |
1179 | case SIS_630: | 1175 | case SIS_630: |
1180 | case SIS_730: | 1176 | case SIS_730: |
@@ -1188,7 +1184,7 @@ SiSSetLVDSetc(struct SiS_Private *SiS_Pr) | |||
1188 | } | 1184 | } |
1189 | break; | 1185 | break; |
1190 | #endif | 1186 | #endif |
1191 | #ifdef SIS315H | 1187 | #ifdef CONFIG_FB_SIS_315 |
1192 | case SIS_550: | 1188 | case SIS_550: |
1193 | case SIS_650: | 1189 | case SIS_650: |
1194 | case SIS_740: | 1190 | case SIS_740: |
@@ -1420,9 +1416,7 @@ SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr) | |||
1420 | /* HELPER: GetVBType */ | 1416 | /* HELPER: GetVBType */ |
1421 | /*********************************************/ | 1417 | /*********************************************/ |
1422 | 1418 | ||
1423 | #ifdef SIS_LINUX_KERNEL | ||
1424 | static | 1419 | static |
1425 | #endif | ||
1426 | void | 1420 | void |
1427 | SiS_GetVBType(struct SiS_Private *SiS_Pr) | 1421 | SiS_GetVBType(struct SiS_Private *SiS_Pr) |
1428 | { | 1422 | { |
@@ -1487,7 +1481,6 @@ SiS_GetVBType(struct SiS_Private *SiS_Pr) | |||
1487 | /* HELPER: Check RAM size */ | 1481 | /* HELPER: Check RAM size */ |
1488 | /*********************************************/ | 1482 | /*********************************************/ |
1489 | 1483 | ||
1490 | #ifdef SIS_LINUX_KERNEL | ||
1491 | static bool | 1484 | static bool |
1492 | SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | 1485 | SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo, |
1493 | unsigned short ModeIdIndex) | 1486 | unsigned short ModeIdIndex) |
@@ -1501,13 +1494,12 @@ SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
1501 | if(AdapterMemSize < memorysize) return false; | 1494 | if(AdapterMemSize < memorysize) return false; |
1502 | return true; | 1495 | return true; |
1503 | } | 1496 | } |
1504 | #endif | ||
1505 | 1497 | ||
1506 | /*********************************************/ | 1498 | /*********************************************/ |
1507 | /* HELPER: Get DRAM type */ | 1499 | /* HELPER: Get DRAM type */ |
1508 | /*********************************************/ | 1500 | /*********************************************/ |
1509 | 1501 | ||
1510 | #ifdef SIS315H | 1502 | #ifdef CONFIG_FB_SIS_315 |
1511 | static unsigned char | 1503 | static unsigned char |
1512 | SiS_Get310DRAMType(struct SiS_Private *SiS_Pr) | 1504 | SiS_Get310DRAMType(struct SiS_Private *SiS_Pr) |
1513 | { | 1505 | { |
@@ -1574,7 +1566,6 @@ SiS_GetMCLK(struct SiS_Private *SiS_Pr) | |||
1574 | /* HELPER: ClearBuffer */ | 1566 | /* HELPER: ClearBuffer */ |
1575 | /*********************************************/ | 1567 | /*********************************************/ |
1576 | 1568 | ||
1577 | #ifdef SIS_LINUX_KERNEL | ||
1578 | static void | 1569 | static void |
1579 | SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | 1570 | SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) |
1580 | { | 1571 | { |
@@ -1587,7 +1578,7 @@ SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
1587 | 1578 | ||
1588 | if(SiS_Pr->SiS_ModeType >= ModeEGA) { | 1579 | if(SiS_Pr->SiS_ModeType >= ModeEGA) { |
1589 | if(ModeNo > 0x13) { | 1580 | if(ModeNo > 0x13) { |
1590 | SiS_SetMemory(memaddr, memsize, 0); | 1581 | memset_io(memaddr, 0, memsize); |
1591 | } else { | 1582 | } else { |
1592 | pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; | 1583 | pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; |
1593 | for(i = 0; i < 0x4000; i++) writew(0x0000, &pBuffer[i]); | 1584 | for(i = 0; i < 0x4000; i++) writew(0x0000, &pBuffer[i]); |
@@ -1596,10 +1587,9 @@ SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
1596 | pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; | 1587 | pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; |
1597 | for(i = 0; i < 0x4000; i++) writew(0x0720, &pBuffer[i]); | 1588 | for(i = 0; i < 0x4000; i++) writew(0x0720, &pBuffer[i]); |
1598 | } else { | 1589 | } else { |
1599 | SiS_SetMemory(memaddr, 0x8000, 0); | 1590 | memset_io(memaddr, 0, 0x8000); |
1600 | } | 1591 | } |
1601 | } | 1592 | } |
1602 | #endif | ||
1603 | 1593 | ||
1604 | /*********************************************/ | 1594 | /*********************************************/ |
1605 | /* HELPER: SearchModeID */ | 1595 | /* HELPER: SearchModeID */ |
@@ -2132,7 +2122,7 @@ SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2132 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x14,0x4F); | 2122 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x14,0x4F); |
2133 | } | 2123 | } |
2134 | 2124 | ||
2135 | #ifdef SIS315H | 2125 | #ifdef CONFIG_FB_SIS_315 |
2136 | if(SiS_Pr->ChipType == XGI_20) { | 2126 | if(SiS_Pr->ChipType == XGI_20) { |
2137 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x04,crt1data[4] - 1); | 2127 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x04,crt1data[4] - 1); |
2138 | if(!(temp = crt1data[5] & 0x1f)) { | 2128 | if(!(temp = crt1data[5] & 0x1f)) { |
@@ -2215,7 +2205,7 @@ SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2215 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x2c,clkb); | 2205 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x2c,clkb); |
2216 | 2206 | ||
2217 | if(SiS_Pr->ChipType >= SIS_315H) { | 2207 | if(SiS_Pr->ChipType >= SIS_315H) { |
2218 | #ifdef SIS315H | 2208 | #ifdef CONFIG_FB_SIS_315 |
2219 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x2D,0x01); | 2209 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x2D,0x01); |
2220 | if(SiS_Pr->ChipType == XGI_20) { | 2210 | if(SiS_Pr->ChipType == XGI_20) { |
2221 | unsigned short mf = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex); | 2211 | unsigned short mf = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex); |
@@ -2236,7 +2226,7 @@ SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2236 | /* FIFO */ | 2226 | /* FIFO */ |
2237 | /*********************************************/ | 2227 | /*********************************************/ |
2238 | 2228 | ||
2239 | #ifdef SIS300 | 2229 | #ifdef CONFIG_FB_SIS_300 |
2240 | void | 2230 | void |
2241 | SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1, | 2231 | SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1, |
2242 | unsigned short *idx2) | 2232 | unsigned short *idx2) |
@@ -2506,11 +2496,7 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2506 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x09,0x80,data); | 2496 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x09,0x80,data); |
2507 | 2497 | ||
2508 | /* Write foreground and background queue */ | 2498 | /* Write foreground and background queue */ |
2509 | #ifdef SIS_LINUX_KERNEL | ||
2510 | templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); | 2499 | templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); |
2511 | #else | ||
2512 | templ = pciReadLong(0x00000000, 0x50); | ||
2513 | #endif | ||
2514 | 2500 | ||
2515 | if(SiS_Pr->ChipType == SIS_730) { | 2501 | if(SiS_Pr->ChipType == SIS_730) { |
2516 | 2502 | ||
@@ -2530,13 +2516,8 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2530 | 2516 | ||
2531 | } | 2517 | } |
2532 | 2518 | ||
2533 | #ifdef SIS_LINUX_KERNEL | ||
2534 | sisfb_write_nbridge_pci_dword(SiS_Pr, 0x50, templ); | 2519 | sisfb_write_nbridge_pci_dword(SiS_Pr, 0x50, templ); |
2535 | templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xA0); | 2520 | templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xA0); |
2536 | #else | ||
2537 | pciWriteLong(0x00000000, 0x50, templ); | ||
2538 | templ = pciReadLong(0x00000000, 0xA0); | ||
2539 | #endif | ||
2540 | 2521 | ||
2541 | /* GUI grant timer (PCI config 0xA3) */ | 2522 | /* GUI grant timer (PCI config 0xA3) */ |
2542 | if(SiS_Pr->ChipType == SIS_730) { | 2523 | if(SiS_Pr->ChipType == SIS_730) { |
@@ -2552,15 +2533,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2552 | 2533 | ||
2553 | } | 2534 | } |
2554 | 2535 | ||
2555 | #ifdef SIS_LINUX_KERNEL | ||
2556 | sisfb_write_nbridge_pci_dword(SiS_Pr, 0xA0, templ); | 2536 | sisfb_write_nbridge_pci_dword(SiS_Pr, 0xA0, templ); |
2557 | #else | ||
2558 | pciWriteLong(0x00000000, 0xA0, templ); | ||
2559 | #endif | ||
2560 | } | 2537 | } |
2561 | #endif /* SIS300 */ | 2538 | #endif /* CONFIG_FB_SIS_300 */ |
2562 | 2539 | ||
2563 | #ifdef SIS315H | 2540 | #ifdef CONFIG_FB_SIS_315 |
2564 | static void | 2541 | static void |
2565 | SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) | 2542 | SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) |
2566 | { | 2543 | { |
@@ -2612,7 +2589,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2612 | } | 2589 | } |
2613 | 2590 | ||
2614 | if(SiS_Pr->ChipType < SIS_315H) { | 2591 | if(SiS_Pr->ChipType < SIS_315H) { |
2615 | #ifdef SIS300 | 2592 | #ifdef CONFIG_FB_SIS_300 |
2616 | if(VCLK > 150) data |= 0x80; | 2593 | if(VCLK > 150) data |= 0x80; |
2617 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x07,0x7B,data); | 2594 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x07,0x7B,data); |
2618 | 2595 | ||
@@ -2621,7 +2598,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2621 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xF7,data); | 2598 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xF7,data); |
2622 | #endif | 2599 | #endif |
2623 | } else if(SiS_Pr->ChipType < XGI_20) { | 2600 | } else if(SiS_Pr->ChipType < XGI_20) { |
2624 | #ifdef SIS315H | 2601 | #ifdef CONFIG_FB_SIS_315 |
2625 | if(VCLK >= 166) data |= 0x0c; | 2602 | if(VCLK >= 166) data |= 0x0c; |
2626 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data); | 2603 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data); |
2627 | 2604 | ||
@@ -2630,7 +2607,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2630 | } | 2607 | } |
2631 | #endif | 2608 | #endif |
2632 | } else { | 2609 | } else { |
2633 | #ifdef SIS315H | 2610 | #ifdef CONFIG_FB_SIS_315 |
2634 | if(VCLK >= 200) data |= 0x0c; | 2611 | if(VCLK >= 200) data |= 0x0c; |
2635 | if(SiS_Pr->ChipType == XGI_20) data &= ~0x04; | 2612 | if(SiS_Pr->ChipType == XGI_20) data &= ~0x04; |
2636 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data); | 2613 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data); |
@@ -2675,7 +2652,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2675 | unsigned short ModeIdIndex, unsigned short RRTI) | 2652 | unsigned short ModeIdIndex, unsigned short RRTI) |
2676 | { | 2653 | { |
2677 | unsigned short data, infoflag = 0, modeflag, resindex; | 2654 | unsigned short data, infoflag = 0, modeflag, resindex; |
2678 | #ifdef SIS315H | 2655 | #ifdef CONFIG_FB_SIS_315 |
2679 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 2656 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
2680 | unsigned short data2, data3; | 2657 | unsigned short data2, data3; |
2681 | #endif | 2658 | #endif |
@@ -2736,7 +2713,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2736 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0F,0xB7,data); | 2713 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0F,0xB7,data); |
2737 | } | 2714 | } |
2738 | 2715 | ||
2739 | #ifdef SIS315H | 2716 | #ifdef CONFIG_FB_SIS_315 |
2740 | if(SiS_Pr->ChipType >= SIS_315H) { | 2717 | if(SiS_Pr->ChipType >= SIS_315H) { |
2741 | SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x31,0xfb); | 2718 | SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x31,0xfb); |
2742 | } | 2719 | } |
@@ -2826,7 +2803,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2826 | 2803 | ||
2827 | SiS_SetVCLKState(SiS_Pr, ModeNo, RRTI, ModeIdIndex); | 2804 | SiS_SetVCLKState(SiS_Pr, ModeNo, RRTI, ModeIdIndex); |
2828 | 2805 | ||
2829 | #ifdef SIS315H | 2806 | #ifdef CONFIG_FB_SIS_315 |
2830 | if(((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->ChipType < SIS_661)) || | 2807 | if(((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->ChipType < SIS_661)) || |
2831 | (SiS_Pr->ChipType == XGI_40)) { | 2808 | (SiS_Pr->ChipType == XGI_40)) { |
2832 | if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x31) & 0x40) { | 2809 | if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x31) & 0x40) { |
@@ -2845,7 +2822,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2845 | #endif | 2822 | #endif |
2846 | } | 2823 | } |
2847 | 2824 | ||
2848 | #ifdef SIS315H | 2825 | #ifdef CONFIG_FB_SIS_315 |
2849 | static void | 2826 | static void |
2850 | SiS_SetupDualChip(struct SiS_Private *SiS_Pr) | 2827 | SiS_SetupDualChip(struct SiS_Private *SiS_Pr) |
2851 | { | 2828 | { |
@@ -2999,11 +2976,6 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho | |||
2999 | SiS_Pr->SiS_SelectCRT2Rate = 0; | 2976 | SiS_Pr->SiS_SelectCRT2Rate = 0; |
3000 | SiS_Pr->SiS_SetFlag &= (~ProgrammingCRT2); | 2977 | SiS_Pr->SiS_SetFlag &= (~ProgrammingCRT2); |
3001 | 2978 | ||
3002 | #ifdef SIS_XORG_XF86 | ||
3003 | xf86DrvMsgVerb(0, X_PROBED, 4, "(init: VBType=0x%04x, VBInfo=0x%04x)\n", | ||
3004 | SiS_Pr->SiS_VBType, SiS_Pr->SiS_VBInfo); | ||
3005 | #endif | ||
3006 | |||
3007 | if(SiS_Pr->SiS_VBInfo & SetSimuScanMode) { | 2979 | if(SiS_Pr->SiS_VBInfo & SetSimuScanMode) { |
3008 | if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) { | 2980 | if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) { |
3009 | SiS_Pr->SiS_SetFlag |= ProgrammingCRT2; | 2981 | SiS_Pr->SiS_SetFlag |= ProgrammingCRT2; |
@@ -3028,7 +3000,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho | |||
3028 | } | 3000 | } |
3029 | 3001 | ||
3030 | switch(SiS_Pr->ChipType) { | 3002 | switch(SiS_Pr->ChipType) { |
3031 | #ifdef SIS300 | 3003 | #ifdef CONFIG_FB_SIS_300 |
3032 | case SIS_300: | 3004 | case SIS_300: |
3033 | SiS_SetCRT1FIFO_300(SiS_Pr, ModeNo, RefreshRateTableIndex); | 3005 | SiS_SetCRT1FIFO_300(SiS_Pr, ModeNo, RefreshRateTableIndex); |
3034 | break; | 3006 | break; |
@@ -3039,7 +3011,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho | |||
3039 | break; | 3011 | break; |
3040 | #endif | 3012 | #endif |
3041 | default: | 3013 | default: |
3042 | #ifdef SIS315H | 3014 | #ifdef CONFIG_FB_SIS_315 |
3043 | if(SiS_Pr->ChipType == XGI_20) { | 3015 | if(SiS_Pr->ChipType == XGI_20) { |
3044 | unsigned char sr2b = 0, sr2c = 0; | 3016 | unsigned char sr2b = 0, sr2c = 0; |
3045 | switch(ModeNo) { | 3017 | switch(ModeNo) { |
@@ -3062,7 +3034,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho | |||
3062 | 3034 | ||
3063 | SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 3035 | SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
3064 | 3036 | ||
3065 | #ifdef SIS315H | 3037 | #ifdef CONFIG_FB_SIS_315 |
3066 | if(SiS_Pr->ChipType == XGI_40) { | 3038 | if(SiS_Pr->ChipType == XGI_40) { |
3067 | SiS_SetupDualChip(SiS_Pr); | 3039 | SiS_SetupDualChip(SiS_Pr); |
3068 | } | 3040 | } |
@@ -3070,11 +3042,9 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho | |||
3070 | 3042 | ||
3071 | SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex); | 3043 | SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex); |
3072 | 3044 | ||
3073 | #ifdef SIS_LINUX_KERNEL | ||
3074 | if(SiS_Pr->SiS_flag_clearbuffer) { | 3045 | if(SiS_Pr->SiS_flag_clearbuffer) { |
3075 | SiS_ClearBuffer(SiS_Pr, ModeNo); | 3046 | SiS_ClearBuffer(SiS_Pr, ModeNo); |
3076 | } | 3047 | } |
3077 | #endif | ||
3078 | 3048 | ||
3079 | if(!(SiS_Pr->SiS_VBInfo & (SetSimuScanMode | SwitchCRT2 | SetCRT2ToLCDA))) { | 3049 | if(!(SiS_Pr->SiS_VBInfo & (SetSimuScanMode | SwitchCRT2 | SetCRT2ToLCDA))) { |
3080 | SiS_WaitRetrace1(SiS_Pr); | 3050 | SiS_WaitRetrace1(SiS_Pr); |
@@ -3104,7 +3074,7 @@ SiS_InitVB(struct SiS_Private *SiS_Pr) | |||
3104 | static void | 3074 | static void |
3105 | SiS_ResetVB(struct SiS_Private *SiS_Pr) | 3075 | SiS_ResetVB(struct SiS_Private *SiS_Pr) |
3106 | { | 3076 | { |
3107 | #ifdef SIS315H | 3077 | #ifdef CONFIG_FB_SIS_315 |
3108 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 3078 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
3109 | unsigned short temp; | 3079 | unsigned short temp; |
3110 | 3080 | ||
@@ -3139,7 +3109,7 @@ SiS_StrangeStuff(struct SiS_Private *SiS_Pr) | |||
3139 | * which locks CRT2 in some way to CRT1 timing. Disable | 3109 | * which locks CRT2 in some way to CRT1 timing. Disable |
3140 | * this here. | 3110 | * this here. |
3141 | */ | 3111 | */ |
3142 | #ifdef SIS315H | 3112 | #ifdef CONFIG_FB_SIS_315 |
3143 | if((IS_SIS651) || (IS_SISM650) || | 3113 | if((IS_SIS651) || (IS_SISM650) || |
3144 | SiS_Pr->ChipType == SIS_340 || | 3114 | SiS_Pr->ChipType == SIS_340 || |
3145 | SiS_Pr->ChipType == XGI_40) { | 3115 | SiS_Pr->ChipType == XGI_40) { |
@@ -3160,7 +3130,7 @@ SiS_StrangeStuff(struct SiS_Private *SiS_Pr) | |||
3160 | static void | 3130 | static void |
3161 | SiS_Handle760(struct SiS_Private *SiS_Pr) | 3131 | SiS_Handle760(struct SiS_Private *SiS_Pr) |
3162 | { | 3132 | { |
3163 | #ifdef SIS315H | 3133 | #ifdef CONFIG_FB_SIS_315 |
3164 | unsigned int somebase; | 3134 | unsigned int somebase; |
3165 | unsigned char temp1, temp2, temp3; | 3135 | unsigned char temp1, temp2, temp3; |
3166 | 3136 | ||
@@ -3170,11 +3140,7 @@ SiS_Handle760(struct SiS_Private *SiS_Pr) | |||
3170 | (!(SiS_Pr->SiS_SysFlags & SF_760UMA)) ) | 3140 | (!(SiS_Pr->SiS_SysFlags & SF_760UMA)) ) |
3171 | return; | 3141 | return; |
3172 | 3142 | ||
3173 | #ifdef SIS_LINUX_KERNEL | ||
3174 | somebase = sisfb_read_mio_pci_word(SiS_Pr, 0x74); | 3143 | somebase = sisfb_read_mio_pci_word(SiS_Pr, 0x74); |
3175 | #else | ||
3176 | somebase = pciReadWord(0x00001000, 0x74); | ||
3177 | #endif | ||
3178 | somebase &= 0xffff; | 3144 | somebase &= 0xffff; |
3179 | 3145 | ||
3180 | if(somebase == 0) return; | 3146 | if(somebase == 0) return; |
@@ -3190,105 +3156,34 @@ SiS_Handle760(struct SiS_Private *SiS_Pr) | |||
3190 | temp2 = 0x0b; | 3156 | temp2 = 0x0b; |
3191 | } | 3157 | } |
3192 | 3158 | ||
3193 | #ifdef SIS_LINUX_KERNEL | ||
3194 | sisfb_write_nbridge_pci_byte(SiS_Pr, 0x7e, temp1); | 3159 | sisfb_write_nbridge_pci_byte(SiS_Pr, 0x7e, temp1); |
3195 | sisfb_write_nbridge_pci_byte(SiS_Pr, 0x8d, temp2); | 3160 | sisfb_write_nbridge_pci_byte(SiS_Pr, 0x8d, temp2); |
3196 | #else | ||
3197 | pciWriteByte(0x00000000, 0x7e, temp1); | ||
3198 | pciWriteByte(0x00000000, 0x8d, temp2); | ||
3199 | #endif | ||
3200 | 3161 | ||
3201 | SiS_SetRegByte((somebase + 0x85), temp3); | 3162 | SiS_SetRegByte((somebase + 0x85), temp3); |
3202 | #endif | 3163 | #endif |
3203 | } | 3164 | } |
3204 | 3165 | ||
3205 | /*********************************************/ | 3166 | /*********************************************/ |
3206 | /* X.org/XFree86: SET SCREEN PITCH */ | ||
3207 | /*********************************************/ | ||
3208 | |||
3209 | #ifdef SIS_XORG_XF86 | ||
3210 | static void | ||
3211 | SiS_SetPitchCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn) | ||
3212 | { | ||
3213 | SISPtr pSiS = SISPTR(pScrn); | ||
3214 | unsigned short HDisplay = pSiS->scrnPitch >> 3; | ||
3215 | |||
3216 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x13,(HDisplay & 0xFF)); | ||
3217 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0E,0xF0,(HDisplay >> 8)); | ||
3218 | } | ||
3219 | |||
3220 | static void | ||
3221 | SiS_SetPitchCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn) | ||
3222 | { | ||
3223 | SISPtr pSiS = SISPTR(pScrn); | ||
3224 | unsigned short HDisplay = pSiS->scrnPitch2 >> 3; | ||
3225 | |||
3226 | /* Unlock CRT2 */ | ||
3227 | if(pSiS->VGAEngine == SIS_315_VGA) | ||
3228 | SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2F, 0x01); | ||
3229 | else | ||
3230 | SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x24, 0x01); | ||
3231 | |||
3232 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x07,(HDisplay & 0xFF)); | ||
3233 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x09,0xF0,(HDisplay >> 8)); | ||
3234 | } | ||
3235 | |||
3236 | static void | ||
3237 | SiS_SetPitch(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn) | ||
3238 | { | ||
3239 | SISPtr pSiS = SISPTR(pScrn); | ||
3240 | bool isslavemode = false; | ||
3241 | |||
3242 | if( (pSiS->VBFlags2 & VB2_VIDEOBRIDGE) && | ||
3243 | ( ((pSiS->VGAEngine == SIS_300_VGA) && | ||
3244 | (SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0xa0) == 0x20) || | ||
3245 | ((pSiS->VGAEngine == SIS_315_VGA) && | ||
3246 | (SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x50) == 0x10) ) ) { | ||
3247 | isslavemode = true; | ||
3248 | } | ||
3249 | |||
3250 | /* We need to set pitch for CRT1 if bridge is in slave mode, too */ | ||
3251 | if((pSiS->VBFlags & DISPTYPE_DISP1) || (isslavemode)) { | ||
3252 | SiS_SetPitchCRT1(SiS_Pr, pScrn); | ||
3253 | } | ||
3254 | /* We must not set the pitch for CRT2 if bridge is in slave mode */ | ||
3255 | if((pSiS->VBFlags & DISPTYPE_DISP2) && (!isslavemode)) { | ||
3256 | SiS_SetPitchCRT2(SiS_Pr, pScrn); | ||
3257 | } | ||
3258 | } | ||
3259 | #endif | ||
3260 | |||
3261 | /*********************************************/ | ||
3262 | /* SiSSetMode() */ | 3167 | /* SiSSetMode() */ |
3263 | /*********************************************/ | 3168 | /*********************************************/ |
3264 | 3169 | ||
3265 | #ifdef SIS_XORG_XF86 | ||
3266 | /* We need pScrn for setting the pitch correctly */ | ||
3267 | bool | ||
3268 | SiSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, unsigned short ModeNo, bool dosetpitch) | ||
3269 | #else | ||
3270 | bool | 3170 | bool |
3271 | SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | 3171 | SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) |
3272 | #endif | ||
3273 | { | 3172 | { |
3274 | SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; | 3173 | SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; |
3275 | unsigned short RealModeNo, ModeIdIndex; | 3174 | unsigned short RealModeNo, ModeIdIndex; |
3276 | unsigned char backupreg = 0; | 3175 | unsigned char backupreg = 0; |
3277 | #ifdef SIS_LINUX_KERNEL | ||
3278 | unsigned short KeepLockReg; | 3176 | unsigned short KeepLockReg; |
3279 | 3177 | ||
3280 | SiS_Pr->UseCustomMode = false; | 3178 | SiS_Pr->UseCustomMode = false; |
3281 | SiS_Pr->CRT1UsesCustomMode = false; | 3179 | SiS_Pr->CRT1UsesCustomMode = false; |
3282 | #endif | ||
3283 | 3180 | ||
3284 | SiS_Pr->SiS_flag_clearbuffer = 0; | 3181 | SiS_Pr->SiS_flag_clearbuffer = 0; |
3285 | 3182 | ||
3286 | if(SiS_Pr->UseCustomMode) { | 3183 | if(SiS_Pr->UseCustomMode) { |
3287 | ModeNo = 0xfe; | 3184 | ModeNo = 0xfe; |
3288 | } else { | 3185 | } else { |
3289 | #ifdef SIS_LINUX_KERNEL | ||
3290 | if(!(ModeNo & 0x80)) SiS_Pr->SiS_flag_clearbuffer = 1; | 3186 | if(!(ModeNo & 0x80)) SiS_Pr->SiS_flag_clearbuffer = 1; |
3291 | #endif | ||
3292 | ModeNo &= 0x7f; | 3187 | ModeNo &= 0x7f; |
3293 | } | 3188 | } |
3294 | 3189 | ||
@@ -3301,13 +3196,8 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
3301 | SiS_GetSysFlags(SiS_Pr); | 3196 | SiS_GetSysFlags(SiS_Pr); |
3302 | 3197 | ||
3303 | SiS_Pr->SiS_VGAINFO = 0x11; | 3198 | SiS_Pr->SiS_VGAINFO = 0x11; |
3304 | #if defined(SIS_XORG_XF86) && (defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__)) | ||
3305 | if(pScrn) SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff); | ||
3306 | #endif | ||
3307 | 3199 | ||
3308 | #ifdef SIS_LINUX_KERNEL | ||
3309 | KeepLockReg = SiS_GetReg(SiS_Pr->SiS_P3c4,0x05); | 3200 | KeepLockReg = SiS_GetReg(SiS_Pr->SiS_P3c4,0x05); |
3310 | #endif | ||
3311 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); | 3201 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); |
3312 | 3202 | ||
3313 | SiSInitPCIetc(SiS_Pr); | 3203 | SiSInitPCIetc(SiS_Pr); |
@@ -3344,12 +3234,10 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
3344 | SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); | 3234 | SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); |
3345 | SiS_SetLowModeTest(SiS_Pr, ModeNo); | 3235 | SiS_SetLowModeTest(SiS_Pr, ModeNo); |
3346 | 3236 | ||
3347 | #ifdef SIS_LINUX_KERNEL | ||
3348 | /* Check memory size (kernel framebuffer driver only) */ | 3237 | /* Check memory size (kernel framebuffer driver only) */ |
3349 | if(!SiS_CheckMemorySize(SiS_Pr, ModeNo, ModeIdIndex)) { | 3238 | if(!SiS_CheckMemorySize(SiS_Pr, ModeNo, ModeIdIndex)) { |
3350 | return false; | 3239 | return false; |
3351 | } | 3240 | } |
3352 | #endif | ||
3353 | 3241 | ||
3354 | SiS_OpenCRTC(SiS_Pr); | 3242 | SiS_OpenCRTC(SiS_Pr); |
3355 | 3243 | ||
@@ -3384,7 +3272,7 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
3384 | SiS_DisplayOn(SiS_Pr); | 3272 | SiS_DisplayOn(SiS_Pr); |
3385 | SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); | 3273 | SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); |
3386 | 3274 | ||
3387 | #ifdef SIS315H | 3275 | #ifdef CONFIG_FB_SIS_315 |
3388 | if(SiS_Pr->ChipType >= SIS_315H) { | 3276 | if(SiS_Pr->ChipType >= SIS_315H) { |
3389 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { | 3277 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { |
3390 | if(!(SiS_IsDualEdge(SiS_Pr))) { | 3278 | if(!(SiS_IsDualEdge(SiS_Pr))) { |
@@ -3396,7 +3284,7 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
3396 | 3284 | ||
3397 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | 3285 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { |
3398 | if(SiS_Pr->ChipType >= SIS_315H) { | 3286 | if(SiS_Pr->ChipType >= SIS_315H) { |
3399 | #ifdef SIS315H | 3287 | #ifdef CONFIG_FB_SIS_315 |
3400 | if(!SiS_Pr->SiS_ROMNew) { | 3288 | if(!SiS_Pr->SiS_ROMNew) { |
3401 | if(SiS_IsVAMode(SiS_Pr)) { | 3289 | if(SiS_IsVAMode(SiS_Pr)) { |
3402 | SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01); | 3290 | SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01); |
@@ -3424,424 +3312,16 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
3424 | } | 3312 | } |
3425 | } | 3313 | } |
3426 | 3314 | ||
3427 | #ifdef SIS_XORG_XF86 | ||
3428 | if(pScrn) { | ||
3429 | /* SetPitch: Adapt to virtual size & position */ | ||
3430 | if((ModeNo > 0x13) && (dosetpitch)) { | ||
3431 | SiS_SetPitch(SiS_Pr, pScrn); | ||
3432 | } | ||
3433 | |||
3434 | /* Backup/Set ModeNo in BIOS scratch area */ | ||
3435 | SiS_GetSetModeID(pScrn, ModeNo); | ||
3436 | } | ||
3437 | #endif | ||
3438 | |||
3439 | SiS_CloseCRTC(SiS_Pr); | 3315 | SiS_CloseCRTC(SiS_Pr); |
3440 | 3316 | ||
3441 | SiS_Handle760(SiS_Pr); | 3317 | SiS_Handle760(SiS_Pr); |
3442 | 3318 | ||
3443 | #ifdef SIS_LINUX_KERNEL | ||
3444 | /* We never lock registers in XF86 */ | 3319 | /* We never lock registers in XF86 */ |
3445 | if(KeepLockReg != 0xA1) SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x00); | 3320 | if(KeepLockReg != 0xA1) SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x00); |
3446 | #endif | ||
3447 | 3321 | ||
3448 | return true; | 3322 | return true; |
3449 | } | 3323 | } |
3450 | 3324 | ||
3451 | /*********************************************/ | ||
3452 | /* X.org/XFree86: SiSBIOSSetMode() */ | ||
3453 | /* for non-Dual-Head mode */ | ||
3454 | /*********************************************/ | ||
3455 | |||
3456 | #ifdef SIS_XORG_XF86 | ||
3457 | bool | ||
3458 | SiSBIOSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, | ||
3459 | DisplayModePtr mode, bool IsCustom) | ||
3460 | { | ||
3461 | SISPtr pSiS = SISPTR(pScrn); | ||
3462 | unsigned short ModeNo = 0; | ||
3463 | |||
3464 | SiS_Pr->UseCustomMode = false; | ||
3465 | |||
3466 | if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) { | ||
3467 | |||
3468 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, "Setting custom mode %dx%d\n", | ||
3469 | SiS_Pr->CHDisplay, | ||
3470 | (mode->Flags & V_INTERLACE ? SiS_Pr->CVDisplay * 2 : | ||
3471 | (mode->Flags & V_DBLSCAN ? SiS_Pr->CVDisplay / 2 : | ||
3472 | SiS_Pr->CVDisplay))); | ||
3473 | |||
3474 | } else { | ||
3475 | |||
3476 | /* Don't need vbflags here; checks done earlier */ | ||
3477 | ModeNo = SiS_GetModeNumber(pScrn, mode, pSiS->VBFlags); | ||
3478 | if(!ModeNo) return false; | ||
3479 | |||
3480 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, "Setting standard mode 0x%x\n", ModeNo); | ||
3481 | |||
3482 | } | ||
3483 | |||
3484 | return(SiSSetMode(SiS_Pr, pScrn, ModeNo, true)); | ||
3485 | } | ||
3486 | |||
3487 | /*********************************************/ | ||
3488 | /* X.org/XFree86: SiSBIOSSetModeCRT2() */ | ||
3489 | /* for Dual-Head modes */ | ||
3490 | /*********************************************/ | ||
3491 | |||
3492 | bool | ||
3493 | SiSBIOSSetModeCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, | ||
3494 | DisplayModePtr mode, bool IsCustom) | ||
3495 | { | ||
3496 | SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; | ||
3497 | SISPtr pSiS = SISPTR(pScrn); | ||
3498 | #ifdef SISDUALHEAD | ||
3499 | SISEntPtr pSiSEnt = pSiS->entityPrivate; | ||
3500 | #endif | ||
3501 | unsigned short ModeIdIndex; | ||
3502 | unsigned short ModeNo = 0; | ||
3503 | unsigned char backupreg = 0; | ||
3504 | |||
3505 | SiS_Pr->UseCustomMode = false; | ||
3506 | |||
3507 | /* Remember: Custom modes for CRT2 are ONLY supported | ||
3508 | * -) on the 30x/B/C, and | ||
3509 | * -) if CRT2 is LCD or VGA, or CRT1 is LCDA | ||
3510 | */ | ||
3511 | |||
3512 | if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) { | ||
3513 | |||
3514 | ModeNo = 0xfe; | ||
3515 | |||
3516 | } else { | ||
3517 | |||
3518 | ModeNo = SiS_GetModeNumber(pScrn, mode, pSiS->VBFlags); | ||
3519 | if(!ModeNo) return false; | ||
3520 | |||
3521 | } | ||
3522 | |||
3523 | SiSRegInit(SiS_Pr, BaseAddr); | ||
3524 | SiSInitPtr(SiS_Pr); | ||
3525 | SiS_GetSysFlags(SiS_Pr); | ||
3526 | #if defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__) | ||
3527 | SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff); | ||
3528 | #else | ||
3529 | SiS_Pr->SiS_VGAINFO = 0x11; | ||
3530 | #endif | ||
3531 | |||
3532 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); | ||
3533 | |||
3534 | SiSInitPCIetc(SiS_Pr); | ||
3535 | SiSSetLVDSetc(SiS_Pr); | ||
3536 | SiSDetermineROMUsage(SiS_Pr); | ||
3537 | |||
3538 | /* Save mode info so we can set it from within SetMode for CRT1 */ | ||
3539 | #ifdef SISDUALHEAD | ||
3540 | if(pSiS->DualHeadMode) { | ||
3541 | pSiSEnt->CRT2ModeNo = ModeNo; | ||
3542 | pSiSEnt->CRT2DMode = mode; | ||
3543 | pSiSEnt->CRT2IsCustom = IsCustom; | ||
3544 | pSiSEnt->CRT2CR30 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x30); | ||
3545 | pSiSEnt->CRT2CR31 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x31); | ||
3546 | pSiSEnt->CRT2CR35 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); | ||
3547 | pSiSEnt->CRT2CR38 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); | ||
3548 | #if 0 | ||
3549 | /* We can't set CRT2 mode before CRT1 mode is set - says who...? */ | ||
3550 | if(pSiSEnt->CRT1ModeNo == -1) { | ||
3551 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, | ||
3552 | "Setting CRT2 mode delayed until after setting CRT1 mode\n"); | ||
3553 | return true; | ||
3554 | } | ||
3555 | #endif | ||
3556 | pSiSEnt->CRT2ModeSet = true; | ||
3557 | } | ||
3558 | #endif | ||
3559 | |||
3560 | if(SiS_Pr->UseCustomMode) { | ||
3561 | |||
3562 | unsigned short temptemp = SiS_Pr->CVDisplay; | ||
3563 | |||
3564 | if(SiS_Pr->CModeFlag & DoubleScanMode) temptemp >>= 1; | ||
3565 | else if(SiS_Pr->CInfoFlag & InterlaceMode) temptemp <<= 1; | ||
3566 | |||
3567 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, | ||
3568 | "Setting custom mode %dx%d on CRT2\n", | ||
3569 | SiS_Pr->CHDisplay, temptemp); | ||
3570 | |||
3571 | } else { | ||
3572 | |||
3573 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, | ||
3574 | "Setting standard mode 0x%x on CRT2\n", ModeNo); | ||
3575 | |||
3576 | } | ||
3577 | |||
3578 | SiS_UnLockCRT2(SiS_Pr); | ||
3579 | |||
3580 | if(!SiS_Pr->UseCustomMode) { | ||
3581 | if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return false; | ||
3582 | } else { | ||
3583 | ModeIdIndex = 0; | ||
3584 | } | ||
3585 | |||
3586 | SiS_GetVBType(SiS_Pr); | ||
3587 | |||
3588 | SiS_InitVB(SiS_Pr); | ||
3589 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | ||
3590 | if(SiS_Pr->ChipType >= SIS_315H) { | ||
3591 | SiS_ResetVB(SiS_Pr); | ||
3592 | SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x32,0x10); | ||
3593 | SiS_SetRegOR(SiS_Pr->SiS_Part2Port,0x00,0x0c); | ||
3594 | backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); | ||
3595 | } else { | ||
3596 | backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); | ||
3597 | } | ||
3598 | } | ||
3599 | |||
3600 | /* Get VB information (connectors, connected devices) */ | ||
3601 | if(!SiS_Pr->UseCustomMode) { | ||
3602 | SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 1); | ||
3603 | } else { | ||
3604 | /* If this is a custom mode, we don't check the modeflag for CRT2Mode */ | ||
3605 | SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 0); | ||
3606 | } | ||
3607 | SiS_SetYPbPr(SiS_Pr); | ||
3608 | SiS_SetTVMode(SiS_Pr, ModeNo, ModeIdIndex); | ||
3609 | SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); | ||
3610 | SiS_SetLowModeTest(SiS_Pr, ModeNo); | ||
3611 | |||
3612 | SiS_ResetSegmentRegisters(SiS_Pr); | ||
3613 | |||
3614 | /* Set mode on CRT2 */ | ||
3615 | if( (SiS_Pr->SiS_VBType & VB_SISVB) || | ||
3616 | (SiS_Pr->SiS_IF_DEF_LVDS == 1) || | ||
3617 | (SiS_Pr->SiS_IF_DEF_CH70xx != 0) || | ||
3618 | (SiS_Pr->SiS_IF_DEF_TRUMPION != 0) ) { | ||
3619 | SiS_SetCRT2Group(SiS_Pr, ModeNo); | ||
3620 | } | ||
3621 | |||
3622 | SiS_StrangeStuff(SiS_Pr); | ||
3623 | |||
3624 | SiS_DisplayOn(SiS_Pr); | ||
3625 | SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); | ||
3626 | |||
3627 | if(SiS_Pr->ChipType >= SIS_315H) { | ||
3628 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { | ||
3629 | if(!(SiS_IsDualEdge(SiS_Pr))) { | ||
3630 | SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x13,0xfb); | ||
3631 | } | ||
3632 | } | ||
3633 | } | ||
3634 | |||
3635 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | ||
3636 | if(SiS_Pr->ChipType >= SIS_315H) { | ||
3637 | if(!SiS_Pr->SiS_ROMNew) { | ||
3638 | if(SiS_IsVAMode(SiS_Pr)) { | ||
3639 | SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01); | ||
3640 | } else { | ||
3641 | SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x35,0xFE); | ||
3642 | } | ||
3643 | } | ||
3644 | |||
3645 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupreg); | ||
3646 | |||
3647 | if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x30) & SetCRT2ToLCD) { | ||
3648 | SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x38,0xfc); | ||
3649 | } | ||
3650 | } else if((SiS_Pr->ChipType == SIS_630) || | ||
3651 | (SiS_Pr->ChipType == SIS_730)) { | ||
3652 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupreg); | ||
3653 | } | ||
3654 | } | ||
3655 | |||
3656 | /* SetPitch: Adapt to virtual size & position */ | ||
3657 | SiS_SetPitchCRT2(SiS_Pr, pScrn); | ||
3658 | |||
3659 | SiS_Handle760(SiS_Pr); | ||
3660 | |||
3661 | return true; | ||
3662 | } | ||
3663 | |||
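SiSBIOSSetModeCRT2() above drives the bridge almost entirely through the SiS_SetRegOR/SiS_SetRegAND/SiS_SetRegANDOR helpers, i.e. read-modify-write updates of indexed registers. The sketch below models only that masking semantic on an in-memory array; the real helpers perform indexed port I/O, whose details are not part of this diff and are not reproduced here.

	#include <stdio.h>

	static unsigned char fake_regs[256];	/* stand-in for an indexed register bank */

	/* Keep only the bits in and_mask, then set the bits in or_mask. */
	static void set_reg_andor(unsigned char idx, unsigned char and_mask,
	                          unsigned char or_mask)
	{
		fake_regs[idx] = (fake_regs[idx] & and_mask) | or_mask;
	}

	int main(void)
	{
		fake_regs[0x32] = 0x41;
		set_reg_andor(0x32, 0xff, 0x10);	/* "OR" form: set bit 4    */
		set_reg_andor(0x32, 0xfb, 0x00);	/* "AND" form: clear bit 2 */
		printf("reg 0x32 = 0x%02x\n", fake_regs[0x32]);	/* prints 0x51 */
		return 0;
	}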
3664 | /*********************************************/ | ||
3665 | /* X.org/XFree86: SiSBIOSSetModeCRT1() */ | ||
3666 | /* for Dual-Head modes */ | ||
3667 | /*********************************************/ | ||
3668 | |||
3669 | bool | ||
3670 | SiSBIOSSetModeCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, | ||
3671 | DisplayModePtr mode, bool IsCustom) | ||
3672 | { | ||
3673 | SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; | ||
3674 | SISPtr pSiS = SISPTR(pScrn); | ||
3675 | unsigned short ModeIdIndex, ModeNo = 0; | ||
3676 | unsigned char backupreg = 0; | ||
3677 | #ifdef SISDUALHEAD | ||
3678 | SISEntPtr pSiSEnt = pSiS->entityPrivate; | ||
3679 | unsigned char backupcr30, backupcr31, backupcr38, backupcr35, backupp40d=0; | ||
3680 | bool backupcustom; | ||
3681 | #endif | ||
3682 | |||
3683 | SiS_Pr->UseCustomMode = false; | ||
3684 | |||
3685 | if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) { | ||
3686 | |||
3687 | unsigned short temptemp = SiS_Pr->CVDisplay; | ||
3688 | |||
3689 | if(SiS_Pr->CModeFlag & DoubleScanMode) temptemp >>= 1; | ||
3690 | else if(SiS_Pr->CInfoFlag & InterlaceMode) temptemp <<= 1; | ||
3691 | |||
3692 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, | ||
3693 | "Setting custom mode %dx%d on CRT1\n", | ||
3694 | SiS_Pr->CHDisplay, temptemp); | ||
3695 | ModeNo = 0xfe; | ||
3696 | |||
3697 | } else { | ||
3698 | |||
3699 | ModeNo = SiS_GetModeNumber(pScrn, mode, 0); /* don't give VBFlags */ | ||
3700 | if(!ModeNo) return false; | ||
3701 | |||
3702 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, | ||
3703 | "Setting standard mode 0x%x on CRT1\n", ModeNo); | ||
3704 | } | ||
3705 | |||
3706 | SiSInitPtr(SiS_Pr); | ||
3707 | SiSRegInit(SiS_Pr, BaseAddr); | ||
3708 | SiS_GetSysFlags(SiS_Pr); | ||
3709 | #if defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__) | ||
3710 | SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff); | ||
3711 | #else | ||
3712 | SiS_Pr->SiS_VGAINFO = 0x11; | ||
3713 | #endif | ||
3714 | |||
3715 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); | ||
3716 | |||
3717 | SiSInitPCIetc(SiS_Pr); | ||
3718 | SiSSetLVDSetc(SiS_Pr); | ||
3719 | SiSDetermineROMUsage(SiS_Pr); | ||
3720 | |||
3721 | SiS_UnLockCRT2(SiS_Pr); | ||
3722 | |||
3723 | if(!SiS_Pr->UseCustomMode) { | ||
3724 | if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return false; | ||
3725 | } else { | ||
3726 | ModeIdIndex = 0; | ||
3727 | } | ||
3728 | |||
3729 | /* Determine VBType */ | ||
3730 | SiS_GetVBType(SiS_Pr); | ||
3731 | |||
3732 | SiS_InitVB(SiS_Pr); | ||
3733 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | ||
3734 | if(SiS_Pr->ChipType >= SIS_315H) { | ||
3735 | backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); | ||
3736 | } else { | ||
3737 | backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); | ||
3738 | } | ||
3739 | } | ||
3740 | |||
3741 | /* Get VB information (connectors, connected devices) */ | ||
3742 | /* (We don't care if the current mode is a CRT2 mode) */ | ||
3743 | SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 0); | ||
3744 | SiS_SetYPbPr(SiS_Pr); | ||
3745 | SiS_SetTVMode(SiS_Pr, ModeNo, ModeIdIndex); | ||
3746 | SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); | ||
3747 | SiS_SetLowModeTest(SiS_Pr, ModeNo); | ||
3748 | |||
3749 | SiS_OpenCRTC(SiS_Pr); | ||
3750 | |||
3751 | /* Set mode on CRT1 */ | ||
3752 | SiS_SetCRT1Group(SiS_Pr, ModeNo, ModeIdIndex); | ||
3753 | if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { | ||
3754 | SiS_SetCRT2Group(SiS_Pr, ModeNo); | ||
3755 | } | ||
3756 | |||
3757 | /* SetPitch: Adapt to virtual size & position */ | ||
3758 | SiS_SetPitchCRT1(SiS_Pr, pScrn); | ||
3759 | |||
3760 | SiS_HandleCRT1(SiS_Pr); | ||
3761 | |||
3762 | SiS_StrangeStuff(SiS_Pr); | ||
3763 | |||
3764 | SiS_CloseCRTC(SiS_Pr); | ||
3765 | |||
3766 | #ifdef SISDUALHEAD | ||
3767 | if(pSiS->DualHeadMode) { | ||
3768 | pSiSEnt->CRT1ModeNo = ModeNo; | ||
3769 | pSiSEnt->CRT1DMode = mode; | ||
3770 | } | ||
3771 | #endif | ||
3772 | |||
3773 | if(SiS_Pr->UseCustomMode) { | ||
3774 | SiS_Pr->CRT1UsesCustomMode = true; | ||
3775 | SiS_Pr->CSRClock_CRT1 = SiS_Pr->CSRClock; | ||
3776 | SiS_Pr->CModeFlag_CRT1 = SiS_Pr->CModeFlag; | ||
3777 | } else { | ||
3778 | SiS_Pr->CRT1UsesCustomMode = false; | ||
3779 | } | ||
3780 | |||
3781 | /* Reset CRT2 if changing mode on CRT1 */ | ||
3782 | #ifdef SISDUALHEAD | ||
3783 | if(pSiS->DualHeadMode) { | ||
3784 | if(pSiSEnt->CRT2ModeNo != -1) { | ||
3785 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, | ||
3786 | "(Re-)Setting mode for CRT2\n"); | ||
3787 | backupcustom = SiS_Pr->UseCustomMode; | ||
3788 | backupcr30 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x30); | ||
3789 | backupcr31 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x31); | ||
3790 | backupcr35 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); | ||
3791 | backupcr38 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); | ||
3792 | if(SiS_Pr->SiS_VBType & VB_SISVB) { | ||
3793 | /* Backup LUT-enable */ | ||
3794 | if(pSiSEnt->CRT2ModeSet) { | ||
3795 | backupp40d = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x0d) & 0x08; | ||
3796 | } | ||
3797 | } | ||
3798 | if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { | ||
3799 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x30,pSiSEnt->CRT2CR30); | ||
3800 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x31,pSiSEnt->CRT2CR31); | ||
3801 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,pSiSEnt->CRT2CR35); | ||
3802 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,pSiSEnt->CRT2CR38); | ||
3803 | } | ||
3804 | |||
3805 | SiSBIOSSetModeCRT2(SiS_Pr, pSiSEnt->pScrn_1, | ||
3806 | pSiSEnt->CRT2DMode, pSiSEnt->CRT2IsCustom); | ||
3807 | |||
3808 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x30,backupcr30); | ||
3809 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x31,backupcr31); | ||
3810 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupcr35); | ||
3811 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupcr38); | ||
3812 | if(SiS_Pr->SiS_VBType & VB_SISVB) { | ||
3813 | SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x0d, ~0x08, backupp40d); | ||
3814 | } | ||
3815 | SiS_Pr->UseCustomMode = backupcustom; | ||
3816 | } | ||
3817 | } | ||
3818 | #endif | ||
3819 | |||
3820 | /* Warning: From here, the custom mode entries in SiS_Pr are | ||
3821 | * possibly overwritten | ||
3822 | */ | ||
3823 | |||
3824 | SiS_DisplayOn(SiS_Pr); | ||
3825 | SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); | ||
3826 | |||
3827 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | ||
3828 | if(SiS_Pr->ChipType >= SIS_315H) { | ||
3829 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupreg); | ||
3830 | } else if((SiS_Pr->ChipType == SIS_630) || | ||
3831 | (SiS_Pr->ChipType == SIS_730)) { | ||
3832 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupreg); | ||
3833 | } | ||
3834 | } | ||
3835 | |||
3836 | SiS_Handle760(SiS_Pr); | ||
3837 | |||
3838 | /* Backup/Set ModeNo in BIOS scratch area */ | ||
3839 | SiS_GetSetModeID(pScrn,ModeNo); | ||
3840 | |||
3841 | return true; | ||
3842 | } | ||
3843 | #endif /* Linux_XF86 */ | ||
3844 | |||
3845 | #ifndef GETBITSTR | 3325 | #ifndef GETBITSTR |
3846 | #define BITMASK(h,l) (((unsigned)(1U << ((h)-(l)+1))-1)<<(l)) | 3326 | #define BITMASK(h,l) (((unsigned)(1U << ((h)-(l)+1))-1)<<(l)) |
3847 | #define GENMASK(mask) BITMASK(1?mask,0?mask) | 3327 | #define GENMASK(mask) BITMASK(1?mask,0?mask) |
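The BITMASK/GENMASK helpers in the context above use the conditional operator to split an "h:l" field specification into its bounds: GENMASK(7:4) expands to BITMASK(1?7:4, 0?7:4), i.e. BITMASK(7,4), a mask with bits 7..4 set. A minimal stand-alone sketch of that behaviour (the two macro bodies are copied from the hunk; the test harness around them is illustrative only):

	#include <stdio.h>

	#define BITMASK(h,l)  (((unsigned)(1U << ((h)-(l)+1))-1)<<(l))
	#define GENMASK(mask) BITMASK(1?mask,0?mask)

	int main(void)
	{
		/* BITMASK(7,4) sets bits 7..4 of the word: 0xf0 */
		printf("BITMASK(7,4) = 0x%x\n", BITMASK(7, 4));
		/* GENMASK takes an "h:l" spec and yields the same mask */
		printf("GENMASK(7:4) = 0x%x\n", GENMASK(7:4));
		return 0;
	}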
@@ -3927,7 +3407,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
3927 | SiS_Pr->CVBlankStart = SiS_Pr->SiS_VGAVDE; | 3407 | SiS_Pr->CVBlankStart = SiS_Pr->SiS_VGAVDE; |
3928 | 3408 | ||
3929 | if(SiS_Pr->ChipType < SIS_315H) { | 3409 | if(SiS_Pr->ChipType < SIS_315H) { |
3930 | #ifdef SIS300 | 3410 | #ifdef CONFIG_FB_SIS_300 |
3931 | tempbx = SiS_Pr->SiS_VGAHT; | 3411 | tempbx = SiS_Pr->SiS_VGAHT; |
3932 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { | 3412 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { |
3933 | tempbx = SiS_Pr->PanelHT; | 3413 | tempbx = SiS_Pr->PanelHT; |
@@ -3936,7 +3416,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
3936 | remaining = tempbx % 8; | 3416 | remaining = tempbx % 8; |
3937 | #endif | 3417 | #endif |
3938 | } else { | 3418 | } else { |
3939 | #ifdef SIS315H | 3419 | #ifdef CONFIG_FB_SIS_315 |
3940 | /* OK for LCDA, LVDS */ | 3420 | /* OK for LCDA, LVDS */ |
3941 | tempbx = SiS_Pr->PanelHT - SiS_Pr->PanelXRes; | 3421 | tempbx = SiS_Pr->PanelHT - SiS_Pr->PanelXRes; |
3942 | tempax = SiS_Pr->SiS_VGAHDE; /* not /2 ! */ | 3422 | tempax = SiS_Pr->SiS_VGAHDE; /* not /2 ! */ |
@@ -3950,7 +3430,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
3950 | SiS_Pr->CHTotal = SiS_Pr->CHBlankEnd = tempbx; | 3430 | SiS_Pr->CHTotal = SiS_Pr->CHBlankEnd = tempbx; |
3951 | 3431 | ||
3952 | if(SiS_Pr->ChipType < SIS_315H) { | 3432 | if(SiS_Pr->ChipType < SIS_315H) { |
3953 | #ifdef SIS300 | 3433 | #ifdef CONFIG_FB_SIS_300 |
3954 | if(SiS_Pr->SiS_VGAHDE == SiS_Pr->PanelXRes) { | 3434 | if(SiS_Pr->SiS_VGAHDE == SiS_Pr->PanelXRes) { |
3955 | SiS_Pr->CHSyncStart = SiS_Pr->SiS_VGAHDE + ((SiS_Pr->PanelHRS + 1) & ~1); | 3435 | SiS_Pr->CHSyncStart = SiS_Pr->SiS_VGAHDE + ((SiS_Pr->PanelHRS + 1) & ~1); |
3956 | SiS_Pr->CHSyncEnd = SiS_Pr->CHSyncStart + SiS_Pr->PanelHRE; | 3436 | SiS_Pr->CHSyncEnd = SiS_Pr->CHSyncStart + SiS_Pr->PanelHRE; |
@@ -3982,7 +3462,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
3982 | } | 3462 | } |
3983 | #endif | 3463 | #endif |
3984 | } else { | 3464 | } else { |
3985 | #ifdef SIS315H | 3465 | #ifdef CONFIG_FB_SIS_315 |
3986 | tempax = VGAHDE; | 3466 | tempax = VGAHDE; |
3987 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { | 3467 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { |
3988 | tempbx = SiS_Pr->PanelXRes; | 3468 | tempbx = SiS_Pr->PanelXRes; |
@@ -4001,7 +3481,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
4001 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { | 3481 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { |
4002 | tempax = SiS_Pr->PanelYRes; | 3482 | tempax = SiS_Pr->PanelYRes; |
4003 | } else if(SiS_Pr->ChipType < SIS_315H) { | 3483 | } else if(SiS_Pr->ChipType < SIS_315H) { |
4004 | #ifdef SIS300 | 3484 | #ifdef CONFIG_FB_SIS_300 |
4005 | /* Stupid hack for 640x400/320x200 */ | 3485 | /* Stupid hack for 640x400/320x200 */ |
4006 | if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) { | 3486 | if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) { |
4007 | if((tempax + tempbx) == 438) tempbx += 16; | 3487 | if((tempax + tempbx) == 438) tempbx += 16; |
@@ -4054,36 +3534,12 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
4054 | if(modeflag & DoubleScanMode) tempax |= 0x80; | 3534 | if(modeflag & DoubleScanMode) tempax |= 0x80; |
4055 | SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x09,0x5F,tempax); | 3535 | SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x09,0x5F,tempax); |
4056 | 3536 | ||
4057 | #ifdef SIS_XORG_XF86 | ||
4058 | #ifdef TWDEBUG | ||
4059 | xf86DrvMsg(0, X_INFO, "%d %d %d %d %d %d %d %d (%d %d %d %d)\n", | ||
4060 | SiS_Pr->CHDisplay, SiS_Pr->CHSyncStart, SiS_Pr->CHSyncEnd, SiS_Pr->CHTotal, | ||
4061 | SiS_Pr->CVDisplay, SiS_Pr->CVSyncStart, SiS_Pr->CVSyncEnd, SiS_Pr->CVTotal, | ||
4062 | SiS_Pr->CHBlankStart, SiS_Pr->CHBlankEnd, SiS_Pr->CVBlankStart, SiS_Pr->CVBlankEnd); | ||
4063 | xf86DrvMsg(0, X_INFO, " {{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", | ||
4064 | SiS_Pr->CCRT1CRTC[0], SiS_Pr->CCRT1CRTC[1], | ||
4065 | SiS_Pr->CCRT1CRTC[2], SiS_Pr->CCRT1CRTC[3], | ||
4066 | SiS_Pr->CCRT1CRTC[4], SiS_Pr->CCRT1CRTC[5], | ||
4067 | SiS_Pr->CCRT1CRTC[6], SiS_Pr->CCRT1CRTC[7]); | ||
4068 | xf86DrvMsg(0, X_INFO, " 0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", | ||
4069 | SiS_Pr->CCRT1CRTC[8], SiS_Pr->CCRT1CRTC[9], | ||
4070 | SiS_Pr->CCRT1CRTC[10], SiS_Pr->CCRT1CRTC[11], | ||
4071 | SiS_Pr->CCRT1CRTC[12], SiS_Pr->CCRT1CRTC[13], | ||
4072 | SiS_Pr->CCRT1CRTC[14], SiS_Pr->CCRT1CRTC[15]); | ||
4073 | xf86DrvMsg(0, X_INFO, " 0x%02x}},\n", SiS_Pr->CCRT1CRTC[16]); | ||
4074 | #endif | ||
4075 | #endif | ||
4076 | } | 3537 | } |
4077 | 3538 | ||
4078 | void | 3539 | void |
4079 | SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, | 3540 | SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, |
4080 | int xres, int yres, | 3541 | int xres, int yres, |
4081 | #ifdef SIS_XORG_XF86 | ||
4082 | DisplayModePtr current | ||
4083 | #endif | ||
4084 | #ifdef SIS_LINUX_KERNEL | ||
4085 | struct fb_var_screeninfo *var, bool writeres | 3542 | struct fb_var_screeninfo *var, bool writeres |
4086 | #endif | ||
4087 | ) | 3543 | ) |
4088 | { | 3544 | { |
4089 | unsigned short HRE, HBE, HRS, HBS, HDE, HT; | 3545 | unsigned short HRE, HBE, HRS, HBS, HDE, HT; |
@@ -4127,25 +3583,10 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, | |||
4127 | 3583 | ||
4128 | D = B - F - C; | 3584 | D = B - F - C; |
4129 | 3585 | ||
4130 | #ifdef SIS_XORG_XF86 | ||
4131 | current->HDisplay = (E * 8); | ||
4132 | current->HSyncStart = (E * 8) + (F * 8); | ||
4133 | current->HSyncEnd = (E * 8) + (F * 8) + (C * 8); | ||
4134 | current->HTotal = (E * 8) + (F * 8) + (C * 8) + (D * 8); | ||
4135 | #ifdef TWDEBUG | ||
4136 | xf86DrvMsg(0, X_INFO, | ||
4137 | "H: A %d B %d C %d D %d E %d F %d HT %d HDE %d HRS %d HBS %d HBE %d HRE %d\n", | ||
4138 | A, B, C, D, E, F, HT, HDE, HRS, HBS, HBE, HRE); | ||
4139 | #else | ||
4140 | (void)VBS; (void)HBS; (void)A; | ||
4141 | #endif | ||
4142 | #endif | ||
4143 | #ifdef SIS_LINUX_KERNEL | ||
4144 | if(writeres) var->xres = xres = E * 8; | 3586 | if(writeres) var->xres = xres = E * 8; |
4145 | var->left_margin = D * 8; | 3587 | var->left_margin = D * 8; |
4146 | var->right_margin = F * 8; | 3588 | var->right_margin = F * 8; |
4147 | var->hsync_len = C * 8; | 3589 | var->hsync_len = C * 8; |
4148 | #endif | ||
4149 | 3590 | ||
4150 | /* Vertical */ | 3591 | /* Vertical */ |
4151 | sr_data = crdata[13]; | 3592 | sr_data = crdata[13]; |
@@ -4192,30 +3633,10 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, | |||
4192 | 3633 | ||
4193 | D = B - F - C; | 3634 | D = B - F - C; |
4194 | 3635 | ||
4195 | #ifdef SIS_XORG_XF86 | ||
4196 | current->VDisplay = VDE + 1; | ||
4197 | current->VSyncStart = VRS + 1; | ||
4198 | current->VSyncEnd = ((VRS & ~0x1f) | VRE) + 1; | ||
4199 | if(VRE <= (VRS & 0x1f)) current->VSyncEnd += 32; | ||
4200 | current->VTotal = E + D + C + F; | ||
4201 | #if 0 | ||
4202 | current->VDisplay = E; | ||
4203 | current->VSyncStart = E + D; | ||
4204 | current->VSyncEnd = E + D + C; | ||
4205 | current->VTotal = E + D + C + F; | ||
4206 | #endif | ||
4207 | #ifdef TWDEBUG | ||
4208 | xf86DrvMsg(0, X_INFO, | ||
4209 | "V: A %d B %d C %d D %d E %d F %d VT %d VDE %d VRS %d VBS %d VBE %d VRE %d\n", | ||
4210 | A, B, C, D, E, F, VT, VDE, VRS, VBS, VBE, VRE); | ||
4211 | #endif | ||
4212 | #endif | ||
4213 | #ifdef SIS_LINUX_KERNEL | ||
4214 | if(writeres) var->yres = yres = E; | 3636 | if(writeres) var->yres = yres = E; |
4215 | var->upper_margin = D; | 3637 | var->upper_margin = D; |
4216 | var->lower_margin = F; | 3638 | var->lower_margin = F; |
4217 | var->vsync_len = C; | 3639 | var->vsync_len = C; |
4218 | #endif | ||
4219 | 3640 | ||
4220 | if((xres == 320) && ((yres == 200) || (yres == 240))) { | 3641 | if((xres == 320) && ((yres == 200) || (yres == 240))) { |
4221 | /* Terrible hack, but correct CRTC data for | 3642 | /* Terrible hack, but correct CRTC data for |
@@ -4224,17 +3645,9 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, | |||
4224 | * a negative D. The CRT controller does not | 3645 | * a negative D. The CRT controller does not |
4225 | * seem to like correcting HRE to 50) | 3646 | * seem to like correcting HRE to 50) |
4226 | */ | 3647 | */ |
4227 | #ifdef SIS_XORG_XF86 | ||
4228 | current->HDisplay = 320; | ||
4229 | current->HSyncStart = 328; | ||
4230 | current->HSyncEnd = 376; | ||
4231 | current->HTotal = 400; | ||
4232 | #endif | ||
4233 | #ifdef SIS_LINUX_KERNEL | ||
4234 | var->left_margin = (400 - 376); | 3648 | var->left_margin = (400 - 376); |
4235 | var->right_margin = (328 - 320); | 3649 | var->right_margin = (328 - 320); |
4236 | var->hsync_len = (376 - 328); | 3650 | var->hsync_len = (376 - 328); |
4237 | #endif | ||
4238 | 3651 | ||
4239 | } | 3652 | } |
4240 | 3653 | ||
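With the X11 branch gone, SiS_Generic_ConvertCRData() only fills the fbdev fields; the removed lines show how those fields relate to the X-style sync positions. A small worked example in the same E/F/C/D character-clock terms (values chosen to match a common 1024x768 mode line, not taken from the driver):

	#include <stdio.h>

	int main(void)
	{
		unsigned E = 128, F = 3, C = 17, D = 20;	/* character clocks */

		/* fbdev representation kept by the patch */
		unsigned xres         = E * 8;	/* 1024             */
		unsigned right_margin = F * 8;	/* front porch, 24  */
		unsigned hsync_len    = C * 8;	/* 136              */
		unsigned left_margin  = D * 8;	/* back porch, 160  */

		/* X11-style representation removed by the patch */
		unsigned HSyncStart = (E + F) * 8;		/* 1048 */
		unsigned HSyncEnd   = (E + F + C) * 8;		/* 1184 */
		unsigned HTotal     = (E + F + C + D) * 8;	/* 1344 */

		printf("%u + %u + %u + %u = %u (HTotal)\n",
		       xres, right_margin, hsync_len, left_margin, HTotal);
		printf("sync start %u, sync end %u\n", HSyncStart, HSyncEnd);
		return 0;
	}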
diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h index b96005c39c67..ee8ed3c203da 100644 --- a/drivers/video/sis/init.h +++ b/drivers/video/sis/init.h | |||
@@ -53,21 +53,8 @@ | |||
53 | #ifndef _INIT_H_ | 53 | #ifndef _INIT_H_ |
54 | #define _INIT_H_ | 54 | #define _INIT_H_ |
55 | 55 | ||
56 | #include "osdef.h" | ||
57 | #include "initdef.h" | 56 | #include "initdef.h" |
58 | 57 | ||
59 | #ifdef SIS_XORG_XF86 | ||
60 | #include "sis.h" | ||
61 | #define SIS_NEED_inSISREG | ||
62 | #define SIS_NEED_inSISREGW | ||
63 | #define SIS_NEED_inSISREGL | ||
64 | #define SIS_NEED_outSISREG | ||
65 | #define SIS_NEED_outSISREGW | ||
66 | #define SIS_NEED_outSISREGL | ||
67 | #include "sis_regs.h" | ||
68 | #endif | ||
69 | |||
70 | #ifdef SIS_LINUX_KERNEL | ||
71 | #include "vgatypes.h" | 58 | #include "vgatypes.h" |
72 | #include "vstruct.h" | 59 | #include "vstruct.h" |
73 | #ifdef SIS_CP | 60 | #ifdef SIS_CP |
@@ -78,7 +65,6 @@ | |||
78 | #include <linux/fb.h> | 65 | #include <linux/fb.h> |
79 | #include "sis.h" | 66 | #include "sis.h" |
80 | #include <video/sisfb.h> | 67 | #include <video/sisfb.h> |
81 | #endif | ||
82 | 68 | ||
83 | /* Mode numbers */ | 69 | /* Mode numbers */ |
84 | static const unsigned short ModeIndex_320x200[] = {0x59, 0x41, 0x00, 0x4f}; | 70 | static const unsigned short ModeIndex_320x200[] = {0x59, 0x41, 0x00, 0x4f}; |
@@ -286,7 +272,7 @@ static const struct SiS_ModeResInfo_S SiS_ModeResInfo[] = | |||
286 | { 1280, 854, 8,16} /* 0x22 */ | 272 | { 1280, 854, 8,16} /* 0x22 */ |
287 | }; | 273 | }; |
288 | 274 | ||
289 | #if defined(SIS300) || defined(SIS315H) | 275 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
290 | static const struct SiS_StandTable_S SiS_StandTable[]= | 276 | static const struct SiS_StandTable_S SiS_StandTable[]= |
291 | { | 277 | { |
292 | /* 0x00: MD_0_200 */ | 278 | /* 0x00: MD_0_200 */ |
@@ -1521,10 +1507,6 @@ static const struct SiS_LVDSCRT1Data SiS_LVDSCRT1640x480_1_H[] = | |||
1521 | }; | 1507 | }; |
1522 | 1508 | ||
1523 | bool SiSInitPtr(struct SiS_Private *SiS_Pr); | 1509 | bool SiSInitPtr(struct SiS_Private *SiS_Pr); |
1524 | #ifdef SIS_XORG_XF86 | ||
1525 | unsigned short SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, | ||
1526 | int Depth, bool FSTN, int LCDwith, int LCDheight); | ||
1527 | #endif | ||
1528 | unsigned short SiS_GetModeID_LCD(int VGAEngine, unsigned int VBFlags, int HDisplay, | 1510 | unsigned short SiS_GetModeID_LCD(int VGAEngine, unsigned int VBFlags, int HDisplay, |
1529 | int VDisplay, int Depth, bool FSTN, | 1511 | int VDisplay, int Depth, bool FSTN, |
1530 | unsigned short CustomT, int LCDwith, int LCDheight, | 1512 | unsigned short CustomT, int LCDwith, int LCDheight, |
@@ -1550,17 +1532,11 @@ void SiS_SetRegOR(SISIOADDRESS Port,unsigned short Index, unsigned short DataOR | |||
1550 | void SiS_DisplayOn(struct SiS_Private *SiS_Pr); | 1532 | void SiS_DisplayOn(struct SiS_Private *SiS_Pr); |
1551 | void SiS_DisplayOff(struct SiS_Private *SiS_Pr); | 1533 | void SiS_DisplayOff(struct SiS_Private *SiS_Pr); |
1552 | void SiSRegInit(struct SiS_Private *SiS_Pr, SISIOADDRESS BaseAddr); | 1534 | void SiSRegInit(struct SiS_Private *SiS_Pr, SISIOADDRESS BaseAddr); |
1553 | #ifndef SIS_LINUX_KERNEL | ||
1554 | void SiSSetLVDSetc(struct SiS_Private *SiS_Pr); | ||
1555 | #endif | ||
1556 | void SiS_SetEnableDstn(struct SiS_Private *SiS_Pr, int enable); | 1535 | void SiS_SetEnableDstn(struct SiS_Private *SiS_Pr, int enable); |
1557 | void SiS_SetEnableFstn(struct SiS_Private *SiS_Pr, int enable); | 1536 | void SiS_SetEnableFstn(struct SiS_Private *SiS_Pr, int enable); |
1558 | unsigned short SiS_GetModeFlag(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | 1537 | unsigned short SiS_GetModeFlag(struct SiS_Private *SiS_Pr, unsigned short ModeNo, |
1559 | unsigned short ModeIdIndex); | 1538 | unsigned short ModeIdIndex); |
1560 | bool SiSDetermineROMLayout661(struct SiS_Private *SiS_Pr); | 1539 | bool SiSDetermineROMLayout661(struct SiS_Private *SiS_Pr); |
1561 | #ifndef SIS_LINUX_KERNEL | ||
1562 | void SiS_GetVBType(struct SiS_Private *SiS_Pr); | ||
1563 | #endif | ||
1564 | 1540 | ||
1565 | bool SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo, | 1541 | bool SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo, |
1566 | unsigned short *ModeIdIndex); | 1542 | unsigned short *ModeIdIndex); |
@@ -1572,37 +1548,19 @@ unsigned short SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short Mode | |||
1572 | unsigned short ModeIdIndex); | 1548 | unsigned short ModeIdIndex); |
1573 | unsigned short SiS_GetOffset(struct SiS_Private *SiS_Pr,unsigned short ModeNo, | 1549 | unsigned short SiS_GetOffset(struct SiS_Private *SiS_Pr,unsigned short ModeNo, |
1574 | unsigned short ModeIdIndex, unsigned short RRTI); | 1550 | unsigned short ModeIdIndex, unsigned short RRTI); |
1575 | #ifdef SIS300 | 1551 | #ifdef CONFIG_FB_SIS_300 |
1576 | void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1, | 1552 | void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1, |
1577 | unsigned short *idx2); | 1553 | unsigned short *idx2); |
1578 | unsigned short SiS_GetFIFOThresholdB300(unsigned short idx1, unsigned short idx2); | 1554 | unsigned short SiS_GetFIFOThresholdB300(unsigned short idx1, unsigned short idx2); |
1579 | unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); | 1555 | unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); |
1580 | #endif | 1556 | #endif |
1581 | void SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); | 1557 | void SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); |
1582 | #ifdef SIS_XORG_XF86 | ||
1583 | bool SiSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, unsigned short ModeNo, | ||
1584 | bool dosetpitch); | ||
1585 | bool SiSBIOSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, | ||
1586 | DisplayModePtr mode, bool IsCustom); | ||
1587 | bool SiSBIOSSetModeCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, | ||
1588 | DisplayModePtr mode, bool IsCustom); | ||
1589 | bool SiSBIOSSetModeCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, | ||
1590 | DisplayModePtr mode, bool IsCustom); | ||
1591 | #endif | ||
1592 | #ifdef SIS_LINUX_KERNEL | ||
1593 | bool SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); | 1558 | bool SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); |
1594 | #endif | ||
1595 | void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); | 1559 | void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); |
1596 | void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | 1560 | void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, |
1597 | unsigned short ModeIdIndex); | 1561 | unsigned short ModeIdIndex); |
1598 | #ifdef SIS_XORG_XF86 | ||
1599 | void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, | ||
1600 | int yres, DisplayModePtr current); | ||
1601 | #endif | ||
1602 | #ifdef SIS_LINUX_KERNEL | ||
1603 | void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, | 1562 | void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, |
1604 | int yres, struct fb_var_screeninfo *var, bool writeres); | 1563 | int yres, struct fb_var_screeninfo *var, bool writeres); |
1605 | #endif | ||
1606 | 1564 | ||
1607 | /* From init301.c: */ | 1565 | /* From init301.c: */ |
1608 | extern void SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | 1566 | extern void SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, |
@@ -1626,29 +1584,16 @@ extern unsigned short SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short | |||
1626 | extern bool SiS_IsVAMode(struct SiS_Private *); | 1584 | extern bool SiS_IsVAMode(struct SiS_Private *); |
1627 | extern bool SiS_IsDualEdge(struct SiS_Private *); | 1585 | extern bool SiS_IsDualEdge(struct SiS_Private *); |
1628 | 1586 | ||
1629 | #ifdef SIS_XORG_XF86 | 1587 | #ifdef CONFIG_FB_SIS_300 |
1630 | /* From other modules: */ | ||
1631 | extern unsigned short SiS_CheckBuildCustomMode(ScrnInfoPtr pScrn, DisplayModePtr mode, | ||
1632 | unsigned int VBFlags); | ||
1633 | extern unsigned char SiS_GetSetBIOSScratch(ScrnInfoPtr pScrn, unsigned short offset, | ||
1634 | unsigned char value); | ||
1635 | extern unsigned char SiS_GetSetModeID(ScrnInfoPtr pScrn, unsigned char id); | ||
1636 | extern unsigned short SiS_GetModeNumber(ScrnInfoPtr pScrn, DisplayModePtr mode, | ||
1637 | unsigned int VBFlags); | ||
1638 | #endif | ||
1639 | |||
1640 | #ifdef SIS_LINUX_KERNEL | ||
1641 | #ifdef SIS300 | ||
1642 | extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); | 1588 | extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); |
1643 | extern void sisfb_write_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg, | 1589 | extern void sisfb_write_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg, |
1644 | unsigned int val); | 1590 | unsigned int val); |
1645 | #endif | 1591 | #endif |
1646 | #ifdef SIS315H | 1592 | #ifdef CONFIG_FB_SIS_315 |
1647 | extern void sisfb_write_nbridge_pci_byte(struct SiS_Private *SiS_Pr, int reg, | 1593 | extern void sisfb_write_nbridge_pci_byte(struct SiS_Private *SiS_Pr, int reg, |
1648 | unsigned char val); | 1594 | unsigned char val); |
1649 | extern unsigned int sisfb_read_mio_pci_word(struct SiS_Private *SiS_Pr, int reg); | 1595 | extern unsigned int sisfb_read_mio_pci_word(struct SiS_Private *SiS_Pr, int reg); |
1650 | #endif | 1596 | #endif |
1651 | #endif | ||
1652 | 1597 | ||
1653 | #endif | 1598 | #endif |
1654 | 1599 | ||
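The init.h hunks replace the driver-local SIS300/SIS315H guards (formerly set up via osdef.h, whose include is dropped above) with the sisfb Kconfig symbols, so chip-family code now compiles, or not, straight from the kernel configuration. The resulting pattern, sketched with placeholder functions (the guard names are taken from the hunks; everything else here is illustrative only):

	#ifdef CONFIG_FB_SIS_300
	static void family_setup_300(void)  { /* 300-series-only code     */ }
	#endif

	#ifdef CONFIG_FB_SIS_315
	static void family_setup_315(void)  { /* 315/330-series-only code */ }
	#endif

	#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
	static void family_setup_common(void) { /* needed by either family */ }
	#endif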
diff --git a/drivers/video/sis/init301.c b/drivers/video/sis/init301.c index da33d801c22e..9fa66fd4052a 100644 --- a/drivers/video/sis/init301.c +++ b/drivers/video/sis/init301.c | |||
@@ -75,11 +75,11 @@ | |||
75 | 75 | ||
76 | #include "init301.h" | 76 | #include "init301.h" |
77 | 77 | ||
78 | #ifdef SIS300 | 78 | #ifdef CONFIG_FB_SIS_300 |
79 | #include "oem300.h" | 79 | #include "oem300.h" |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | #ifdef SIS315H | 82 | #ifdef CONFIG_FB_SIS_315 |
83 | #include "oem310.h" | 83 | #include "oem310.h" |
84 | #endif | 84 | #endif |
85 | 85 | ||
@@ -87,9 +87,7 @@ | |||
87 | #define SiS_I2CDELAYSHORT 150 | 87 | #define SiS_I2CDELAYSHORT 150 |
88 | 88 | ||
89 | static unsigned short SiS_GetBIOSLCDResInfo(struct SiS_Private *SiS_Pr); | 89 | static unsigned short SiS_GetBIOSLCDResInfo(struct SiS_Private *SiS_Pr); |
90 | #ifdef SIS_LINUX_KERNEL | ||
91 | static void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); | 90 | static void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); |
92 | #endif | ||
93 | 91 | ||
94 | /*********************************************/ | 92 | /*********************************************/ |
95 | /* HELPER: Lock/Unlock CRT2 */ | 93 | /* HELPER: Lock/Unlock CRT2 */ |
@@ -106,9 +104,7 @@ SiS_UnLockCRT2(struct SiS_Private *SiS_Pr) | |||
106 | SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x24,0x01); | 104 | SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x24,0x01); |
107 | } | 105 | } |
108 | 106 | ||
109 | #ifdef SIS_LINUX_KERNEL | ||
110 | static | 107 | static |
111 | #endif | ||
112 | void | 108 | void |
113 | SiS_LockCRT2(struct SiS_Private *SiS_Pr) | 109 | SiS_LockCRT2(struct SiS_Private *SiS_Pr) |
114 | { | 110 | { |
@@ -138,7 +134,7 @@ SiS_SetRegSR11ANDOR(struct SiS_Private *SiS_Pr, unsigned short DataAND, unsigned | |||
138 | /* HELPER: Get Pointer to LCD structure */ | 134 | /* HELPER: Get Pointer to LCD structure */ |
139 | /*********************************************/ | 135 | /*********************************************/ |
140 | 136 | ||
141 | #ifdef SIS315H | 137 | #ifdef CONFIG_FB_SIS_315 |
142 | static unsigned char * | 138 | static unsigned char * |
143 | GetLCDStructPtr661(struct SiS_Private *SiS_Pr) | 139 | GetLCDStructPtr661(struct SiS_Private *SiS_Pr) |
144 | { | 140 | { |
@@ -404,7 +400,7 @@ SiS_SaveCRT2Info(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
404 | /* HELPER: GET SOME DATA FROM BIOS ROM */ | 400 | /* HELPER: GET SOME DATA FROM BIOS ROM */ |
405 | /*********************************************/ | 401 | /*********************************************/ |
406 | 402 | ||
407 | #ifdef SIS300 | 403 | #ifdef CONFIG_FB_SIS_300 |
408 | static bool | 404 | static bool |
409 | SiS_CR36BIOSWord23b(struct SiS_Private *SiS_Pr) | 405 | SiS_CR36BIOSWord23b(struct SiS_Private *SiS_Pr) |
410 | { | 406 | { |
@@ -449,7 +445,7 @@ SiS_DDC2Delay(struct SiS_Private *SiS_Pr, unsigned int delaytime) | |||
449 | SiS_GetReg(SiS_Pr->SiS_P3c4, 0x05); | 445 | SiS_GetReg(SiS_Pr->SiS_P3c4, 0x05); |
450 | } | 446 | } |
451 | 447 | ||
452 | #if defined(SIS300) || defined(SIS315H) | 448 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
453 | static void | 449 | static void |
454 | SiS_GenericDelay(struct SiS_Private *SiS_Pr, unsigned short delay) | 450 | SiS_GenericDelay(struct SiS_Private *SiS_Pr, unsigned short delay) |
455 | { | 451 | { |
@@ -457,7 +453,7 @@ SiS_GenericDelay(struct SiS_Private *SiS_Pr, unsigned short delay) | |||
457 | } | 453 | } |
458 | #endif | 454 | #endif |
459 | 455 | ||
460 | #ifdef SIS315H | 456 | #ifdef CONFIG_FB_SIS_315 |
461 | static void | 457 | static void |
462 | SiS_LongDelay(struct SiS_Private *SiS_Pr, unsigned short delay) | 458 | SiS_LongDelay(struct SiS_Private *SiS_Pr, unsigned short delay) |
463 | { | 459 | { |
@@ -467,7 +463,7 @@ SiS_LongDelay(struct SiS_Private *SiS_Pr, unsigned short delay) | |||
467 | } | 463 | } |
468 | #endif | 464 | #endif |
469 | 465 | ||
470 | #if defined(SIS300) || defined(SIS315H) | 466 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
471 | static void | 467 | static void |
472 | SiS_ShortDelay(struct SiS_Private *SiS_Pr, unsigned short delay) | 468 | SiS_ShortDelay(struct SiS_Private *SiS_Pr, unsigned short delay) |
473 | { | 469 | { |
@@ -480,14 +476,14 @@ SiS_ShortDelay(struct SiS_Private *SiS_Pr, unsigned short delay) | |||
480 | static void | 476 | static void |
481 | SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) | 477 | SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) |
482 | { | 478 | { |
483 | #if defined(SIS300) || defined(SIS315H) | 479 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
484 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 480 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
485 | unsigned short PanelID, DelayIndex, Delay=0; | 481 | unsigned short PanelID, DelayIndex, Delay=0; |
486 | #endif | 482 | #endif |
487 | 483 | ||
488 | if(SiS_Pr->ChipType < SIS_315H) { | 484 | if(SiS_Pr->ChipType < SIS_315H) { |
489 | 485 | ||
490 | #ifdef SIS300 | 486 | #ifdef CONFIG_FB_SIS_300 |
491 | 487 | ||
492 | PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36); | 488 | PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36); |
493 | if(SiS_Pr->SiS_VBType & VB_SISVB) { | 489 | if(SiS_Pr->SiS_VBType & VB_SISVB) { |
@@ -513,11 +509,11 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) | |||
513 | } | 509 | } |
514 | SiS_ShortDelay(SiS_Pr, Delay); | 510 | SiS_ShortDelay(SiS_Pr, Delay); |
515 | 511 | ||
516 | #endif /* SIS300 */ | 512 | #endif /* CONFIG_FB_SIS_300 */ |
517 | 513 | ||
518 | } else { | 514 | } else { |
519 | 515 | ||
520 | #ifdef SIS315H | 516 | #ifdef CONFIG_FB_SIS_315 |
521 | 517 | ||
522 | if((SiS_Pr->ChipType >= SIS_661) || | 518 | if((SiS_Pr->ChipType >= SIS_661) || |
523 | (SiS_Pr->ChipType <= SIS_315PRO) || | 519 | (SiS_Pr->ChipType <= SIS_315PRO) || |
@@ -579,12 +575,12 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) | |||
579 | 575 | ||
580 | } | 576 | } |
581 | 577 | ||
582 | #endif /* SIS315H */ | 578 | #endif /* CONFIG_FB_SIS_315 */ |
583 | 579 | ||
584 | } | 580 | } |
585 | } | 581 | } |
586 | 582 | ||
587 | #ifdef SIS315H | 583 | #ifdef CONFIG_FB_SIS_315 |
588 | static void | 584 | static void |
589 | SiS_PanelDelayLoop(struct SiS_Private *SiS_Pr, unsigned short DelayTime, unsigned short DelayLoop) | 585 | SiS_PanelDelayLoop(struct SiS_Private *SiS_Pr, unsigned short DelayTime, unsigned short DelayLoop) |
590 | { | 586 | { |
@@ -613,7 +609,7 @@ SiS_WaitRetrace1(struct SiS_Private *SiS_Pr) | |||
613 | while((!(SiS_GetRegByte(SiS_Pr->SiS_P3da) & 0x08)) && --watchdog); | 609 | while((!(SiS_GetRegByte(SiS_Pr->SiS_P3da) & 0x08)) && --watchdog); |
614 | } | 610 | } |
615 | 611 | ||
616 | #if defined(SIS300) || defined(SIS315H) | 612 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
617 | static void | 613 | static void |
618 | SiS_WaitRetrace2(struct SiS_Private *SiS_Pr, unsigned short reg) | 614 | SiS_WaitRetrace2(struct SiS_Private *SiS_Pr, unsigned short reg) |
619 | { | 615 | { |
@@ -630,7 +626,7 @@ static void | |||
630 | SiS_WaitVBRetrace(struct SiS_Private *SiS_Pr) | 626 | SiS_WaitVBRetrace(struct SiS_Private *SiS_Pr) |
631 | { | 627 | { |
632 | if(SiS_Pr->ChipType < SIS_315H) { | 628 | if(SiS_Pr->ChipType < SIS_315H) { |
633 | #ifdef SIS300 | 629 | #ifdef CONFIG_FB_SIS_300 |
634 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | 630 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { |
635 | if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x20)) return; | 631 | if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x20)) return; |
636 | } | 632 | } |
@@ -641,7 +637,7 @@ SiS_WaitVBRetrace(struct SiS_Private *SiS_Pr) | |||
641 | } | 637 | } |
642 | #endif | 638 | #endif |
643 | } else { | 639 | } else { |
644 | #ifdef SIS315H | 640 | #ifdef CONFIG_FB_SIS_315 |
645 | if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x40)) { | 641 | if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x40)) { |
646 | SiS_WaitRetrace1(SiS_Pr); | 642 | SiS_WaitRetrace1(SiS_Pr); |
647 | } else { | 643 | } else { |
@@ -686,7 +682,7 @@ SiS_VBLongWait(struct SiS_Private *SiS_Pr) | |||
686 | /* HELPER: MISC */ | 682 | /* HELPER: MISC */ |
687 | /*********************************************/ | 683 | /*********************************************/ |
688 | 684 | ||
689 | #ifdef SIS300 | 685 | #ifdef CONFIG_FB_SIS_300 |
690 | static bool | 686 | static bool |
691 | SiS_Is301B(struct SiS_Private *SiS_Pr) | 687 | SiS_Is301B(struct SiS_Private *SiS_Pr) |
692 | { | 688 | { |
@@ -708,7 +704,7 @@ SiS_CRT2IsLCD(struct SiS_Private *SiS_Pr) | |||
708 | bool | 704 | bool |
709 | SiS_IsDualEdge(struct SiS_Private *SiS_Pr) | 705 | SiS_IsDualEdge(struct SiS_Private *SiS_Pr) |
710 | { | 706 | { |
711 | #ifdef SIS315H | 707 | #ifdef CONFIG_FB_SIS_315 |
712 | if(SiS_Pr->ChipType >= SIS_315H) { | 708 | if(SiS_Pr->ChipType >= SIS_315H) { |
713 | if((SiS_Pr->ChipType != SIS_650) || (SiS_GetReg(SiS_Pr->SiS_P3d4,0x5f) & 0xf0)) { | 709 | if((SiS_Pr->ChipType != SIS_650) || (SiS_GetReg(SiS_Pr->SiS_P3d4,0x5f) & 0xf0)) { |
714 | if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x38) & EnableDualEdge) return true; | 710 | if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x38) & EnableDualEdge) return true; |
@@ -721,7 +717,7 @@ SiS_IsDualEdge(struct SiS_Private *SiS_Pr) | |||
721 | bool | 717 | bool |
722 | SiS_IsVAMode(struct SiS_Private *SiS_Pr) | 718 | SiS_IsVAMode(struct SiS_Private *SiS_Pr) |
723 | { | 719 | { |
724 | #ifdef SIS315H | 720 | #ifdef CONFIG_FB_SIS_315 |
725 | unsigned short flag; | 721 | unsigned short flag; |
726 | 722 | ||
727 | if(SiS_Pr->ChipType >= SIS_315H) { | 723 | if(SiS_Pr->ChipType >= SIS_315H) { |
@@ -732,7 +728,7 @@ SiS_IsVAMode(struct SiS_Private *SiS_Pr) | |||
732 | return false; | 728 | return false; |
733 | } | 729 | } |
734 | 730 | ||
735 | #ifdef SIS315H | 731 | #ifdef CONFIG_FB_SIS_315 |
736 | static bool | 732 | static bool |
737 | SiS_IsVAorLCD(struct SiS_Private *SiS_Pr) | 733 | SiS_IsVAorLCD(struct SiS_Private *SiS_Pr) |
738 | { | 734 | { |
@@ -745,7 +741,7 @@ SiS_IsVAorLCD(struct SiS_Private *SiS_Pr) | |||
745 | static bool | 741 | static bool |
746 | SiS_IsDualLink(struct SiS_Private *SiS_Pr) | 742 | SiS_IsDualLink(struct SiS_Private *SiS_Pr) |
747 | { | 743 | { |
748 | #ifdef SIS315H | 744 | #ifdef CONFIG_FB_SIS_315 |
749 | if(SiS_Pr->ChipType >= SIS_315H) { | 745 | if(SiS_Pr->ChipType >= SIS_315H) { |
750 | if((SiS_CRT2IsLCD(SiS_Pr)) || | 746 | if((SiS_CRT2IsLCD(SiS_Pr)) || |
751 | (SiS_IsVAMode(SiS_Pr))) { | 747 | (SiS_IsVAMode(SiS_Pr))) { |
@@ -756,7 +752,7 @@ SiS_IsDualLink(struct SiS_Private *SiS_Pr) | |||
756 | return false; | 752 | return false; |
757 | } | 753 | } |
758 | 754 | ||
759 | #ifdef SIS315H | 755 | #ifdef CONFIG_FB_SIS_315 |
760 | static bool | 756 | static bool |
761 | SiS_TVEnabled(struct SiS_Private *SiS_Pr) | 757 | SiS_TVEnabled(struct SiS_Private *SiS_Pr) |
762 | { | 758 | { |
@@ -768,7 +764,7 @@ SiS_TVEnabled(struct SiS_Private *SiS_Pr) | |||
768 | } | 764 | } |
769 | #endif | 765 | #endif |
770 | 766 | ||
771 | #ifdef SIS315H | 767 | #ifdef CONFIG_FB_SIS_315 |
772 | static bool | 768 | static bool |
773 | SiS_LCDAEnabled(struct SiS_Private *SiS_Pr) | 769 | SiS_LCDAEnabled(struct SiS_Private *SiS_Pr) |
774 | { | 770 | { |
@@ -777,7 +773,7 @@ SiS_LCDAEnabled(struct SiS_Private *SiS_Pr) | |||
777 | } | 773 | } |
778 | #endif | 774 | #endif |
779 | 775 | ||
780 | #ifdef SIS315H | 776 | #ifdef CONFIG_FB_SIS_315 |
781 | static bool | 777 | static bool |
782 | SiS_WeHaveBacklightCtrl(struct SiS_Private *SiS_Pr) | 778 | SiS_WeHaveBacklightCtrl(struct SiS_Private *SiS_Pr) |
783 | { | 779 | { |
@@ -788,7 +784,7 @@ SiS_WeHaveBacklightCtrl(struct SiS_Private *SiS_Pr) | |||
788 | } | 784 | } |
789 | #endif | 785 | #endif |
790 | 786 | ||
791 | #ifdef SIS315H | 787 | #ifdef CONFIG_FB_SIS_315 |
792 | static bool | 788 | static bool |
793 | SiS_IsNotM650orLater(struct SiS_Private *SiS_Pr) | 789 | SiS_IsNotM650orLater(struct SiS_Private *SiS_Pr) |
794 | { | 790 | { |
@@ -804,7 +800,7 @@ SiS_IsNotM650orLater(struct SiS_Private *SiS_Pr) | |||
804 | } | 800 | } |
805 | #endif | 801 | #endif |
806 | 802 | ||
807 | #ifdef SIS315H | 803 | #ifdef CONFIG_FB_SIS_315 |
808 | static bool | 804 | static bool |
809 | SiS_IsYPbPr(struct SiS_Private *SiS_Pr) | 805 | SiS_IsYPbPr(struct SiS_Private *SiS_Pr) |
810 | { | 806 | { |
@@ -816,7 +812,7 @@ SiS_IsYPbPr(struct SiS_Private *SiS_Pr) | |||
816 | } | 812 | } |
817 | #endif | 813 | #endif |
818 | 814 | ||
819 | #ifdef SIS315H | 815 | #ifdef CONFIG_FB_SIS_315 |
820 | static bool | 816 | static bool |
821 | SiS_IsChScart(struct SiS_Private *SiS_Pr) | 817 | SiS_IsChScart(struct SiS_Private *SiS_Pr) |
822 | { | 818 | { |
@@ -828,7 +824,7 @@ SiS_IsChScart(struct SiS_Private *SiS_Pr) | |||
828 | } | 824 | } |
829 | #endif | 825 | #endif |
830 | 826 | ||
831 | #ifdef SIS315H | 827 | #ifdef CONFIG_FB_SIS_315 |
832 | static bool | 828 | static bool |
833 | SiS_IsTVOrYPbPrOrScart(struct SiS_Private *SiS_Pr) | 829 | SiS_IsTVOrYPbPrOrScart(struct SiS_Private *SiS_Pr) |
834 | { | 830 | { |
@@ -848,7 +844,7 @@ SiS_IsTVOrYPbPrOrScart(struct SiS_Private *SiS_Pr) | |||
848 | } | 844 | } |
849 | #endif | 845 | #endif |
850 | 846 | ||
851 | #ifdef SIS315H | 847 | #ifdef CONFIG_FB_SIS_315 |
852 | static bool | 848 | static bool |
853 | SiS_IsLCDOrLCDA(struct SiS_Private *SiS_Pr) | 849 | SiS_IsLCDOrLCDA(struct SiS_Private *SiS_Pr) |
854 | { | 850 | { |
@@ -914,7 +910,7 @@ SiS_BridgeInSlavemode(struct SiS_Private *SiS_Pr) | |||
914 | /*********************************************/ | 910 | /*********************************************/ |
915 | 911 | ||
916 | /* Setup general purpose IO for Chrontel communication */ | 912 | /* Setup general purpose IO for Chrontel communication */ |
917 | #ifdef SIS300 | 913 | #ifdef CONFIG_FB_SIS_300 |
918 | void | 914 | void |
919 | SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo) | 915 | SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo) |
920 | { | 916 | { |
@@ -923,11 +919,7 @@ SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo) | |||
923 | 919 | ||
924 | if(!(SiS_Pr->SiS_ChSW)) return; | 920 | if(!(SiS_Pr->SiS_ChSW)) return; |
925 | 921 | ||
926 | #ifdef SIS_LINUX_KERNEL | ||
927 | acpibase = sisfb_read_lpc_pci_dword(SiS_Pr, 0x74); | 922 | acpibase = sisfb_read_lpc_pci_dword(SiS_Pr, 0x74); |
928 | #else | ||
929 | acpibase = pciReadLong(0x00000800, 0x74); | ||
930 | #endif | ||
931 | acpibase &= 0xFFFF; | 923 | acpibase &= 0xFFFF; |
932 | if(!acpibase) return; | 924 | if(!acpibase) return; |
933 | temp = SiS_GetRegShort((acpibase + 0x3c)); /* ACPI register 0x3c: GP Event 1 I/O mode select */ | 925 | temp = SiS_GetRegShort((acpibase + 0x3c)); /* ACPI register 0x3c: GP Event 1 I/O mode select */ |
@@ -969,7 +961,7 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
969 | tempax &= (DriverMode | LoadDACFlag | SetNotSimuMode | SetPALTV); | 961 | tempax &= (DriverMode | LoadDACFlag | SetNotSimuMode | SetPALTV); |
970 | tempbx |= tempax; | 962 | tempbx |= tempax; |
971 | 963 | ||
972 | #ifdef SIS315H | 964 | #ifdef CONFIG_FB_SIS_315 |
973 | if(SiS_Pr->ChipType >= SIS_315H) { | 965 | if(SiS_Pr->ChipType >= SIS_315H) { |
974 | if(SiS_Pr->SiS_VBType & VB_SISLCDA) { | 966 | if(SiS_Pr->SiS_VBType & VB_SISLCDA) { |
975 | if(ModeNo == 0x03) { | 967 | if(ModeNo == 0x03) { |
@@ -1019,7 +1011,7 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
1019 | } | 1011 | } |
1020 | } | 1012 | } |
1021 | 1013 | ||
1022 | #endif /* SIS315H */ | 1014 | #endif /* CONFIG_FB_SIS_315 */ |
1023 | 1015 | ||
1024 | if(!(SiS_Pr->SiS_VBType & VB_SISVGA2)) { | 1016 | if(!(SiS_Pr->SiS_VBType & VB_SISVGA2)) { |
1025 | tempbx &= ~(SetCRT2ToRAMDAC); | 1017 | tempbx &= ~(SetCRT2ToRAMDAC); |
@@ -1154,24 +1146,16 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
1154 | 1146 | ||
1155 | SiS_Pr->SiS_VBInfo = tempbx; | 1147 | SiS_Pr->SiS_VBInfo = tempbx; |
1156 | 1148 | ||
1157 | #ifdef SIS300 | 1149 | #ifdef CONFIG_FB_SIS_300 |
1158 | if(SiS_Pr->ChipType == SIS_630) { | 1150 | if(SiS_Pr->ChipType == SIS_630) { |
1159 | SiS_SetChrontelGPIO(SiS_Pr, SiS_Pr->SiS_VBInfo); | 1151 | SiS_SetChrontelGPIO(SiS_Pr, SiS_Pr->SiS_VBInfo); |
1160 | } | 1152 | } |
1161 | #endif | 1153 | #endif |
1162 | 1154 | ||
1163 | #ifdef SIS_LINUX_KERNEL | ||
1164 | #if 0 | 1155 | #if 0 |
1165 | printk(KERN_DEBUG "sisfb: (init301: VBInfo= 0x%04x, SetFlag=0x%04x)\n", | 1156 | printk(KERN_DEBUG "sisfb: (init301: VBInfo= 0x%04x, SetFlag=0x%04x)\n", |
1166 | SiS_Pr->SiS_VBInfo, SiS_Pr->SiS_SetFlag); | 1157 | SiS_Pr->SiS_VBInfo, SiS_Pr->SiS_SetFlag); |
1167 | #endif | 1158 | #endif |
1168 | #endif | ||
1169 | #ifdef SIS_XORG_XF86 | ||
1170 | #ifdef TWDEBUG | ||
1171 | xf86DrvMsg(0, X_PROBED, "(init301: VBInfo=0x%04x, SetFlag=0x%04x)\n", | ||
1172 | SiS_Pr->SiS_VBInfo, SiS_Pr->SiS_SetFlag); | ||
1173 | #endif | ||
1174 | #endif | ||
1175 | } | 1159 | } |
1176 | 1160 | ||
1177 | /*********************************************/ | 1161 | /*********************************************/ |
@@ -1415,12 +1399,6 @@ SiS_SetTVMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
1415 | } | 1399 | } |
1416 | 1400 | ||
1417 | SiS_Pr->SiS_VBInfo &= ~SetPALTV; | 1401 | SiS_Pr->SiS_VBInfo &= ~SetPALTV; |
1418 | |||
1419 | #ifdef SIS_XORG_XF86 | ||
1420 | #ifdef TWDEBUG | ||
1421 | xf86DrvMsg(0, X_INFO, "(init301: TVMode %x, VBInfo %x)\n", SiS_Pr->SiS_TVMode, SiS_Pr->SiS_VBInfo); | ||
1422 | #endif | ||
1423 | #endif | ||
1424 | } | 1402 | } |
1425 | 1403 | ||
1426 | /*********************************************/ | 1404 | /*********************************************/ |
@@ -1443,22 +1421,10 @@ SiS_GetBIOSLCDResInfo(struct SiS_Private *SiS_Pr) | |||
1443 | static void | 1421 | static void |
1444 | SiS_GetLCDInfoBIOS(struct SiS_Private *SiS_Pr) | 1422 | SiS_GetLCDInfoBIOS(struct SiS_Private *SiS_Pr) |
1445 | { | 1423 | { |
1446 | #ifdef SIS315H | 1424 | #ifdef CONFIG_FB_SIS_315 |
1447 | unsigned char *ROMAddr; | 1425 | unsigned char *ROMAddr; |
1448 | unsigned short temp; | 1426 | unsigned short temp; |
1449 | 1427 | ||
1450 | #ifdef SIS_XORG_XF86 | ||
1451 | #ifdef TWDEBUG | ||
1452 | xf86DrvMsg(0, X_INFO, "Paneldata driver: [%d %d] [H %d %d] [V %d %d] [C %d 0x%02x 0x%02x]\n", | ||
1453 | SiS_Pr->PanelHT, SiS_Pr->PanelVT, | ||
1454 | SiS_Pr->PanelHRS, SiS_Pr->PanelHRE, | ||
1455 | SiS_Pr->PanelVRS, SiS_Pr->PanelVRE, | ||
1456 | SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].CLOCK, | ||
1457 | SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_A, | ||
1458 | SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_B); | ||
1459 | #endif | ||
1460 | #endif | ||
1461 | |||
1462 | if((ROMAddr = GetLCDStructPtr661(SiS_Pr))) { | 1428 | if((ROMAddr = GetLCDStructPtr661(SiS_Pr))) { |
1463 | if((temp = SISGETROMW(6)) != SiS_Pr->PanelHT) { | 1429 | if((temp = SISGETROMW(6)) != SiS_Pr->PanelHT) { |
1464 | SiS_Pr->SiS_NeedRomModeData = true; | 1430 | SiS_Pr->SiS_NeedRomModeData = true; |
@@ -1480,18 +1446,6 @@ SiS_GetLCDInfoBIOS(struct SiS_Private *SiS_Pr) | |||
1480 | SiS_Pr->SiS_VCLKData[VCLK_CUSTOM_315].SR2C = | 1446 | SiS_Pr->SiS_VCLKData[VCLK_CUSTOM_315].SR2C = |
1481 | SiS_Pr->SiS_VBVCLKData[VCLK_CUSTOM_315].Part4_B = ROMAddr[20]; | 1447 | SiS_Pr->SiS_VBVCLKData[VCLK_CUSTOM_315].Part4_B = ROMAddr[20]; |
1482 | 1448 | ||
1483 | #ifdef SIS_XORG_XF86 | ||
1484 | #ifdef TWDEBUG | ||
1485 | xf86DrvMsg(0, X_INFO, "Paneldata BIOS: [%d %d] [H %d %d] [V %d %d] [C %d 0x%02x 0x%02x]\n", | ||
1486 | SiS_Pr->PanelHT, SiS_Pr->PanelVT, | ||
1487 | SiS_Pr->PanelHRS, SiS_Pr->PanelHRE, | ||
1488 | SiS_Pr->PanelVRS, SiS_Pr->PanelVRE, | ||
1489 | SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].CLOCK, | ||
1490 | SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_A, | ||
1491 | SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_B); | ||
1492 | #endif | ||
1493 | #endif | ||
1494 | |||
1495 | } | 1449 | } |
1496 | #endif | 1450 | #endif |
1497 | } | 1451 | } |
@@ -1517,13 +1471,13 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
1517 | { | 1471 | { |
1518 | unsigned short temp,modeflag,resinfo=0,modexres=0,modeyres=0; | 1472 | unsigned short temp,modeflag,resinfo=0,modexres=0,modeyres=0; |
1519 | bool panelcanscale = false; | 1473 | bool panelcanscale = false; |
1520 | #ifdef SIS300 | 1474 | #ifdef CONFIG_FB_SIS_300 |
1521 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 1475 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
1522 | static const unsigned char SiS300SeriesLCDRes[] = | 1476 | static const unsigned char SiS300SeriesLCDRes[] = |
1523 | { 0, 1, 2, 3, 7, 4, 5, 8, | 1477 | { 0, 1, 2, 3, 7, 4, 5, 8, |
1524 | 0, 0, 10, 0, 0, 0, 0, 15 }; | 1478 | 0, 0, 10, 0, 0, 0, 0, 15 }; |
1525 | #endif | 1479 | #endif |
1526 | #ifdef SIS315H | 1480 | #ifdef CONFIG_FB_SIS_315 |
1527 | unsigned char *myptr = NULL; | 1481 | unsigned char *myptr = NULL; |
1528 | #endif | 1482 | #endif |
1529 | 1483 | ||
@@ -1562,7 +1516,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
1562 | SiS_Pr->SiS_LCDTypeInfo = (temp & 0x0F) - 1; | 1516 | SiS_Pr->SiS_LCDTypeInfo = (temp & 0x0F) - 1; |
1563 | } | 1517 | } |
1564 | temp &= 0x0f; | 1518 | temp &= 0x0f; |
1565 | #ifdef SIS300 | 1519 | #ifdef CONFIG_FB_SIS_300 |
1566 | if(SiS_Pr->ChipType < SIS_315H) { | 1520 | if(SiS_Pr->ChipType < SIS_315H) { |
1567 | /* Very old BIOSes only know 7 sizes (NetVista 2179, 1.01g) */ | 1521 | /* Very old BIOSes only know 7 sizes (NetVista 2179, 1.01g) */ |
1568 | if(SiS_Pr->SiS_VBType & VB_SIS301) { | 1522 | if(SiS_Pr->SiS_VBType & VB_SIS301) { |
@@ -1574,7 +1528,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
1574 | #endif | 1528 | #endif |
1575 | 1529 | ||
1576 | /* Translate to our internal types */ | 1530 | /* Translate to our internal types */ |
1577 | #ifdef SIS315H | 1531 | #ifdef CONFIG_FB_SIS_315 |
1578 | if(SiS_Pr->ChipType == SIS_550) { | 1532 | if(SiS_Pr->ChipType == SIS_550) { |
1579 | if (temp == Panel310_1152x768) temp = Panel_320x240_2; /* Verified working */ | 1533 | if (temp == Panel310_1152x768) temp = Panel_320x240_2; /* Verified working */ |
1580 | else if(temp == Panel310_320x240_2) temp = Panel_320x240_2; | 1534 | else if(temp == Panel310_320x240_2) temp = Panel_320x240_2; |
@@ -1597,7 +1551,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
1597 | 1551 | ||
1598 | SiS_Pr->SiS_LCDResInfo = temp; | 1552 | SiS_Pr->SiS_LCDResInfo = temp; |
1599 | 1553 | ||
1600 | #ifdef SIS300 | 1554 | #ifdef CONFIG_FB_SIS_300 |
1601 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { | 1555 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { |
1602 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) { | 1556 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) { |
1603 | SiS_Pr->SiS_LCDResInfo = Panel_Barco1366; | 1557 | SiS_Pr->SiS_LCDResInfo = Panel_Barco1366; |
@@ -1639,7 +1593,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
1639 | else if(SiS_Pr->UsePanelScaler == 1) SiS_Pr->SiS_LCDInfo |= DontExpandLCD; | 1593 | else if(SiS_Pr->UsePanelScaler == 1) SiS_Pr->SiS_LCDInfo |= DontExpandLCD; |
1640 | 1594 | ||
1641 | /* Dual link, Pass 1:1 BIOS default, etc. */ | 1595 | /* Dual link, Pass 1:1 BIOS default, etc. */ |
1642 | #ifdef SIS315H | 1596 | #ifdef CONFIG_FB_SIS_315 |
1643 | if(SiS_Pr->ChipType >= SIS_661) { | 1597 | if(SiS_Pr->ChipType >= SIS_661) { |
1644 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { | 1598 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { |
1645 | if(temp & 0x08) SiS_Pr->SiS_LCDInfo |= LCDPass11; | 1599 | if(temp & 0x08) SiS_Pr->SiS_LCDInfo |= LCDPass11; |
@@ -2076,7 +2030,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
2076 | } | 2030 | } |
2077 | } | 2031 | } |
2078 | 2032 | ||
2079 | #ifdef SIS300 | 2033 | #ifdef CONFIG_FB_SIS_300 |
2080 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { | 2034 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { |
2081 | if(SiS_Pr->SiS_CustomT == CUT_PANEL848 || SiS_Pr->SiS_CustomT == CUT_PANEL856) { | 2035 | if(SiS_Pr->SiS_CustomT == CUT_PANEL848 || SiS_Pr->SiS_CustomT == CUT_PANEL856) { |
2082 | SiS_Pr->SiS_LCDInfo = 0x80 | 0x40 | 0x20; /* neg h/v sync, RGB24(D0 = 0) */ | 2036 | SiS_Pr->SiS_LCDInfo = 0x80 | 0x40 | 0x20; /* neg h/v sync, RGB24(D0 = 0) */ |
@@ -2186,17 +2140,10 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
2186 | SiS_Pr->SiS_SetFlag |= LCDVESATiming; | 2140 | SiS_Pr->SiS_SetFlag |= LCDVESATiming; |
2187 | } | 2141 | } |
2188 | 2142 | ||
2189 | #ifdef SIS_LINUX_KERNEL | ||
2190 | #if 0 | 2143 | #if 0 |
2191 | printk(KERN_DEBUG "sisfb: (LCDInfo=0x%04x LCDResInfo=0x%02x LCDTypeInfo=0x%02x)\n", | 2144 | printk(KERN_DEBUG "sisfb: (LCDInfo=0x%04x LCDResInfo=0x%02x LCDTypeInfo=0x%02x)\n", |
2192 | SiS_Pr->SiS_LCDInfo, SiS_Pr->SiS_LCDResInfo, SiS_Pr->SiS_LCDTypeInfo); | 2145 | SiS_Pr->SiS_LCDInfo, SiS_Pr->SiS_LCDResInfo, SiS_Pr->SiS_LCDTypeInfo); |
2193 | #endif | 2146 | #endif |
2194 | #endif | ||
2195 | #ifdef SIS_XORG_XF86 | ||
2196 | xf86DrvMsgVerb(0, X_PROBED, 4, | ||
2197 | "(init301: LCDInfo=0x%04x LCDResInfo=0x%02x LCDTypeInfo=0x%02x SetFlag=0x%04x)\n", | ||
2198 | SiS_Pr->SiS_LCDInfo, SiS_Pr->SiS_LCDResInfo, SiS_Pr->SiS_LCDTypeInfo, SiS_Pr->SiS_SetFlag); | ||
2199 | #endif | ||
2200 | } | 2147 | } |
2201 | 2148 | ||
2202 | /*********************************************/ | 2149 | /*********************************************/ |
@@ -2359,7 +2306,7 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor | |||
2359 | VCLKIndex = SiS_Pr->PanelVCLKIdx315; | 2306 | VCLKIndex = SiS_Pr->PanelVCLKIdx315; |
2360 | } | 2307 | } |
2361 | 2308 | ||
2362 | #ifdef SIS300 | 2309 | #ifdef CONFIG_FB_SIS_300 |
2363 | /* Special Timing: Barco iQ Pro R series */ | 2310 | /* Special Timing: Barco iQ Pro R series */ |
2364 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) VCLKIndex = 0x44; | 2311 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) VCLKIndex = 0x44; |
2365 | 2312 | ||
@@ -2410,12 +2357,6 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor | |||
2410 | 2357 | ||
2411 | } | 2358 | } |
2412 | 2359 | ||
2413 | #ifdef SIS_XORG_XF86 | ||
2414 | #ifdef TWDEBUG | ||
2415 | xf86DrvMsg(0, X_INFO, "VCLKIndex %d (0x%x)\n", VCLKIndex, VCLKIndex); | ||
2416 | #endif | ||
2417 | #endif | ||
2418 | |||
2419 | return VCLKIndex; | 2360 | return VCLKIndex; |
2420 | } | 2361 | } |
2421 | 2362 | ||
@@ -2428,10 +2369,10 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2428 | { | 2369 | { |
2429 | unsigned short i, j, modeflag, tempah=0; | 2370 | unsigned short i, j, modeflag, tempah=0; |
2430 | short tempcl; | 2371 | short tempcl; |
2431 | #if defined(SIS300) || defined(SIS315H) | 2372 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
2432 | unsigned short tempbl; | 2373 | unsigned short tempbl; |
2433 | #endif | 2374 | #endif |
2434 | #ifdef SIS315H | 2375 | #ifdef CONFIG_FB_SIS_315 |
2435 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 2376 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
2436 | unsigned short tempah2, tempbl2; | 2377 | unsigned short tempah2, tempbl2; |
2437 | #endif | 2378 | #endif |
@@ -2454,7 +2395,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2454 | 2395 | ||
2455 | if(SiS_Pr->ChipType < SIS_315H) { | 2396 | if(SiS_Pr->ChipType < SIS_315H) { |
2456 | 2397 | ||
2457 | #ifdef SIS300 /* ---- 300 series ---- */ | 2398 | #ifdef CONFIG_FB_SIS_300 /* ---- 300 series ---- */ |
2458 | 2399 | ||
2459 | /* For 301BDH: (with LCD via LVDS) */ | 2400 | /* For 301BDH: (with LCD via LVDS) */ |
2460 | if(SiS_Pr->SiS_VBType & VB_NoLCD) { | 2401 | if(SiS_Pr->SiS_VBType & VB_NoLCD) { |
@@ -2477,11 +2418,11 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2477 | 2418 | ||
2478 | if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0xA0; | 2419 | if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0xA0; |
2479 | 2420 | ||
2480 | #endif /* SIS300 */ | 2421 | #endif /* CONFIG_FB_SIS_300 */ |
2481 | 2422 | ||
2482 | } else { | 2423 | } else { |
2483 | 2424 | ||
2484 | #ifdef SIS315H /* ------- 315/330 series ------ */ | 2425 | #ifdef CONFIG_FB_SIS_315 /* ------- 315/330 series ------ */ |
2485 | 2426 | ||
2486 | if(ModeNo > 0x13) { | 2427 | if(ModeNo > 0x13) { |
2487 | tempcl -= ModeVGA; | 2428 | tempcl -= ModeVGA; |
@@ -2494,7 +2435,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2494 | 2435 | ||
2495 | if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0x50; | 2436 | if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0x50; |
2496 | 2437 | ||
2497 | #endif /* SIS315H */ | 2438 | #endif /* CONFIG_FB_SIS_315 */ |
2498 | 2439 | ||
2499 | } | 2440 | } |
2500 | 2441 | ||
@@ -2503,7 +2444,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2503 | if(SiS_Pr->ChipType < SIS_315H) { | 2444 | if(SiS_Pr->ChipType < SIS_315H) { |
2504 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,tempah); | 2445 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,tempah); |
2505 | } else { | 2446 | } else { |
2506 | #ifdef SIS315H | 2447 | #ifdef CONFIG_FB_SIS_315 |
2507 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { | 2448 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { |
2508 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x00,0xa0,tempah); | 2449 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x00,0xa0,tempah); |
2509 | } else if(SiS_Pr->SiS_VBType & VB_SISVB) { | 2450 | } else if(SiS_Pr->SiS_VBType & VB_SISVB) { |
@@ -2584,7 +2525,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2584 | 2525 | ||
2585 | if(SiS_Pr->ChipType >= SIS_315H) { | 2526 | if(SiS_Pr->ChipType >= SIS_315H) { |
2586 | 2527 | ||
2587 | #ifdef SIS315H | 2528 | #ifdef CONFIG_FB_SIS_315 |
2588 | /* LVDS can only be slave in 8bpp modes */ | 2529 | /* LVDS can only be slave in 8bpp modes */ |
2589 | tempah = 0x80; | 2530 | tempah = 0x80; |
2590 | if((modeflag & CRT2Mode) && (SiS_Pr->SiS_ModeType > ModeVGA)) { | 2531 | if((modeflag & CRT2Mode) && (SiS_Pr->SiS_ModeType > ModeVGA)) { |
@@ -2604,7 +2545,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2604 | 2545 | ||
2605 | } else { | 2546 | } else { |
2606 | 2547 | ||
2607 | #ifdef SIS300 | 2548 | #ifdef CONFIG_FB_SIS_300 |
2608 | tempah = 0; | 2549 | tempah = 0; |
2609 | if( (!(SiS_Pr->SiS_VBInfo & SetInSlaveMode)) && (SiS_Pr->SiS_ModeType > ModeVGA) ) { | 2550 | if( (!(SiS_Pr->SiS_VBInfo & SetInSlaveMode)) && (SiS_Pr->SiS_ModeType > ModeVGA) ) { |
2610 | tempah |= 0x02; | 2551 | tempah |= 0x02; |
@@ -2626,7 +2567,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2626 | 2567 | ||
2627 | if(SiS_Pr->ChipType >= SIS_315H) { | 2568 | if(SiS_Pr->ChipType >= SIS_315H) { |
2628 | 2569 | ||
2629 | #ifdef SIS315H | 2570 | #ifdef CONFIG_FB_SIS_315 |
2630 | /* unsigned char bridgerev = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x01); */ | 2571 | /* unsigned char bridgerev = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x01); */ |
2631 | 2572 | ||
2632 | /* The following is nearly unpredictable and varies from machine | 2573 | /* The following is nearly unpredictable and varies from machine |
@@ -2718,11 +2659,11 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2718 | SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x23,tempbl,tempah); | 2659 | SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x23,tempbl,tempah); |
2719 | } | 2660 | } |
2720 | 2661 | ||
2721 | #endif /* SIS315H */ | 2662 | #endif /* CONFIG_FB_SIS_315 */ |
2722 | 2663 | ||
2723 | } else if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | 2664 | } else if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { |
2724 | 2665 | ||
2725 | #ifdef SIS300 | 2666 | #ifdef CONFIG_FB_SIS_300 |
2726 | SiS_SetRegAND(SiS_Pr->SiS_Part4Port,0x21,0x3f); | 2667 | SiS_SetRegAND(SiS_Pr->SiS_Part4Port,0x21,0x3f); |
2727 | 2668 | ||
2728 | if((SiS_Pr->SiS_VBInfo & DisableCRT2Display) || | 2669 | if((SiS_Pr->SiS_VBInfo & DisableCRT2Display) || |
@@ -2745,7 +2686,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2745 | 2686 | ||
2746 | } else { /* LVDS */ | 2687 | } else { /* LVDS */ |
2747 | 2688 | ||
2748 | #ifdef SIS315H | 2689 | #ifdef CONFIG_FB_SIS_315 |
2749 | if(SiS_Pr->ChipType >= SIS_315H) { | 2690 | if(SiS_Pr->ChipType >= SIS_315H) { |
2750 | 2691 | ||
2751 | if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { | 2692 | if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { |
@@ -2931,7 +2872,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
2931 | } | 2872 | } |
2932 | } | 2873 | } |
2933 | 2874 | ||
2934 | #ifdef SIS315H | 2875 | #ifdef CONFIG_FB_SIS_315 |
2935 | if(SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) { | 2876 | if(SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) { |
2936 | if(SiS_Pr->SiS_LCDResInfo == Panel_1280x1024) { | 2877 | if(SiS_Pr->SiS_LCDResInfo == Panel_1280x1024) { |
2937 | if(!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) { | 2878 | if(!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) { |
@@ -3036,7 +2977,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
3036 | case Panel_1280x1024: tempbx = 24; break; | 2977 | case Panel_1280x1024: tempbx = 24; break; |
3037 | case Panel_1400x1050: tempbx = 26; break; | 2978 | case Panel_1400x1050: tempbx = 26; break; |
3038 | case Panel_1600x1200: tempbx = 28; break; | 2979 | case Panel_1600x1200: tempbx = 28; break; |
3039 | #ifdef SIS300 | 2980 | #ifdef CONFIG_FB_SIS_300 |
3040 | case Panel_Barco1366: tempbx = 80; break; | 2981 | case Panel_Barco1366: tempbx = 80; break; |
3041 | #endif | 2982 | #endif |
3042 | } | 2983 | } |
@@ -3053,7 +2994,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
3053 | 2994 | ||
3054 | if(SiS_Pr->SiS_LCDInfo & LCDPass11) tempbx = 30; | 2995 | if(SiS_Pr->SiS_LCDInfo & LCDPass11) tempbx = 30; |
3055 | 2996 | ||
3056 | #ifdef SIS300 | 2997 | #ifdef CONFIG_FB_SIS_300 |
3057 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1024) { | 2998 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1024) { |
3058 | tempbx = 82; | 2999 | tempbx = 82; |
3059 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) tempbx++; | 3000 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) tempbx++; |
@@ -3189,7 +3130,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
3189 | 3130 | ||
3190 | if((SiS_Pr->SiS_VBType & VB_SISVB) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { | 3131 | if((SiS_Pr->SiS_VBType & VB_SISVB) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { |
3191 | 3132 | ||
3192 | #ifdef SIS315H | 3133 | #ifdef CONFIG_FB_SIS_315 |
3193 | SiS_CalcPanelLinkTiming(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 3134 | SiS_CalcPanelLinkTiming(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
3194 | SiS_CalcLCDACRT1Timing(SiS_Pr, ModeNo, ModeIdIndex); | 3135 | SiS_CalcLCDACRT1Timing(SiS_Pr, ModeNo, ModeIdIndex); |
3195 | #endif | 3136 | #endif |
@@ -3214,7 +3155,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
3214 | case 16: LVDSData = SiS_Pr->SiS_LVDS800x600Data_1; break; | 3155 | case 16: LVDSData = SiS_Pr->SiS_LVDS800x600Data_1; break; |
3215 | case 18: LVDSData = SiS_Pr->SiS_LVDS1024x600Data_1; break; | 3156 | case 18: LVDSData = SiS_Pr->SiS_LVDS1024x600Data_1; break; |
3216 | case 20: LVDSData = SiS_Pr->SiS_LVDS1024x768Data_1; break; | 3157 | case 20: LVDSData = SiS_Pr->SiS_LVDS1024x768Data_1; break; |
3217 | #ifdef SIS300 | 3158 | #ifdef CONFIG_FB_SIS_300 |
3218 | case 80: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_1; break; | 3159 | case 80: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_1; break; |
3219 | case 81: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_2; break; | 3160 | case 81: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_2; break; |
3220 | case 82: LVDSData = SiS_Pr->SiS_LVDSBARCO1024Data_1; break; | 3161 | case 82: LVDSData = SiS_Pr->SiS_LVDSBARCO1024Data_1; break; |
@@ -3248,7 +3189,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
3248 | (SiS_Pr->SiS_SetFlag & SetDOSMode) ) { | 3189 | (SiS_Pr->SiS_SetFlag & SetDOSMode) ) { |
3249 | SiS_Pr->SiS_HDE = SiS_Pr->PanelXRes; | 3190 | SiS_Pr->SiS_HDE = SiS_Pr->PanelXRes; |
3250 | SiS_Pr->SiS_VDE = SiS_Pr->PanelYRes; | 3191 | SiS_Pr->SiS_VDE = SiS_Pr->PanelYRes; |
3251 | #ifdef SIS300 | 3192 | #ifdef CONFIG_FB_SIS_300 |
3252 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) { | 3193 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) { |
3253 | if(ResIndex < 0x08) { | 3194 | if(ResIndex < 0x08) { |
3254 | SiS_Pr->SiS_HDE = 1280; | 3195 | SiS_Pr->SiS_HDE = 1280; |
@@ -3270,7 +3211,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3270 | unsigned short resinfo, CRT2Index, ResIndex; | 3211 | unsigned short resinfo, CRT2Index, ResIndex; |
3271 | const struct SiS_LCDData *LCDPtr = NULL; | 3212 | const struct SiS_LCDData *LCDPtr = NULL; |
3272 | const struct SiS_TVData *TVPtr = NULL; | 3213 | const struct SiS_TVData *TVPtr = NULL; |
3273 | #ifdef SIS315H | 3214 | #ifdef CONFIG_FB_SIS_315 |
3274 | short resinfo661; | 3215 | short resinfo661; |
3275 | #endif | 3216 | #endif |
3276 | 3217 | ||
@@ -3283,7 +3224,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3283 | } else { | 3224 | } else { |
3284 | modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; | 3225 | modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; |
3285 | resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; | 3226 | resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; |
3286 | #ifdef SIS315H | 3227 | #ifdef CONFIG_FB_SIS_315 |
3287 | resinfo661 = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].ROMMODEIDX661; | 3228 | resinfo661 = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].ROMMODEIDX661; |
3288 | if( (SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) && | 3229 | if( (SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) && |
3289 | (SiS_Pr->SiS_SetFlag & LCDVESATiming) && | 3230 | (SiS_Pr->SiS_SetFlag & LCDVESATiming) && |
@@ -3460,7 +3401,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3460 | 3401 | ||
3461 | } else if( (!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) && (romptr) && (ROMAddr) ) { | 3402 | } else if( (!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) && (romptr) && (ROMAddr) ) { |
3462 | 3403 | ||
3463 | #ifdef SIS315H | 3404 | #ifdef CONFIG_FB_SIS_315 |
3464 | SiS_Pr->SiS_RVBHCMAX = ROMAddr[romptr]; | 3405 | SiS_Pr->SiS_RVBHCMAX = ROMAddr[romptr]; |
3465 | SiS_Pr->SiS_RVBHCFACT = ROMAddr[romptr+1]; | 3406 | SiS_Pr->SiS_RVBHCFACT = ROMAddr[romptr+1]; |
3466 | SiS_Pr->SiS_VGAHT = ROMAddr[romptr+2] | ((ROMAddr[romptr+3] & 0x0f) << 8); | 3407 | SiS_Pr->SiS_VGAHT = ROMAddr[romptr+2] | ((ROMAddr[romptr+3] & 0x0f) << 8); |
@@ -3520,19 +3461,13 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3520 | case Panel_1680x1050 : | 3461 | case Panel_1680x1050 : |
3521 | case Panel_1680x1050 + 32: LCDPtr = SiS_Pr->SiS_LCD1680x1050Data; break; | 3462 | case Panel_1680x1050 + 32: LCDPtr = SiS_Pr->SiS_LCD1680x1050Data; break; |
3522 | case 100 : LCDPtr = SiS_Pr->SiS_NoScaleData; break; | 3463 | case 100 : LCDPtr = SiS_Pr->SiS_NoScaleData; break; |
3523 | #ifdef SIS315H | 3464 | #ifdef CONFIG_FB_SIS_315 |
3524 | case 200 : LCDPtr = SiS310_ExtCompaq1280x1024Data; break; | 3465 | case 200 : LCDPtr = SiS310_ExtCompaq1280x1024Data; break; |
3525 | case 201 : LCDPtr = SiS_Pr->SiS_St2LCD1280x1024Data; break; | 3466 | case 201 : LCDPtr = SiS_Pr->SiS_St2LCD1280x1024Data; break; |
3526 | #endif | 3467 | #endif |
3527 | default : LCDPtr = SiS_Pr->SiS_ExtLCD1024x768Data; break; | 3468 | default : LCDPtr = SiS_Pr->SiS_ExtLCD1024x768Data; break; |
3528 | } | 3469 | } |
3529 | 3470 | ||
3530 | #ifdef SIS_XORG_XF86 | ||
3531 | #ifdef TWDEBUG | ||
3532 | xf86DrvMsg(0, X_INFO, "GetCRT2Data: Index %d ResIndex %d\n", CRT2Index, ResIndex); | ||
3533 | #endif | ||
3534 | #endif | ||
3535 | |||
3536 | SiS_Pr->SiS_RVBHCMAX = (LCDPtr+ResIndex)->RVBHCMAX; | 3471 | SiS_Pr->SiS_RVBHCMAX = (LCDPtr+ResIndex)->RVBHCMAX; |
3537 | SiS_Pr->SiS_RVBHCFACT = (LCDPtr+ResIndex)->RVBHCFACT; | 3472 | SiS_Pr->SiS_RVBHCFACT = (LCDPtr+ResIndex)->RVBHCFACT; |
3538 | SiS_Pr->SiS_VGAHT = (LCDPtr+ResIndex)->VGAHT; | 3473 | SiS_Pr->SiS_VGAHT = (LCDPtr+ResIndex)->VGAHT; |
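
The deleted xf86DrvMsg() block here (and in the later hunks) was X.org-only TWDEBUG output; with the SIS_XORG_XF86 path gone, an equivalent trace in the kernel-only build would go through the kernel logging helpers. A rough sketch under that assumption (the wrapper function is hypothetical, not part of the driver):

#include <linux/printk.h>

/*
 * Hypothetical replacement for the removed xf86DrvMsg() trace: pr_debug()
 * is compiled out unless DEBUG or dynamic debug is enabled, so it costs
 * nothing in a production build.
 */
static void trace_crt2_index(unsigned short crt2_index, unsigned short res_index)
{
	pr_debug("GetCRT2Data: Index %d ResIndex %d\n", crt2_index, res_index);
}
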
@@ -3624,7 +3559,7 @@ SiS_GetLVDSDesPtr(struct SiS_Private *SiS_Pr) | |||
3624 | { | 3559 | { |
3625 | const struct SiS_LVDSDes *PanelDesPtr = NULL; | 3560 | const struct SiS_LVDSDes *PanelDesPtr = NULL; |
3626 | 3561 | ||
3627 | #ifdef SIS300 | 3562 | #ifdef CONFIG_FB_SIS_300 |
3628 | if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) { | 3563 | if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) { |
3629 | 3564 | ||
3630 | if(SiS_Pr->ChipType < SIS_315H) { | 3565 | if(SiS_Pr->ChipType < SIS_315H) { |
@@ -3696,7 +3631,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3696 | 3631 | ||
3697 | if((SiS_Pr->SiS_VBType & VB_SIS30xBLV) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { | 3632 | if((SiS_Pr->SiS_VBType & VB_SIS30xBLV) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { |
3698 | 3633 | ||
3699 | #ifdef SIS315H | 3634 | #ifdef CONFIG_FB_SIS_315 |
3700 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { | 3635 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { |
3701 | /* non-pass 1:1 only, see above */ | 3636 | /* non-pass 1:1 only, see above */ |
3702 | if(SiS_Pr->SiS_VGAHDE != SiS_Pr->PanelXRes) { | 3637 | if(SiS_Pr->SiS_VGAHDE != SiS_Pr->PanelXRes) { |
@@ -3771,7 +3706,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3771 | } else { | 3706 | } else { |
3772 | 3707 | ||
3773 | if(SiS_Pr->ChipType < SIS_315H) { | 3708 | if(SiS_Pr->ChipType < SIS_315H) { |
3774 | #ifdef SIS300 | 3709 | #ifdef CONFIG_FB_SIS_300 |
3775 | switch(SiS_Pr->SiS_LCDResInfo) { | 3710 | switch(SiS_Pr->SiS_LCDResInfo) { |
3776 | case Panel_800x600: | 3711 | case Panel_800x600: |
3777 | if(SiS_Pr->SiS_VGAVDE == SiS_Pr->PanelYRes) { | 3712 | if(SiS_Pr->SiS_VGAVDE == SiS_Pr->PanelYRes) { |
@@ -3816,7 +3751,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3816 | } | 3751 | } |
3817 | #endif | 3752 | #endif |
3818 | } else { | 3753 | } else { |
3819 | #ifdef SIS315H | 3754 | #ifdef CONFIG_FB_SIS_315 |
3820 | switch(SiS_Pr->SiS_LCDResInfo) { | 3755 | switch(SiS_Pr->SiS_LCDResInfo) { |
3821 | case Panel_1024x768: | 3756 | case Panel_1024x768: |
3822 | case Panel_1280x1024: | 3757 | case Panel_1280x1024: |
@@ -3844,7 +3779,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3844 | if(SiS_Pr->ChipType < SIS_315H) { | 3779 | if(SiS_Pr->ChipType < SIS_315H) { |
3845 | if(!(modeflag & HalfDCLK)) SiS_Pr->SiS_LCDHDES = 320; | 3780 | if(!(modeflag & HalfDCLK)) SiS_Pr->SiS_LCDHDES = 320; |
3846 | } else { | 3781 | } else { |
3847 | #ifdef SIS315H | 3782 | #ifdef CONFIG_FB_SIS_315 |
3848 | if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) SiS_Pr->SiS_LCDHDES = 480; | 3783 | if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) SiS_Pr->SiS_LCDHDES = 480; |
3849 | if(SiS_Pr->SiS_LCDResInfo == Panel_1400x1050) SiS_Pr->SiS_LCDHDES = 804; | 3784 | if(SiS_Pr->SiS_LCDResInfo == Panel_1400x1050) SiS_Pr->SiS_LCDHDES = 804; |
3850 | if(SiS_Pr->SiS_LCDResInfo == Panel_1600x1200) SiS_Pr->SiS_LCDHDES = 704; | 3785 | if(SiS_Pr->SiS_LCDResInfo == Panel_1600x1200) SiS_Pr->SiS_LCDHDES = 704; |
@@ -3866,7 +3801,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3866 | /* DISABLE VIDEO BRIDGE */ | 3801 | /* DISABLE VIDEO BRIDGE */ |
3867 | /*********************************************/ | 3802 | /*********************************************/ |
3868 | 3803 | ||
3869 | #ifdef SIS315H | 3804 | #ifdef CONFIG_FB_SIS_315 |
3870 | static int | 3805 | static int |
3871 | SiS_HandlePWD(struct SiS_Private *SiS_Pr) | 3806 | SiS_HandlePWD(struct SiS_Private *SiS_Pr) |
3872 | { | 3807 | { |
@@ -3891,11 +3826,6 @@ SiS_HandlePWD(struct SiS_Private *SiS_Pr) | |||
3891 | ret = 1; | 3826 | ret = 1; |
3892 | } | 3827 | } |
3893 | SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x27,0x7f,temp); | 3828 | SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x27,0x7f,temp); |
3894 | #ifdef SIS_XORG_XF86 | ||
3895 | #ifdef TWDEBUG | ||
3896 | xf86DrvMsg(0, 0, "Setting PWD %x\n", temp); | ||
3897 | #endif | ||
3898 | #endif | ||
3899 | } | 3829 | } |
3900 | #endif | 3830 | #endif |
3901 | return ret; | 3831 | return ret; |
@@ -3909,7 +3839,7 @@ SiS_HandlePWD(struct SiS_Private *SiS_Pr) | |||
3909 | void | 3839 | void |
3910 | SiS_DisableBridge(struct SiS_Private *SiS_Pr) | 3840 | SiS_DisableBridge(struct SiS_Private *SiS_Pr) |
3911 | { | 3841 | { |
3912 | #ifdef SIS315H | 3842 | #ifdef CONFIG_FB_SIS_315 |
3913 | unsigned short tempah, pushax=0, modenum; | 3843 | unsigned short tempah, pushax=0, modenum; |
3914 | #endif | 3844 | #endif |
3915 | unsigned short temp=0; | 3845 | unsigned short temp=0; |
@@ -3920,7 +3850,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
3920 | 3850 | ||
3921 | if(SiS_Pr->ChipType < SIS_315H) { | 3851 | if(SiS_Pr->ChipType < SIS_315H) { |
3922 | 3852 | ||
3923 | #ifdef SIS300 /* 300 series */ | 3853 | #ifdef CONFIG_FB_SIS_300 /* 300 series */ |
3924 | 3854 | ||
3925 | if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { | 3855 | if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { |
3926 | if(SiS_Pr->SiS_VBType & VB_SISLVDS) { | 3856 | if(SiS_Pr->SiS_VBType & VB_SISLVDS) { |
@@ -3953,11 +3883,11 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
3953 | } | 3883 | } |
3954 | } | 3884 | } |
3955 | 3885 | ||
3956 | #endif /* SIS300 */ | 3886 | #endif /* CONFIG_FB_SIS_300 */ |
3957 | 3887 | ||
3958 | } else { | 3888 | } else { |
3959 | 3889 | ||
3960 | #ifdef SIS315H /* 315 series */ | 3890 | #ifdef CONFIG_FB_SIS_315 /* 315 series */ |
3961 | 3891 | ||
3962 | int didpwd = 0; | 3892 | int didpwd = 0; |
3963 | bool custom1 = (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) || | 3893 | bool custom1 = (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) || |
@@ -4081,14 +4011,14 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
4081 | 4011 | ||
4082 | } | 4012 | } |
4083 | 4013 | ||
4084 | #endif /* SIS315H */ | 4014 | #endif /* CONFIG_FB_SIS_315 */ |
4085 | 4015 | ||
4086 | } | 4016 | } |
4087 | 4017 | ||
4088 | } else { /* ============ For 301 ================ */ | 4018 | } else { /* ============ For 301 ================ */ |
4089 | 4019 | ||
4090 | if(SiS_Pr->ChipType < SIS_315H) { | 4020 | if(SiS_Pr->ChipType < SIS_315H) { |
4091 | #ifdef SIS300 | 4021 | #ifdef CONFIG_FB_SIS_300 |
4092 | if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { | 4022 | if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { |
4093 | SiS_SetRegSR11ANDOR(SiS_Pr,0xF7,0x08); | 4023 | SiS_SetRegSR11ANDOR(SiS_Pr,0xF7,0x08); |
4094 | SiS_PanelDelay(SiS_Pr, 3); | 4024 | SiS_PanelDelay(SiS_Pr, 3); |
@@ -4111,7 +4041,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
4111 | SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x20); | 4041 | SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x20); |
4112 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,temp); | 4042 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,temp); |
4113 | } else { | 4043 | } else { |
4114 | #ifdef SIS300 | 4044 | #ifdef CONFIG_FB_SIS_300 |
4115 | SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x1E,0xDF); /* disable CRT2 */ | 4045 | SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x1E,0xDF); /* disable CRT2 */ |
4116 | if( (!(SiS_CRT2IsLCD(SiS_Pr))) || | 4046 | if( (!(SiS_CRT2IsLCD(SiS_Pr))) || |
4117 | (!(SiS_CR36BIOSWord23d(SiS_Pr))) ) { | 4047 | (!(SiS_CR36BIOSWord23d(SiS_Pr))) ) { |
@@ -4127,7 +4057,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
4127 | 4057 | ||
4128 | if(SiS_Pr->ChipType < SIS_315H) { | 4058 | if(SiS_Pr->ChipType < SIS_315H) { |
4129 | 4059 | ||
4130 | #ifdef SIS300 /* 300 series */ | 4060 | #ifdef CONFIG_FB_SIS_300 /* 300 series */ |
4131 | 4061 | ||
4132 | if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { | 4062 | if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { |
4133 | SiS_SetCH700x(SiS_Pr,0x0E,0x09); | 4063 | SiS_SetCH700x(SiS_Pr,0x0E,0x09); |
@@ -4171,11 +4101,11 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
4171 | SiS_SetRegSR11ANDOR(SiS_Pr,0xFB,0x04); | 4101 | SiS_SetRegSR11ANDOR(SiS_Pr,0xFB,0x04); |
4172 | } | 4102 | } |
4173 | 4103 | ||
4174 | #endif /* SIS300 */ | 4104 | #endif /* CONFIG_FB_SIS_300 */ |
4175 | 4105 | ||
4176 | } else { | 4106 | } else { |
4177 | 4107 | ||
4178 | #ifdef SIS315H /* 315 series */ | 4108 | #ifdef CONFIG_FB_SIS_315 /* 315 series */ |
4179 | 4109 | ||
4180 | if(!(SiS_IsNotM650orLater(SiS_Pr))) { | 4110 | if(!(SiS_IsNotM650orLater(SiS_Pr))) { |
4181 | /*if(SiS_Pr->ChipType < SIS_340) { */ /* XGI needs this */ | 4111 | /*if(SiS_Pr->ChipType < SIS_340) { */ /* XGI needs this */ |
@@ -4288,7 +4218,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
4288 | } | 4218 | } |
4289 | } | 4219 | } |
4290 | 4220 | ||
4291 | #endif /* SIS315H */ | 4221 | #endif /* CONFIG_FB_SIS_315 */ |
4292 | 4222 | ||
4293 | } /* 315 series */ | 4223 | } /* 315 series */ |
4294 | 4224 | ||
@@ -4304,14 +4234,12 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
4304 | * from outside the context of a mode switch! | 4234 | * from outside the context of a mode switch! |
4305 | * MUST call getVBType before calling this | 4235 | * MUST call getVBType before calling this |
4306 | */ | 4236 | */ |
4307 | #ifdef SIS_LINUX_KERNEL | ||
4308 | static | 4237 | static |
4309 | #endif | ||
4310 | void | 4238 | void |
4311 | SiS_EnableBridge(struct SiS_Private *SiS_Pr) | 4239 | SiS_EnableBridge(struct SiS_Private *SiS_Pr) |
4312 | { | 4240 | { |
4313 | unsigned short temp=0, tempah; | 4241 | unsigned short temp=0, tempah; |
4314 | #ifdef SIS315H | 4242 | #ifdef CONFIG_FB_SIS_315 |
4315 | unsigned short temp1, pushax=0; | 4243 | unsigned short temp1, pushax=0; |
4316 | bool delaylong = false; | 4244 | bool delaylong = false; |
4317 | #endif | 4245 | #endif |
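
With the SIS_LINUX_KERNEL wrapper removed, SiS_EnableBridge() becomes unconditionally static; the comment above it still states the calling contract, namely that the video-bridge type must be probed first. An illustrative sketch of that ordering, kept in the same translation unit (SiS_GetVBType is assumed from the "MUST call getVBType" note; this is not a verbatim excerpt from the driver):

/*
 * Illustrative only: the ordering constraint stated in the comment above.
 * Both calls take the driver's SiS_Private state.
 */
static void sketch_enable_crt2(struct SiS_Private *SiS_Pr)
{
	SiS_GetVBType(SiS_Pr);      /* determine SiS_Pr->SiS_VBType first */
	SiS_EnableBridge(SiS_Pr);   /* safe only once the VB type is known */
}
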
@@ -4322,7 +4250,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) | |||
4322 | 4250 | ||
4323 | if(SiS_Pr->ChipType < SIS_315H) { | 4251 | if(SiS_Pr->ChipType < SIS_315H) { |
4324 | 4252 | ||
4325 | #ifdef SIS300 /* 300 series */ | 4253 | #ifdef CONFIG_FB_SIS_300 /* 300 series */ |
4326 | 4254 | ||
4327 | if(SiS_CRT2IsLCD(SiS_Pr)) { | 4255 | if(SiS_CRT2IsLCD(SiS_Pr)) { |
4328 | if(SiS_Pr->SiS_VBType & VB_SISLVDS) { | 4256 | if(SiS_Pr->SiS_VBType & VB_SISLVDS) { |
@@ -4385,11 +4313,11 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) | |||
4385 | } | 4313 | } |
4386 | 4314 | ||
4387 | 4315 | ||
4388 | #endif /* SIS300 */ | 4316 | #endif /* CONFIG_FB_SIS_300 */ |
4389 | 4317 | ||
4390 | } else { | 4318 | } else { |
4391 | 4319 | ||
4392 | #ifdef SIS315H /* 315 series */ | 4320 | #ifdef CONFIG_FB_SIS_315 /* 315 series */ |
4393 | 4321 | ||
4394 | #ifdef SET_EMI | 4322 | #ifdef SET_EMI |
4395 | unsigned char r30=0, r31=0, r32=0, r33=0, cr36=0; | 4323 | unsigned char r30=0, r31=0, r32=0, r33=0, cr36=0; |
@@ -4688,7 +4616,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) | |||
4688 | SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x00,0x7f); | 4616 | SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x00,0x7f); |
4689 | } | 4617 | } |
4690 | 4618 | ||
4691 | #endif /* SIS315H */ | 4619 | #endif /* CONFIG_FB_SIS_315 */ |
4692 | 4620 | ||
4693 | } | 4621 | } |
4694 | 4622 | ||
@@ -4739,7 +4667,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) | |||
4739 | 4667 | ||
4740 | if(SiS_Pr->ChipType < SIS_315H) { | 4668 | if(SiS_Pr->ChipType < SIS_315H) { |
4741 | 4669 | ||
4742 | #ifdef SIS300 /* 300 series */ | 4670 | #ifdef CONFIG_FB_SIS_300 /* 300 series */ |
4743 | 4671 | ||
4744 | if(SiS_CRT2IsLCD(SiS_Pr)) { | 4672 | if(SiS_CRT2IsLCD(SiS_Pr)) { |
4745 | if(SiS_Pr->ChipType == SIS_730) { | 4673 | if(SiS_Pr->ChipType == SIS_730) { |
@@ -4783,11 +4711,11 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) | |||
4783 | } | 4711 | } |
4784 | } | 4712 | } |
4785 | 4713 | ||
4786 | #endif /* SIS300 */ | 4714 | #endif /* CONFIG_FB_SIS_300 */ |
4787 | 4715 | ||
4788 | } else { | 4716 | } else { |
4789 | 4717 | ||
4790 | #ifdef SIS315H /* 315 series */ | 4718 | #ifdef CONFIG_FB_SIS_315 /* 315 series */ |
4791 | 4719 | ||
4792 | if(!(SiS_IsNotM650orLater(SiS_Pr))) { | 4720 | if(!(SiS_IsNotM650orLater(SiS_Pr))) { |
4793 | /*if(SiS_Pr->ChipType < SIS_340) {*/ /* XGI needs this */ | 4721 | /*if(SiS_Pr->ChipType < SIS_340) {*/ /* XGI needs this */ |
@@ -4881,7 +4809,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) | |||
4881 | } | 4809 | } |
4882 | } | 4810 | } |
4883 | 4811 | ||
4884 | #endif /* SIS315H */ | 4812 | #endif /* CONFIG_FB_SIS_315 */ |
4885 | 4813 | ||
4886 | } /* 310 series */ | 4814 | } /* 310 series */ |
4887 | 4815 | ||
@@ -4971,7 +4899,7 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor | |||
4971 | 4899 | ||
4972 | if(SiS_Pr->ChipType < SIS_315H) { | 4900 | if(SiS_Pr->ChipType < SIS_315H) { |
4973 | 4901 | ||
4974 | #ifdef SIS300 /* ---- 300 series --- */ | 4902 | #ifdef CONFIG_FB_SIS_300 /* ---- 300 series --- */ |
4975 | 4903 | ||
4976 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { /* 630 - 301B(-DH) */ | 4904 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { /* 630 - 301B(-DH) */ |
4977 | 4905 | ||
@@ -5000,11 +4928,11 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor | |||
5000 | 4928 | ||
5001 | } | 4929 | } |
5002 | 4930 | ||
5003 | #endif /* SIS300 */ | 4931 | #endif /* CONFIG_FB_SIS_300 */ |
5004 | 4932 | ||
5005 | } else { | 4933 | } else { |
5006 | 4934 | ||
5007 | #ifdef SIS315H /* ------- 315 series ------ */ | 4935 | #ifdef CONFIG_FB_SIS_315 /* ------- 315 series ------ */ |
5008 | 4936 | ||
5009 | if(SiS_Pr->SiS_VBType & VB_SISLVDS) { /* 315 - LVDS */ | 4937 | if(SiS_Pr->SiS_VBType & VB_SISLVDS) { /* 315 - LVDS */ |
5010 | 4938 | ||
@@ -5076,13 +5004,13 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor | |||
5076 | } | 5004 | } |
5077 | 5005 | ||
5078 | } | 5006 | } |
5079 | #endif /* SIS315H */ | 5007 | #endif /* CONFIG_FB_SIS_315 */ |
5080 | } | 5008 | } |
5081 | } | 5009 | } |
5082 | } | 5010 | } |
5083 | 5011 | ||
5084 | /* Set CRT2 FIFO on 300/540/630/730 */ | 5012 | /* Set CRT2 FIFO on 300/540/630/730 */ |
5085 | #ifdef SIS300 | 5013 | #ifdef CONFIG_FB_SIS_300 |
5086 | static void | 5014 | static void |
5087 | SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) | 5015 | SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) |
5088 | { | 5016 | { |
@@ -5154,13 +5082,8 @@ SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) | |||
5154 | 5082 | ||
5155 | } else { | 5083 | } else { |
5156 | 5084 | ||
5157 | #ifdef SIS_LINUX_KERNEL | ||
5158 | pci50 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); | 5085 | pci50 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); |
5159 | pciA0 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xa0); | 5086 | pciA0 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xa0); |
5160 | #else | ||
5161 | pci50 = pciReadLong(0x00000000, 0x50); | ||
5162 | pciA0 = pciReadLong(0x00000000, 0xA0); | ||
5163 | #endif | ||
5164 | 5087 | ||
5165 | if(SiS_Pr->ChipType == SIS_730) { | 5088 | if(SiS_Pr->ChipType == SIS_730) { |
5166 | 5089 | ||
@@ -5262,7 +5185,7 @@ SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) | |||
5262 | #endif | 5185 | #endif |
5263 | 5186 | ||
5264 | /* Set CRT2 FIFO on 315/330 series */ | 5187 | /* Set CRT2 FIFO on 315/330 series */ |
5265 | #ifdef SIS315H | 5188 | #ifdef CONFIG_FB_SIS_315 |
5266 | static void | 5189 | static void |
5267 | SiS_SetCRT2FIFO_310(struct SiS_Private *SiS_Pr) | 5190 | SiS_SetCRT2FIFO_310(struct SiS_Private *SiS_Pr) |
5268 | { | 5191 | { |
@@ -5420,27 +5343,6 @@ SiS_SetGroup1_301(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned sho | |||
5420 | 5343 | ||
5421 | temp = SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02)); | 5344 | temp = SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02)); |
5422 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1b,temp); /* ? */ | 5345 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1b,temp); /* ? */ |
5423 | |||
5424 | #ifdef SIS_XORG_XF86 | ||
5425 | #ifdef TWDEBUG | ||
5426 | xf86DrvMsg(0, X_INFO, "%d %d %d %d %d %d %d %d (%d %d %d %d)\n", | ||
5427 | SiS_Pr->CHDisplay, SiS_Pr->CHSyncStart, SiS_Pr->CHSyncEnd, SiS_Pr->CHTotal, | ||
5428 | SiS_Pr->CVDisplay, SiS_Pr->CVSyncStart, SiS_Pr->CVSyncEnd, SiS_Pr->CVTotal, | ||
5429 | SiS_Pr->CHBlankStart, SiS_Pr->CHBlankEnd, SiS_Pr->CVBlankStart, SiS_Pr->CVBlankEnd); | ||
5430 | |||
5431 | xf86DrvMsg(0, X_INFO, " {{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", | ||
5432 | SiS_Pr->CCRT1CRTC[0], SiS_Pr->CCRT1CRTC[1], | ||
5433 | SiS_Pr->CCRT1CRTC[2], SiS_Pr->CCRT1CRTC[3], | ||
5434 | SiS_Pr->CCRT1CRTC[4], SiS_Pr->CCRT1CRTC[5], | ||
5435 | SiS_Pr->CCRT1CRTC[6], SiS_Pr->CCRT1CRTC[7]); | ||
5436 | xf86DrvMsg(0, X_INFO, " 0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", | ||
5437 | SiS_Pr->CCRT1CRTC[8], SiS_Pr->CCRT1CRTC[9], | ||
5438 | SiS_Pr->CCRT1CRTC[10], SiS_Pr->CCRT1CRTC[11], | ||
5439 | SiS_Pr->CCRT1CRTC[12], SiS_Pr->CCRT1CRTC[13], | ||
5440 | SiS_Pr->CCRT1CRTC[14], SiS_Pr->CCRT1CRTC[15]); | ||
5441 | xf86DrvMsg(0, X_INFO, " 0x%02x}},\n", SiS_Pr->CCRT1CRTC[16]); | ||
5442 | #endif | ||
5443 | #endif | ||
5444 | } | 5346 | } |
5445 | 5347 | ||
5446 | /* Setup panel link | 5348 | /* Setup panel link |
@@ -5455,17 +5357,17 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5455 | unsigned short push2, tempax, tempbx, tempcx, temp; | 5357 | unsigned short push2, tempax, tempbx, tempcx, temp; |
5456 | unsigned int tempeax = 0, tempebx, tempecx, tempvcfact = 0; | 5358 | unsigned int tempeax = 0, tempebx, tempecx, tempvcfact = 0; |
5457 | bool islvds = false, issis = false, chkdclkfirst = false; | 5359 | bool islvds = false, issis = false, chkdclkfirst = false; |
5458 | #ifdef SIS300 | 5360 | #ifdef CONFIG_FB_SIS_300 |
5459 | unsigned short crt2crtc = 0; | 5361 | unsigned short crt2crtc = 0; |
5460 | #endif | 5362 | #endif |
5461 | #ifdef SIS315H | 5363 | #ifdef CONFIG_FB_SIS_315 |
5462 | unsigned short pushcx; | 5364 | unsigned short pushcx; |
5463 | #endif | 5365 | #endif |
5464 | 5366 | ||
5465 | if(ModeNo <= 0x13) { | 5367 | if(ModeNo <= 0x13) { |
5466 | modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag; | 5368 | modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag; |
5467 | resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo; | 5369 | resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo; |
5468 | #ifdef SIS300 | 5370 | #ifdef CONFIG_FB_SIS_300 |
5469 | crt2crtc = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC; | 5371 | crt2crtc = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC; |
5470 | #endif | 5372 | #endif |
5471 | } else if(SiS_Pr->UseCustomMode) { | 5373 | } else if(SiS_Pr->UseCustomMode) { |
@@ -5473,7 +5375,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5473 | } else { | 5375 | } else { |
5474 | modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; | 5376 | modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; |
5475 | resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; | 5377 | resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; |
5476 | #ifdef SIS300 | 5378 | #ifdef CONFIG_FB_SIS_300 |
5477 | crt2crtc = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC; | 5379 | crt2crtc = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC; |
5478 | #endif | 5380 | #endif |
5479 | } | 5381 | } |
@@ -5494,7 +5396,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5494 | } | 5396 | } |
5495 | } | 5397 | } |
5496 | 5398 | ||
5497 | #ifdef SIS315H | 5399 | #ifdef CONFIG_FB_SIS_315 |
5498 | if((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { | 5400 | if((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { |
5499 | if(IS_SIS330) { | 5401 | if(IS_SIS330) { |
5500 | SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2D,0x10); | 5402 | SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2D,0x10); |
@@ -5744,7 +5646,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5744 | 5646 | ||
5745 | if(SiS_Pr->ChipType < SIS_315H) { | 5647 | if(SiS_Pr->ChipType < SIS_315H) { |
5746 | 5648 | ||
5747 | #ifdef SIS300 /* 300 series */ | 5649 | #ifdef CONFIG_FB_SIS_300 /* 300 series */ |
5748 | tempeax = SiS_Pr->SiS_VGAVDE << 6; | 5650 | tempeax = SiS_Pr->SiS_VGAVDE << 6; |
5749 | temp = (tempeax % (unsigned int)SiS_Pr->SiS_VDE); | 5651 | temp = (tempeax % (unsigned int)SiS_Pr->SiS_VDE); |
5750 | tempeax = tempeax / (unsigned int)SiS_Pr->SiS_VDE; | 5652 | tempeax = tempeax / (unsigned int)SiS_Pr->SiS_VDE; |
@@ -5755,11 +5657,11 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5755 | temp = (unsigned short)(tempeax & 0x00FF); | 5657 | temp = (unsigned short)(tempeax & 0x00FF); |
5756 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1E,temp); /* BPLVCFACT */ | 5658 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1E,temp); /* BPLVCFACT */ |
5757 | tempvcfact = temp; | 5659 | tempvcfact = temp; |
5758 | #endif /* SIS300 */ | 5660 | #endif /* CONFIG_FB_SIS_300 */ |
5759 | 5661 | ||
5760 | } else { | 5662 | } else { |
5761 | 5663 | ||
5762 | #ifdef SIS315H /* 315 series */ | 5664 | #ifdef CONFIG_FB_SIS_315 /* 315 series */ |
5763 | tempeax = SiS_Pr->SiS_VGAVDE << 18; | 5665 | tempeax = SiS_Pr->SiS_VGAVDE << 18; |
5764 | tempebx = SiS_Pr->SiS_VDE; | 5666 | tempebx = SiS_Pr->SiS_VDE; |
5765 | temp = (tempeax % tempebx); | 5667 | temp = (tempeax % tempebx); |
@@ -5845,7 +5747,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5845 | temp = (unsigned short)(tempecx & 0x00FF); | 5747 | temp = (unsigned short)(tempecx & 0x00FF); |
5846 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x23,temp); | 5748 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x23,temp); |
5847 | 5749 | ||
5848 | #ifdef SIS315H | 5750 | #ifdef CONFIG_FB_SIS_315 |
5849 | if(SiS_Pr->ChipType >= SIS_315H) { | 5751 | if(SiS_Pr->ChipType >= SIS_315H) { |
5850 | if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { | 5752 | if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { |
5851 | if((islvds) || (SiS_Pr->SiS_VBInfo & VB_SISLVDS)) { | 5753 | if((islvds) || (SiS_Pr->SiS_VBInfo & VB_SISLVDS)) { |
@@ -5863,7 +5765,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5863 | } | 5765 | } |
5864 | #endif | 5766 | #endif |
5865 | 5767 | ||
5866 | #ifdef SIS300 | 5768 | #ifdef CONFIG_FB_SIS_300 |
5867 | if(SiS_Pr->SiS_IF_DEF_TRUMPION) { | 5769 | if(SiS_Pr->SiS_IF_DEF_TRUMPION) { |
5868 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 5770 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
5869 | unsigned char *trumpdata; | 5771 | unsigned char *trumpdata; |
@@ -5899,7 +5801,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5899 | } | 5801 | } |
5900 | #endif | 5802 | #endif |
5901 | 5803 | ||
5902 | #ifdef SIS315H | 5804 | #ifdef CONFIG_FB_SIS_315 |
5903 | if(SiS_Pr->SiS_IF_DEF_FSTN || SiS_Pr->SiS_IF_DEF_DSTN) { | 5805 | if(SiS_Pr->SiS_IF_DEF_FSTN || SiS_Pr->SiS_IF_DEF_DSTN) { |
5904 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x25,0x00); | 5806 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x25,0x00); |
5905 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x26,0x00); | 5807 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x26,0x00); |
@@ -5999,7 +5901,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5999 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x45,0x0a); | 5901 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x45,0x0a); |
6000 | } | 5902 | } |
6001 | } | 5903 | } |
6002 | #endif /* SIS315H */ | 5904 | #endif /* CONFIG_FB_SIS_315 */ |
6003 | } | 5905 | } |
6004 | 5906 | ||
6005 | /* Set Part 1 */ | 5907 | /* Set Part 1 */ |
@@ -6007,12 +5909,12 @@ static void | |||
6007 | SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex, | 5909 | SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex, |
6008 | unsigned short RefreshRateTableIndex) | 5910 | unsigned short RefreshRateTableIndex) |
6009 | { | 5911 | { |
6010 | #if defined(SIS300) || defined(SIS315H) | 5912 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
6011 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 5913 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
6012 | #endif | 5914 | #endif |
6013 | unsigned short temp=0, tempax=0, tempbx=0, tempcx=0, bridgeadd=0; | 5915 | unsigned short temp=0, tempax=0, tempbx=0, tempcx=0, bridgeadd=0; |
6014 | unsigned short pushbx=0, CRT1Index=0, modeflag, resinfo=0; | 5916 | unsigned short pushbx=0, CRT1Index=0, modeflag, resinfo=0; |
6015 | #ifdef SIS315H | 5917 | #ifdef CONFIG_FB_SIS_315 |
6016 | unsigned short tempbl=0; | 5918 | unsigned short tempbl=0; |
6017 | #endif | 5919 | #endif |
6018 | 5920 | ||
@@ -6038,11 +5940,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6038 | (SiS_Pr->SiS_VBInfo & SetInSlaveMode)) ) { | 5940 | (SiS_Pr->SiS_VBInfo & SetInSlaveMode)) ) { |
6039 | 5941 | ||
6040 | if(SiS_Pr->ChipType < SIS_315H ) { | 5942 | if(SiS_Pr->ChipType < SIS_315H ) { |
6041 | #ifdef SIS300 | 5943 | #ifdef CONFIG_FB_SIS_300 |
6042 | SiS_SetCRT2FIFO_300(SiS_Pr, ModeNo); | 5944 | SiS_SetCRT2FIFO_300(SiS_Pr, ModeNo); |
6043 | #endif | 5945 | #endif |
6044 | } else { | 5946 | } else { |
6045 | #ifdef SIS315H | 5947 | #ifdef CONFIG_FB_SIS_315 |
6046 | SiS_SetCRT2FIFO_310(SiS_Pr); | 5948 | SiS_SetCRT2FIFO_310(SiS_Pr); |
6047 | #endif | 5949 | #endif |
6048 | } | 5950 | } |
@@ -6051,7 +5953,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6051 | 5953 | ||
6052 | if(SiS_Pr->ChipType < SIS_315H ) { | 5954 | if(SiS_Pr->ChipType < SIS_315H ) { |
6053 | 5955 | ||
6054 | #ifdef SIS300 /* ------------- 300 series --------------*/ | 5956 | #ifdef CONFIG_FB_SIS_300 /* ------------- 300 series --------------*/ |
6055 | 5957 | ||
6056 | temp = (SiS_Pr->SiS_VGAHT - 1) & 0x0FF; /* BTVGA2HT 0x08,0x09 */ | 5958 | temp = (SiS_Pr->SiS_VGAHT - 1) & 0x0FF; /* BTVGA2HT 0x08,0x09 */ |
6057 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x08,temp); /* CRT2 Horizontal Total */ | 5959 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x08,temp); /* CRT2 Horizontal Total */ |
@@ -6070,11 +5972,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6070 | 5972 | ||
6071 | bridgeadd = 12; | 5973 | bridgeadd = 12; |
6072 | 5974 | ||
6073 | #endif /* SIS300 */ | 5975 | #endif /* CONFIG_FB_SIS_300 */ |
6074 | 5976 | ||
6075 | } else { | 5977 | } else { |
6076 | 5978 | ||
6077 | #ifdef SIS315H /* ------------------- 315/330 series --------------- */ | 5979 | #ifdef CONFIG_FB_SIS_315 /* ------------------- 315/330 series --------------- */ |
6078 | 5980 | ||
6079 | tempcx = SiS_Pr->SiS_VGAHT; /* BTVGA2HT 0x08,0x09 */ | 5981 | tempcx = SiS_Pr->SiS_VGAHT; /* BTVGA2HT 0x08,0x09 */ |
6080 | if(modeflag & HalfDCLK) { | 5982 | if(modeflag & HalfDCLK) { |
@@ -6125,7 +6027,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6125 | } | 6027 | } |
6126 | } | 6028 | } |
6127 | 6029 | ||
6128 | #endif /* SIS315H */ | 6030 | #endif /* CONFIG_FB_SIS_315 */ |
6129 | 6031 | ||
6130 | } /* 315/330 series */ | 6032 | } /* 315/330 series */ |
6131 | 6033 | ||
@@ -6256,7 +6158,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6256 | 6158 | ||
6257 | if(SiS_Pr->ChipType < SIS_315H) { | 6159 | if(SiS_Pr->ChipType < SIS_315H) { |
6258 | 6160 | ||
6259 | #ifdef SIS300 /* ---------- 300 series -------------- */ | 6161 | #ifdef CONFIG_FB_SIS_300 /* ---------- 300 series -------------- */ |
6260 | 6162 | ||
6261 | if(SiS_Pr->SiS_VBType & VB_SISVB) { | 6163 | if(SiS_Pr->SiS_VBType & VB_SISVB) { |
6262 | temp = 0x20; | 6164 | temp = 0x20; |
@@ -6310,11 +6212,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6310 | 6212 | ||
6311 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x13,~0x3C,temp); /* Panel Link Delay Compensation; (Software Command Reset; Power Saving) */ | 6213 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x13,~0x3C,temp); /* Panel Link Delay Compensation; (Software Command Reset; Power Saving) */ |
6312 | 6214 | ||
6313 | #endif /* SIS300 */ | 6215 | #endif /* CONFIG_FB_SIS_300 */ |
6314 | 6216 | ||
6315 | } else { | 6217 | } else { |
6316 | 6218 | ||
6317 | #ifdef SIS315H /* --------------- 315/330 series ---------------*/ | 6219 | #ifdef CONFIG_FB_SIS_315 /* --------------- 315/330 series ---------------*/ |
6318 | 6220 | ||
6319 | if(SiS_Pr->ChipType < SIS_661) { | 6221 | if(SiS_Pr->ChipType < SIS_661) { |
6320 | 6222 | ||
@@ -6349,7 +6251,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6349 | if(modeflag & HalfDCLK) tempax |= 0x40; | 6251 | if(modeflag & HalfDCLK) tempax |= 0x40; |
6350 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x2C,0x3f,tempax); | 6252 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x2C,0x3f,tempax); |
6351 | 6253 | ||
6352 | #endif /* SIS315H */ | 6254 | #endif /* CONFIG_FB_SIS_315 */ |
6353 | 6255 | ||
6354 | } | 6256 | } |
6355 | 6257 | ||
@@ -6381,7 +6283,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6381 | /* SET PART 2 REGISTER GROUP */ | 6283 | /* SET PART 2 REGISTER GROUP */ |
6382 | /*********************************************/ | 6284 | /*********************************************/ |
6383 | 6285 | ||
6384 | #ifdef SIS315H | 6286 | #ifdef CONFIG_FB_SIS_315 |
6385 | static unsigned char * | 6287 | static unsigned char * |
6386 | SiS_GetGroup2CLVXPtr(struct SiS_Private *SiS_Pr, int tabletype) | 6288 | SiS_GetGroup2CLVXPtr(struct SiS_Private *SiS_Pr, int tabletype) |
6387 | { | 6289 | { |
@@ -6478,7 +6380,7 @@ SiS_GetCRT2Part2Ptr(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned sh | |||
6478 | } | 6380 | } |
6479 | #endif | 6381 | #endif |
6480 | 6382 | ||
6481 | #ifdef SIS300 | 6383 | #ifdef CONFIG_FB_SIS_300 |
6482 | static void | 6384 | static void |
6483 | SiS_Group2LCDSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short crt2crtc) | 6385 | SiS_Group2LCDSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short crt2crtc) |
6484 | { | 6386 | { |
@@ -6690,7 +6592,7 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6690 | unsigned int longtemp, PhaseIndex; | 6592 | unsigned int longtemp, PhaseIndex; |
6691 | bool newtvphase; | 6593 | bool newtvphase; |
6692 | const unsigned char *TimingPoint; | 6594 | const unsigned char *TimingPoint; |
6693 | #ifdef SIS315H | 6595 | #ifdef CONFIG_FB_SIS_315 |
6694 | unsigned short resindex, CRT2Index; | 6596 | unsigned short resindex, CRT2Index; |
6695 | const struct SiS_Part2PortTbl *CRT2Part2Ptr = NULL; | 6597 | const struct SiS_Part2PortTbl *CRT2Part2Ptr = NULL; |
6696 | 6598 | ||
@@ -7069,7 +6971,7 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7069 | SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x17,0xFB); | 6971 | SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x17,0xFB); |
7070 | SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x18,0xDF); | 6972 | SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x18,0xDF); |
7071 | 6973 | ||
7072 | #ifdef SIS315H | 6974 | #ifdef CONFIG_FB_SIS_315 |
7073 | if(SiS_GetCRT2Part2Ptr(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex, | 6975 | if(SiS_GetCRT2Part2Ptr(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex, |
7074 | &CRT2Index, &resindex)) { | 6976 | &CRT2Index, &resindex)) { |
7075 | switch(CRT2Index) { | 6977 | switch(CRT2Index) { |
@@ -7130,12 +7032,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7130 | 7032 | ||
7131 | /* Non-expanding: lcdvdes = tempcx = VT-1; lcdvdee = tempbx = VDE-1 */ | 7033 | /* Non-expanding: lcdvdes = tempcx = VT-1; lcdvdee = tempbx = VDE-1 */ |
7132 | 7034 | ||
7133 | #ifdef SIS_XORG_XF86 | ||
7134 | #ifdef TWDEBUG | ||
7135 | xf86DrvMsg(0, X_INFO, "lcdvdes 0x%x lcdvdee 0x%x\n", tempcx, tempbx); | ||
7136 | #endif | ||
7137 | #endif | ||
7138 | |||
7139 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x05,tempcx); /* lcdvdes */ | 7035 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x05,tempcx); /* lcdvdes */ |
7140 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x06,tempbx); /* lcdvdee */ | 7036 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x06,tempbx); /* lcdvdee */ |
7141 | 7037 | ||
@@ -7184,12 +7080,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7184 | tempbx = SiS_Pr->CVSyncStart; | 7080 | tempbx = SiS_Pr->CVSyncStart; |
7185 | } | 7081 | } |
7186 | 7082 | ||
7187 | #ifdef SIS_XORG_XF86 | ||
7188 | #ifdef TWDEBUG | ||
7189 | xf86DrvMsg(0, X_INFO, "lcdvrs 0x%x\n", tempbx); | ||
7190 | #endif | ||
7191 | #endif | ||
7192 | |||
7193 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x04,tempbx); /* lcdvrs */ | 7083 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x04,tempbx); /* lcdvrs */ |
7194 | 7084 | ||
7195 | temp = (tempbx >> 4) & 0xF0; | 7085 | temp = (tempbx >> 4) & 0xF0; |
@@ -7201,15 +7091,9 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7201 | temp |= (SiS_Pr->CVSyncEnd & 0x0f); | 7091 | temp |= (SiS_Pr->CVSyncEnd & 0x0f); |
7202 | } | 7092 | } |
7203 | 7093 | ||
7204 | #ifdef SIS_XORG_XF86 | ||
7205 | #ifdef TWDEBUG | ||
7206 | xf86DrvMsg(0, X_INFO, "lcdvre[3:0] 0x%x\n", (temp & 0x0f)); | ||
7207 | #endif | ||
7208 | #endif | ||
7209 | |||
7210 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x01,temp); | 7094 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x01,temp); |
7211 | 7095 | ||
7212 | #ifdef SIS300 | 7096 | #ifdef CONFIG_FB_SIS_300 |
7213 | SiS_Group2LCDSpecial(SiS_Pr, ModeNo, crt2crtc); | 7097 | SiS_Group2LCDSpecial(SiS_Pr, ModeNo, crt2crtc); |
7214 | #endif | 7098 | #endif |
7215 | 7099 | ||
@@ -7245,12 +7129,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7245 | tempax >>= 1; | 7129 | tempax >>= 1; |
7246 | } | 7130 | } |
7247 | 7131 | ||
7248 | #ifdef SIS_XORG_XF86 | ||
7249 | #ifdef TWDEBUG | ||
7250 | xf86DrvMsg(0, X_INFO, "lcdhdee 0x%x\n", tempbx); | ||
7251 | #endif | ||
7252 | #endif | ||
7253 | |||
7254 | tempbx += bridgeoffset; | 7132 | tempbx += bridgeoffset; |
7255 | 7133 | ||
7256 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x23,tempbx); /* lcdhdee */ | 7134 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x23,tempbx); /* lcdhdee */ |
@@ -7276,12 +7154,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7276 | tempbx += bridgeoffset; | 7154 | tempbx += bridgeoffset; |
7277 | } | 7155 | } |
7278 | 7156 | ||
7279 | #ifdef SIS_XORG_XF86 | ||
7280 | #ifdef TWDEBUG | ||
7281 | xf86DrvMsg(0, X_INFO, "lcdhrs 0x%x\n", tempbx); | ||
7282 | #endif | ||
7283 | #endif | ||
7284 | |||
7285 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x1C,tempbx); /* lcdhrs */ | 7157 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x1C,tempbx); /* lcdhrs */ |
7286 | SiS_SetRegANDOR(SiS_Pr->SiS_Part2Port,0x1D,0x0F,((tempbx >> 4) & 0xf0)); | 7158 | SiS_SetRegANDOR(SiS_Pr->SiS_Part2Port,0x1D,0x0F,((tempbx >> 4) & 0xf0)); |
7287 | 7159 | ||
@@ -7300,20 +7172,14 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7300 | tempbx += bridgeoffset; | 7172 | tempbx += bridgeoffset; |
7301 | } | 7173 | } |
7302 | 7174 | ||
7303 | #ifdef SIS_XORG_XF86 | ||
7304 | #ifdef TWDEBUG | ||
7305 | xf86DrvMsg(0, X_INFO, "lcdhre 0x%x\n", tempbx); | ||
7306 | #endif | ||
7307 | #endif | ||
7308 | |||
7309 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x21,tempbx); /* lcdhre */ | 7175 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x21,tempbx); /* lcdhre */ |
7310 | 7176 | ||
7311 | SiS_SetGroup2_Tail(SiS_Pr, ModeNo); | 7177 | SiS_SetGroup2_Tail(SiS_Pr, ModeNo); |
7312 | 7178 | ||
7313 | #ifdef SIS300 | 7179 | #ifdef CONFIG_FB_SIS_300 |
7314 | SiS_Set300Part2Regs(SiS_Pr, ModeIdIndex, RefreshRateTableIndex, ModeNo); | 7180 | SiS_Set300Part2Regs(SiS_Pr, ModeIdIndex, RefreshRateTableIndex, ModeNo); |
7315 | #endif | 7181 | #endif |
7316 | #ifdef SIS315H | 7182 | #ifdef CONFIG_FB_SIS_315 |
7317 | } /* CRT2-LCD from table */ | 7183 | } /* CRT2-LCD from table */ |
7318 | #endif | 7184 | #endif |
7319 | } | 7185 | } |
@@ -7382,7 +7248,7 @@ SiS_SetGroup3(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7382 | /* SET PART 4 REGISTER GROUP */ | 7248 | /* SET PART 4 REGISTER GROUP */ |
7383 | /*********************************************/ | 7249 | /*********************************************/ |
7384 | 7250 | ||
7385 | #ifdef SIS315H | 7251 | #ifdef CONFIG_FB_SIS_315 |
7386 | #if 0 | 7252 | #if 0 |
7387 | static void | 7253 | static void |
7388 | SiS_ShiftXPos(struct SiS_Private *SiS_Pr, int shift) | 7254 | SiS_ShiftXPos(struct SiS_Private *SiS_Pr, int shift) |
@@ -8011,7 +7877,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
8011 | 7877 | ||
8012 | if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { | 7878 | if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { |
8013 | 7879 | ||
8014 | #ifdef SIS300 | 7880 | #ifdef CONFIG_FB_SIS_300 |
8015 | 7881 | ||
8016 | /* Chrontel 7005 - I assume that it does not come with a 315 series chip */ | 7882 | /* Chrontel 7005 - I assume that it does not come with a 315 series chip */ |
8017 | 7883 | ||
@@ -8124,7 +7990,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
8124 | 7990 | ||
8125 | /* Chrontel 7019 - assumed that it does not come with a 300 series chip */ | 7991 | /* Chrontel 7019 - assumed that it does not come with a 300 series chip */ |
8126 | 7992 | ||
8127 | #ifdef SIS315H | 7993 | #ifdef CONFIG_FB_SIS_315 |
8128 | 7994 | ||
8129 | unsigned short temp; | 7995 | unsigned short temp; |
8130 | 7996 | ||
@@ -8175,7 +8041,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
8175 | 8041 | ||
8176 | } | 8042 | } |
8177 | 8043 | ||
8178 | #ifdef SIS315H /* ----------- 315 series only ---------- */ | 8044 | #ifdef CONFIG_FB_SIS_315 /* ----------- 315 series only ---------- */ |
8179 | 8045 | ||
8180 | void | 8046 | void |
8181 | SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr) | 8047 | SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr) |
@@ -8657,7 +8523,7 @@ SiS_ChrontelDoSomething1(struct SiS_Private *SiS_Pr) | |||
8657 | bool | 8523 | bool |
8658 | SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | 8524 | SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) |
8659 | { | 8525 | { |
8660 | #ifdef SIS300 | 8526 | #ifdef CONFIG_FB_SIS_300 |
8661 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 8527 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
8662 | #endif | 8528 | #endif |
8663 | unsigned short ModeIdIndex, RefreshRateTableIndex; | 8529 | unsigned short ModeIdIndex, RefreshRateTableIndex; |
@@ -8703,16 +8569,6 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
8703 | SiS_GetLVDSDesData(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 8569 | SiS_GetLVDSDesData(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
8704 | } | 8570 | } |
8705 | 8571 | ||
8706 | #ifdef SIS_XORG_XF86 | ||
8707 | #ifdef TWDEBUG | ||
8708 | xf86DrvMsg(0, X_INFO, "(init301: LCDHDES 0x%03x LCDVDES 0x%03x)\n", SiS_Pr->SiS_LCDHDES, SiS_Pr->SiS_LCDVDES); | ||
8709 | xf86DrvMsg(0, X_INFO, "(init301: HDE 0x%03x VDE 0x%03x)\n", SiS_Pr->SiS_HDE, SiS_Pr->SiS_VDE); | ||
8710 | xf86DrvMsg(0, X_INFO, "(init301: VGAHDE 0x%03x VGAVDE 0x%03x)\n", SiS_Pr->SiS_VGAHDE, SiS_Pr->SiS_VGAVDE); | ||
8711 | xf86DrvMsg(0, X_INFO, "(init301: HT 0x%03x VT 0x%03x)\n", SiS_Pr->SiS_HT, SiS_Pr->SiS_VT); | ||
8712 | xf86DrvMsg(0, X_INFO, "(init301: VGAHT 0x%03x VGAVT 0x%03x)\n", SiS_Pr->SiS_VGAHT, SiS_Pr->SiS_VGAVT); | ||
8713 | #endif | ||
8714 | #endif | ||
8715 | |||
8716 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { | 8572 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { |
8717 | SiS_SetGroup1(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 8573 | SiS_SetGroup1(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
8718 | } | 8574 | } |
@@ -8722,12 +8578,12 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
8722 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { | 8578 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { |
8723 | 8579 | ||
8724 | SiS_SetGroup2(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 8580 | SiS_SetGroup2(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
8725 | #ifdef SIS315H | 8581 | #ifdef CONFIG_FB_SIS_315 |
8726 | SiS_SetGroup2_C_ELV(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 8582 | SiS_SetGroup2_C_ELV(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
8727 | #endif | 8583 | #endif |
8728 | SiS_SetGroup3(SiS_Pr, ModeNo, ModeIdIndex); | 8584 | SiS_SetGroup3(SiS_Pr, ModeNo, ModeIdIndex); |
8729 | SiS_SetGroup4(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 8585 | SiS_SetGroup4(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
8730 | #ifdef SIS315H | 8586 | #ifdef CONFIG_FB_SIS_315 |
8731 | SiS_SetGroup4_C_ELV(SiS_Pr, ModeNo, ModeIdIndex); | 8587 | SiS_SetGroup4_C_ELV(SiS_Pr, ModeNo, ModeIdIndex); |
8732 | #endif | 8588 | #endif |
8733 | SiS_SetGroup5(SiS_Pr, ModeNo, ModeIdIndex); | 8589 | SiS_SetGroup5(SiS_Pr, ModeNo, ModeIdIndex); |
@@ -8758,7 +8614,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
8758 | if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { | 8614 | if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { |
8759 | if(SiS_Pr->SiS_VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { | 8615 | if(SiS_Pr->SiS_VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { |
8760 | if(SiS_Pr->SiS_IF_DEF_CH70xx == 2) { | 8616 | if(SiS_Pr->SiS_IF_DEF_CH70xx == 2) { |
8761 | #ifdef SIS315H | 8617 | #ifdef CONFIG_FB_SIS_315 |
8762 | SiS_SetCH701xForLCD(SiS_Pr); | 8618 | SiS_SetCH701xForLCD(SiS_Pr); |
8763 | #endif | 8619 | #endif |
8764 | } | 8620 | } |
@@ -8771,7 +8627,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
8771 | 8627 | ||
8772 | } | 8628 | } |
8773 | 8629 | ||
8774 | #ifdef SIS300 | 8630 | #ifdef CONFIG_FB_SIS_300 |
8775 | if(SiS_Pr->ChipType < SIS_315H) { | 8631 | if(SiS_Pr->ChipType < SIS_315H) { |
8776 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { | 8632 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { |
8777 | if(SiS_Pr->SiS_UseOEM) { | 8633 | if(SiS_Pr->SiS_UseOEM) { |
@@ -8794,7 +8650,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
8794 | } | 8650 | } |
8795 | #endif | 8651 | #endif |
8796 | 8652 | ||
8797 | #ifdef SIS315H | 8653 | #ifdef CONFIG_FB_SIS_315 |
8798 | if(SiS_Pr->ChipType >= SIS_315H) { | 8654 | if(SiS_Pr->ChipType >= SIS_315H) { |
8799 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { | 8655 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { |
8800 | if(SiS_Pr->ChipType < SIS_661) { | 8656 | if(SiS_Pr->ChipType < SIS_661) { |
@@ -8873,7 +8729,7 @@ SiS_SetupDDCN(struct SiS_Private *SiS_Pr) | |||
8873 | } | 8729 | } |
8874 | } | 8730 | } |
8875 | 8731 | ||
8876 | #ifdef SIS300 | 8732 | #ifdef CONFIG_FB_SIS_300 |
8877 | static unsigned char * | 8733 | static unsigned char * |
8878 | SiS_SetTrumpBlockLoop(struct SiS_Private *SiS_Pr, unsigned char *dataptr) | 8734 | SiS_SetTrumpBlockLoop(struct SiS_Private *SiS_Pr, unsigned char *dataptr) |
8879 | { | 8735 | { |
@@ -8923,11 +8779,6 @@ SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr) | |||
8923 | dataptr = SiS_SetTrumpBlockLoop(SiS_Pr, dataptr); | 8779 | dataptr = SiS_SetTrumpBlockLoop(SiS_Pr, dataptr); |
8924 | if(!dataptr) return false; | 8780 | if(!dataptr) return false; |
8925 | } | 8781 | } |
8926 | #ifdef SIS_XORG_XF86 | ||
8927 | #ifdef TWDEBUG | ||
8928 | xf86DrvMsg(0, X_INFO, "Trumpion block success\n"); | ||
8929 | #endif | ||
8930 | #endif | ||
8931 | return true; | 8782 | return true; |
8932 | } | 8783 | } |
8933 | #endif | 8784 | #endif |
@@ -9002,9 +8853,7 @@ SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val) | |||
9002 | SiS_SetChReg(SiS_Pr, reg, val, 0); | 8853 | SiS_SetChReg(SiS_Pr, reg, val, 0); |
9003 | } | 8854 | } |
9004 | 8855 | ||
9005 | #ifdef SIS_LINUX_KERNEL | ||
9006 | static | 8856 | static |
9007 | #endif | ||
9008 | void | 8857 | void |
9009 | SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val) | 8858 | SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val) |
9010 | { | 8859 | { |
@@ -9091,9 +8940,7 @@ SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempbx) | |||
9091 | 8940 | ||
9092 | /* Read from Chrontel 70xx */ | 8941 | /* Read from Chrontel 70xx */ |
9093 | /* Parameter is [Register no (S7-S0)] */ | 8942 | /* Parameter is [Register no (S7-S0)] */ |
9094 | #ifdef SIS_LINUX_KERNEL | ||
9095 | static | 8943 | static |
9096 | #endif | ||
9097 | unsigned short | 8944 | unsigned short |
9098 | SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempbx) | 8945 | SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempbx) |
9099 | { | 8946 | { |
@@ -9114,9 +8961,7 @@ SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg, | |||
9114 | } | 8961 | } |
9115 | 8962 | ||
9116 | /* Our own DDC functions */ | 8963 | /* Our own DDC functions */ |
9117 | #ifndef SIS_XORG_XF86 | ||
9118 | static | 8964 | static |
9119 | #endif | ||
9120 | unsigned short | 8965 | unsigned short |
9121 | SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, | 8966 | SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, |
9122 | unsigned short adaptnum, unsigned short DDCdatatype, bool checkcr32, | 8967 | unsigned short adaptnum, unsigned short DDCdatatype, bool checkcr32, |
@@ -9224,12 +9069,6 @@ SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, | |||
9224 | 9069 | ||
9225 | SiS_SetupDDCN(SiS_Pr); | 9070 | SiS_SetupDDCN(SiS_Pr); |
9226 | 9071 | ||
9227 | #ifdef SIS_XORG_XF86 | ||
9228 | #ifdef TWDEBUG | ||
9229 | xf86DrvMsg(0, X_INFO, "DDC Port %x Index %x Shift %d\n", | ||
9230 | SiS_Pr->SiS_DDC_Port, SiS_Pr->SiS_DDC_Index, temp); | ||
9231 | #endif | ||
9232 | #endif | ||
9233 | return 0; | 9072 | return 0; |
9234 | } | 9073 | } |
9235 | 9074 | ||
@@ -9292,11 +9131,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) | |||
9292 | SiS_SetSwitchDDC2(SiS_Pr); | 9131 | SiS_SetSwitchDDC2(SiS_Pr); |
9293 | if(SiS_PrepareDDC(SiS_Pr)) { | 9132 | if(SiS_PrepareDDC(SiS_Pr)) { |
9294 | SiS_SetStop(SiS_Pr); | 9133 | SiS_SetStop(SiS_Pr); |
9295 | #ifdef SIS_XORG_XF86 | ||
9296 | #ifdef TWDEBUG | ||
9297 | xf86DrvMsg(0, X_INFO, "Probe: Prepare failed\n"); | ||
9298 | #endif | ||
9299 | #endif | ||
9300 | return 0xFFFF; | 9134 | return 0xFFFF; |
9301 | } | 9135 | } |
9302 | mask = 0xf0; | 9136 | mask = 0xf0; |
@@ -9310,11 +9144,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) | |||
9310 | } else { | 9144 | } else { |
9311 | failed = true; | 9145 | failed = true; |
9312 | ret = 0xFFFF; | 9146 | ret = 0xFFFF; |
9313 | #ifdef SIS_XORG_XF86 | ||
9314 | #ifdef TWDEBUG | ||
9315 | xf86DrvMsg(0, X_INFO, "Probe: Read 1 failed\n"); | ||
9316 | #endif | ||
9317 | #endif | ||
9318 | } | 9147 | } |
9319 | } | 9148 | } |
9320 | if(!failed) { | 9149 | if(!failed) { |
@@ -9324,11 +9153,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) | |||
9324 | if(temp == value) ret = 0; | 9153 | if(temp == value) ret = 0; |
9325 | else { | 9154 | else { |
9326 | ret = 0xFFFF; | 9155 | ret = 0xFFFF; |
9327 | #ifdef SIS_XORG_XF86 | ||
9328 | #ifdef TWDEBUG | ||
9329 | xf86DrvMsg(0, X_INFO, "Probe: Read 2 failed\n"); | ||
9330 | #endif | ||
9331 | #endif | ||
9332 | if(SiS_Pr->SiS_DDC_DeviceAddr == 0xa0) { | 9156 | if(SiS_Pr->SiS_DDC_DeviceAddr == 0xa0) { |
9333 | if(temp == 0x30) ret = 0; | 9157 | if(temp == 0x30) ret = 0; |
9334 | } | 9158 | } |
@@ -9338,9 +9162,7 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) | |||
9338 | return ret; | 9162 | return ret; |
9339 | } | 9163 | } |
9340 | 9164 | ||
9341 | #ifndef SIS_XORG_XF86 | ||
9342 | static | 9165 | static |
9343 | #endif | ||
9344 | unsigned short | 9166 | unsigned short |
9345 | SiS_ProbeDDC(struct SiS_Private *SiS_Pr) | 9167 | SiS_ProbeDDC(struct SiS_Private *SiS_Pr) |
9346 | { | 9168 | { |
@@ -9357,9 +9179,7 @@ SiS_ProbeDDC(struct SiS_Private *SiS_Pr) | |||
9357 | return flag; | 9179 | return flag; |
9358 | } | 9180 | } |
9359 | 9181 | ||
9360 | #ifndef SIS_XORG_XF86 | ||
9361 | static | 9182 | static |
9362 | #endif | ||
9363 | unsigned short | 9183 | unsigned short |
9364 | SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, unsigned char *buffer) | 9184 | SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, unsigned char *buffer) |
9365 | { | 9185 | { |
@@ -9606,11 +9426,6 @@ SiS_SetSCLKHigh(struct SiS_Private *SiS_Pr) | |||
9606 | temp = SiS_GetReg(SiS_Pr->SiS_DDC_Port,SiS_Pr->SiS_DDC_Index); | 9426 | temp = SiS_GetReg(SiS_Pr->SiS_DDC_Port,SiS_Pr->SiS_DDC_Index); |
9607 | } while((!(temp & SiS_Pr->SiS_DDC_Clk)) && --watchdog); | 9427 | } while((!(temp & SiS_Pr->SiS_DDC_Clk)) && --watchdog); |
9608 | if (!watchdog) { | 9428 | if (!watchdog) { |
9609 | #ifdef SIS_XORG_XF86 | ||
9610 | #ifdef TWDEBUG | ||
9611 | xf86DrvMsg(0, X_INFO, "SetClkHigh failed\n"); | ||
9612 | #endif | ||
9613 | #endif | ||
9614 | return 0xFFFF; | 9429 | return 0xFFFF; |
9615 | } | 9430 | } |
9616 | SiS_DDC2Delay(SiS_Pr,SiS_I2CDELAYSHORT); | 9431 | SiS_DDC2Delay(SiS_Pr,SiS_I2CDELAYSHORT); |
@@ -9641,7 +9456,7 @@ SiS_CheckACK(struct SiS_Private *SiS_Pr) | |||
9641 | 9456 | ||
9642 | /* =============== SiS 315/330 O.E.M. ================= */ | 9457 | /* =============== SiS 315/330 O.E.M. ================= */ |
9643 | 9458 | ||
9644 | #ifdef SIS315H | 9459 | #ifdef CONFIG_FB_SIS_315 |
9645 | 9460 | ||
9646 | static unsigned short | 9461 | static unsigned short |
9647 | GetRAMDACromptr(struct SiS_Private *SiS_Pr) | 9462 | GetRAMDACromptr(struct SiS_Private *SiS_Pr) |
@@ -10829,7 +10644,7 @@ SiS_FinalizeLCD(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor | |||
10829 | 10644 | ||
10830 | /* ================= SiS 300 O.E.M. ================== */ | 10645 | /* ================= SiS 300 O.E.M. ================== */ |
10831 | 10646 | ||
10832 | #ifdef SIS300 | 10647 | #ifdef CONFIG_FB_SIS_300 |
10833 | 10648 | ||
10834 | static void | 10649 | static void |
10835 | SetOEMLCDData2(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned short ModeIdIndex, | 10650 | SetOEMLCDData2(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned short ModeIdIndex, |
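
The blocks removed above were X.org-only debug prints hidden behind two nested #ifdefs (SIS_XORG_XF86 and TWDEBUG), plus static keywords that depended on which target was being built. With the kernel framebuffer driver as the only remaining consumer, the debug blocks are dead code and the functions become unconditionally static. A small compile-time sketch of that pattern; TWDEBUG and the message text mirror the deleted lines, everything else is illustrative:

#include <stdio.h>

#undef TWDEBUG			/* flip to #define TWDEBUG to get the trace */

/* Unconditionally static now: no second build target needs the symbol. */
static int set_trumpion_block(void)
{
#ifdef TWDEBUG
	fprintf(stderr, "Trumpion block success\n");
#endif
	return 1;		/* pretend the Trumpion block was programmed */
}

int main(void)
{
	printf("set_trumpion_block() = %d\n", set_trumpion_block());
	return 0;
}
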
diff --git a/drivers/video/sis/init301.h b/drivers/video/sis/init301.h index 51d99222375d..e1fd31d0fddf 100644 --- a/drivers/video/sis/init301.h +++ b/drivers/video/sis/init301.h | |||
@@ -53,15 +53,8 @@ | |||
53 | #ifndef _INIT301_H_ | 53 | #ifndef _INIT301_H_ |
54 | #define _INIT301_H_ | 54 | #define _INIT301_H_ |
55 | 55 | ||
56 | #include "osdef.h" | ||
57 | #include "initdef.h" | 56 | #include "initdef.h" |
58 | 57 | ||
59 | #ifdef SIS_XORG_XF86 | ||
60 | #include "sis.h" | ||
61 | #include "sis_regs.h" | ||
62 | #endif | ||
63 | |||
64 | #ifdef SIS_LINUX_KERNEL | ||
65 | #include "vgatypes.h" | 58 | #include "vgatypes.h" |
66 | #include "vstruct.h" | 59 | #include "vstruct.h" |
67 | #ifdef SIS_CP | 60 | #ifdef SIS_CP |
@@ -72,7 +65,6 @@ | |||
72 | #include <linux/fb.h> | 65 | #include <linux/fb.h> |
73 | #include "sis.h" | 66 | #include "sis.h" |
74 | #include <video/sisfb.h> | 67 | #include <video/sisfb.h> |
75 | #endif | ||
76 | 68 | ||
77 | static const unsigned char SiS_YPbPrTable[3][64] = { | 69 | static const unsigned char SiS_YPbPrTable[3][64] = { |
78 | { | 70 | { |
@@ -237,7 +229,7 @@ static const unsigned char SiS_Part2CLVX_6[] = { /* 1080i */ | |||
237 | 0xFF,0xFF, | 229 | 0xFF,0xFF, |
238 | }; | 230 | }; |
239 | 231 | ||
240 | #ifdef SIS315H | 232 | #ifdef CONFIG_FB_SIS_315 |
241 | /* 661 et al LCD data structure (2.03.00) */ | 233 | /* 661 et al LCD data structure (2.03.00) */ |
242 | static const unsigned char SiS_LCDStruct661[] = { | 234 | static const unsigned char SiS_LCDStruct661[] = { |
243 | /* 1024x768 */ | 235 | /* 1024x768 */ |
@@ -279,7 +271,7 @@ static const unsigned char SiS_LCDStruct661[] = { | |||
279 | }; | 271 | }; |
280 | #endif | 272 | #endif |
281 | 273 | ||
282 | #ifdef SIS300 | 274 | #ifdef CONFIG_FB_SIS_300 |
283 | static unsigned char SiS300_TrumpionData[14][80] = { | 275 | static unsigned char SiS300_TrumpionData[14][80] = { |
284 | { 0x02,0x0A,0x0A,0x01,0x04,0x01,0x00,0x03,0x0D,0x00,0x0D,0x10,0x7F,0x00,0x80,0x02, | 276 | { 0x02,0x0A,0x0A,0x01,0x04,0x01,0x00,0x03,0x0D,0x00,0x0D,0x10,0x7F,0x00,0x80,0x02, |
285 | 0x20,0x03,0x0B,0x00,0x90,0x01,0xC1,0x01,0x60,0x0C,0x30,0x10,0x00,0x00,0x04,0x23, | 277 | 0x20,0x03,0x0B,0x00,0x90,0x01,0xC1,0x01,0x60,0x0C,0x30,0x10,0x00,0x00,0x04,0x23, |
@@ -356,9 +348,6 @@ static unsigned char SiS300_TrumpionData[14][80] = { | |||
356 | #endif | 348 | #endif |
357 | 349 | ||
358 | void SiS_UnLockCRT2(struct SiS_Private *SiS_Pr); | 350 | void SiS_UnLockCRT2(struct SiS_Private *SiS_Pr); |
359 | #ifndef SIS_LINUX_KERNEL | ||
360 | void SiS_LockCRT2(struct SiS_Private *SiS_Pr); | ||
361 | #endif | ||
362 | void SiS_EnableCRT2(struct SiS_Private *SiS_Pr); | 351 | void SiS_EnableCRT2(struct SiS_Private *SiS_Pr); |
363 | unsigned short SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); | 352 | unsigned short SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); |
364 | void SiS_WaitRetrace1(struct SiS_Private *SiS_Pr); | 353 | void SiS_WaitRetrace1(struct SiS_Private *SiS_Pr); |
@@ -375,9 +364,6 @@ unsigned short SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo | |||
375 | unsigned short RefreshRateTableIndex); | 364 | unsigned short RefreshRateTableIndex); |
376 | unsigned short SiS_GetResInfo(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned short ModeIdIndex); | 365 | unsigned short SiS_GetResInfo(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned short ModeIdIndex); |
377 | void SiS_DisableBridge(struct SiS_Private *SiS_Pr); | 366 | void SiS_DisableBridge(struct SiS_Private *SiS_Pr); |
378 | #ifndef SIS_LINUX_KERNEL | ||
379 | void SiS_EnableBridge(struct SiS_Private *SiS_Pr); | ||
380 | #endif | ||
381 | bool SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo); | 367 | bool SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo); |
382 | void SiS_SiS30xBLOn(struct SiS_Private *SiS_Pr); | 368 | void SiS_SiS30xBLOn(struct SiS_Private *SiS_Pr); |
383 | void SiS_SiS30xBLOff(struct SiS_Private *SiS_Pr); | 369 | void SiS_SiS30xBLOff(struct SiS_Private *SiS_Pr); |
@@ -386,13 +372,9 @@ void SiS_SetCH700x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned cha | |||
386 | unsigned short SiS_GetCH700x(struct SiS_Private *SiS_Pr, unsigned short tempax); | 372 | unsigned short SiS_GetCH700x(struct SiS_Private *SiS_Pr, unsigned short tempax); |
387 | void SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); | 373 | void SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); |
388 | unsigned short SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempax); | 374 | unsigned short SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempax); |
389 | #ifndef SIS_LINUX_KERNEL | ||
390 | void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); | ||
391 | unsigned short SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempax); | ||
392 | #endif | ||
393 | void SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg, | 375 | void SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg, |
394 | unsigned char orval,unsigned short andval); | 376 | unsigned char orval,unsigned short andval); |
395 | #ifdef SIS315H | 377 | #ifdef CONFIG_FB_SIS_315 |
396 | static void SiS_Chrontel701xOn(struct SiS_Private *SiS_Pr); | 378 | static void SiS_Chrontel701xOn(struct SiS_Private *SiS_Pr); |
397 | static void SiS_Chrontel701xOff(struct SiS_Private *SiS_Pr); | 379 | static void SiS_Chrontel701xOff(struct SiS_Private *SiS_Pr); |
398 | static void SiS_ChrontelInitTVVSync(struct SiS_Private *SiS_Pr); | 380 | static void SiS_ChrontelInitTVVSync(struct SiS_Private *SiS_Pr); |
@@ -401,7 +383,7 @@ void SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr); | |||
401 | void SiS_Chrontel701xBLOff(struct SiS_Private *SiS_Pr); | 383 | void SiS_Chrontel701xBLOff(struct SiS_Private *SiS_Pr); |
402 | #endif /* 315 */ | 384 | #endif /* 315 */ |
403 | 385 | ||
404 | #ifdef SIS300 | 386 | #ifdef CONFIG_FB_SIS_300 |
405 | static bool SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr); | 387 | static bool SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr); |
406 | void SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo); | 388 | void SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo); |
407 | #endif | 389 | #endif |
@@ -412,21 +394,12 @@ unsigned short SiS_HandleDDC(struct SiS_Private *SiS_Pr, unsigned int VBFlags, i | |||
412 | unsigned short adaptnum, unsigned short DDCdatatype, | 394 | unsigned short adaptnum, unsigned short DDCdatatype, |
413 | unsigned char *buffer, unsigned int VBFlags2); | 395 | unsigned char *buffer, unsigned int VBFlags2); |
414 | 396 | ||
415 | #ifdef SIS_XORG_XF86 | ||
416 | unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, | ||
417 | int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, | ||
418 | bool checkcr32, unsigned int VBFlags2); | ||
419 | unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr); | ||
420 | unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, | ||
421 | unsigned char *buffer); | ||
422 | #else | ||
423 | static unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, | 397 | static unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, |
424 | int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, | 398 | int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, |
425 | bool checkcr32, unsigned int VBFlags2); | 399 | bool checkcr32, unsigned int VBFlags2); |
426 | static unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr); | 400 | static unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr); |
427 | static unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, | 401 | static unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, |
428 | unsigned char *buffer); | 402 | unsigned char *buffer); |
429 | #endif | ||
430 | static void SiS_SetSwitchDDC2(struct SiS_Private *SiS_Pr); | 403 | static void SiS_SetSwitchDDC2(struct SiS_Private *SiS_Pr); |
431 | static unsigned short SiS_SetStart(struct SiS_Private *SiS_Pr); | 404 | static unsigned short SiS_SetStart(struct SiS_Private *SiS_Pr); |
432 | static unsigned short SiS_SetStop(struct SiS_Private *SiS_Pr); | 405 | static unsigned short SiS_SetStop(struct SiS_Private *SiS_Pr); |
@@ -441,13 +414,13 @@ static unsigned short SiS_PrepareDDC(struct SiS_Private *SiS_Pr); | |||
441 | static void SiS_SendACK(struct SiS_Private *SiS_Pr, unsigned short yesno); | 414 | static void SiS_SendACK(struct SiS_Private *SiS_Pr, unsigned short yesno); |
442 | static unsigned short SiS_DoProbeDDC(struct SiS_Private *SiS_Pr); | 415 | static unsigned short SiS_DoProbeDDC(struct SiS_Private *SiS_Pr); |
443 | 416 | ||
444 | #ifdef SIS300 | 417 | #ifdef CONFIG_FB_SIS_300 |
445 | static void SiS_OEM300Setting(struct SiS_Private *SiS_Pr, | 418 | static void SiS_OEM300Setting(struct SiS_Private *SiS_Pr, |
446 | unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefTabindex); | 419 | unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefTabindex); |
447 | static void SetOEMLCDData2(struct SiS_Private *SiS_Pr, | 420 | static void SetOEMLCDData2(struct SiS_Private *SiS_Pr, |
448 | unsigned short ModeNo, unsigned short ModeIdIndex,unsigned short RefTableIndex); | 421 | unsigned short ModeNo, unsigned short ModeIdIndex,unsigned short RefTableIndex); |
449 | #endif | 422 | #endif |
450 | #ifdef SIS315H | 423 | #ifdef CONFIG_FB_SIS_315 |
451 | static void SiS_OEM310Setting(struct SiS_Private *SiS_Pr, | 424 | static void SiS_OEM310Setting(struct SiS_Private *SiS_Pr, |
452 | unsigned short ModeNo,unsigned short ModeIdIndex, unsigned short RRTI); | 425 | unsigned short ModeNo,unsigned short ModeIdIndex, unsigned short RRTI); |
453 | static void SiS_OEM661Setting(struct SiS_Private *SiS_Pr, | 426 | static void SiS_OEM661Setting(struct SiS_Private *SiS_Pr, |
@@ -482,15 +455,13 @@ extern void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short M | |||
482 | extern void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); | 455 | extern void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); |
483 | extern unsigned short SiS_GetRefCRTVCLK(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); | 456 | extern unsigned short SiS_GetRefCRTVCLK(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); |
484 | extern unsigned short SiS_GetRefCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); | 457 | extern unsigned short SiS_GetRefCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); |
485 | #ifdef SIS300 | 458 | #ifdef CONFIG_FB_SIS_300 |
486 | extern void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *tempbx, | 459 | extern void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *tempbx, |
487 | unsigned short *tempcl); | 460 | unsigned short *tempcl); |
488 | extern unsigned short SiS_GetFIFOThresholdB300(unsigned short tempbx, unsigned short tempcl); | 461 | extern unsigned short SiS_GetFIFOThresholdB300(unsigned short tempbx, unsigned short tempcl); |
489 | extern unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); | 462 | extern unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); |
490 | #ifdef SIS_LINUX_KERNEL | ||
491 | extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); | 463 | extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); |
492 | extern unsigned int sisfb_read_lpc_pci_dword(struct SiS_Private *SiS_Pr, int reg); | 464 | extern unsigned int sisfb_read_lpc_pci_dword(struct SiS_Private *SiS_Pr, int reg); |
493 | #endif | 465 | #endif |
494 | #endif | ||
495 | 466 | ||
496 | #endif | 467 | #endif |
diff --git a/drivers/video/sis/initextlfb.c b/drivers/video/sis/initextlfb.c index 99c04a4855d1..9dec64da4015 100644 --- a/drivers/video/sis/initextlfb.c +++ b/drivers/video/sis/initextlfb.c | |||
@@ -25,7 +25,6 @@ | |||
25 | * Author: Thomas Winischhofer <thomas@winischhofer.net> | 25 | * Author: Thomas Winischhofer <thomas@winischhofer.net> |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include "osdef.h" | ||
29 | #include "initdef.h" | 28 | #include "initdef.h" |
30 | #include "vgatypes.h" | 29 | #include "vgatypes.h" |
31 | #include "vstruct.h" | 30 | #include "vstruct.h" |
@@ -59,7 +58,7 @@ sisfb_mode_rate_to_dclock(struct SiS_Private *SiS_Pr, unsigned char modeno, | |||
59 | 58 | ||
60 | if(rateindex > 0) rateindex--; | 59 | if(rateindex > 0) rateindex--; |
61 | 60 | ||
62 | #ifdef SIS315H | 61 | #ifdef CONFIG_FB_SIS_315 |
63 | switch(ModeNo) { | 62 | switch(ModeNo) { |
64 | case 0x5a: ModeNo = 0x50; break; | 63 | case 0x5a: ModeNo = 0x50; break; |
65 | case 0x5b: ModeNo = 0x56; | 64 | case 0x5b: ModeNo = 0x56; |
@@ -103,7 +102,7 @@ sisfb_mode_rate_to_ddata(struct SiS_Private *SiS_Pr, unsigned char modeno, | |||
103 | 102 | ||
104 | if(rateindex > 0) rateindex--; | 103 | if(rateindex > 0) rateindex--; |
105 | 104 | ||
106 | #ifdef SIS315H | 105 | #ifdef CONFIG_FB_SIS_315 |
107 | switch(ModeNo) { | 106 | switch(ModeNo) { |
108 | case 0x5a: ModeNo = 0x50; break; | 107 | case 0x5a: ModeNo = 0x50; break; |
109 | case 0x5b: ModeNo = 0x56; | 108 | case 0x5b: ModeNo = 0x56; |
@@ -187,7 +186,7 @@ sisfb_gettotalfrommode(struct SiS_Private *SiS_Pr, unsigned char modeno, int *ht | |||
187 | 186 | ||
188 | if(rateindex > 0) rateindex--; | 187 | if(rateindex > 0) rateindex--; |
189 | 188 | ||
190 | #ifdef SIS315H | 189 | #ifdef CONFIG_FB_SIS_315 |
191 | switch(ModeNo) { | 190 | switch(ModeNo) { |
192 | case 0x5a: ModeNo = 0x50; break; | 191 | case 0x5a: ModeNo = 0x50; break; |
193 | case 0x5b: ModeNo = 0x56; | 192 | case 0x5b: ModeNo = 0x56; |
diff --git a/drivers/video/sis/osdef.h b/drivers/video/sis/osdef.h deleted file mode 100644 index 6ff8f988a1a7..000000000000 --- a/drivers/video/sis/osdef.h +++ /dev/null | |||
@@ -1,133 +0,0 @@ | |||
1 | /* $XFree86$ */ | ||
2 | /* $XdotOrg$ */ | ||
3 | /* | ||
4 | * OS depending defines | ||
5 | * | ||
6 | * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria | ||
7 | * | ||
8 | * If distributed as part of the Linux kernel, the following license terms | ||
9 | * apply: | ||
10 | * | ||
11 | * * This program is free software; you can redistribute it and/or modify | ||
12 | * * it under the terms of the GNU General Public License as published by | ||
13 | * * the Free Software Foundation; either version 2 of the named License, | ||
14 | * * or any later version. | ||
15 | * * | ||
16 | * * This program is distributed in the hope that it will be useful, | ||
17 | * * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * * GNU General Public License for more details. | ||
20 | * * | ||
21 | * * You should have received a copy of the GNU General Public License | ||
22 | * * along with this program; if not, write to the Free Software | ||
23 | * * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA | ||
24 | * | ||
25 | * Otherwise, the following license terms apply: | ||
26 | * | ||
27 | * * Redistribution and use in source and binary forms, with or without | ||
28 | * * modification, are permitted provided that the following conditions | ||
29 | * * are met: | ||
30 | * * 1) Redistributions of source code must retain the above copyright | ||
31 | * * notice, this list of conditions and the following disclaimer. | ||
32 | * * 2) Redistributions in binary form must reproduce the above copyright | ||
33 | * * notice, this list of conditions and the following disclaimer in the | ||
34 | * * documentation and/or other materials provided with the distribution. | ||
35 | * * 3) The name of the author may not be used to endorse or promote products | ||
36 | * * derived from this software without specific prior written permission. | ||
37 | * * | ||
38 | * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | ||
39 | * * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | ||
40 | * * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | ||
41 | * * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
42 | * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
43 | * * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
44 | * * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
45 | * * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
46 | * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
47 | * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
48 | * | ||
49 | * Author: Thomas Winischhofer <thomas@winischhofer.net> | ||
50 | * Silicon Integrated Systems, Inc. (used by permission) | ||
51 | * | ||
52 | */ | ||
53 | |||
54 | #ifndef _SIS_OSDEF_H_ | ||
55 | #define _SIS_OSDEF_H_ | ||
56 | |||
57 | /* The choices are: */ | ||
58 | #define SIS_LINUX_KERNEL /* Linux kernel framebuffer */ | ||
59 | #undef SIS_XORG_XF86 /* XFree86/X.org */ | ||
60 | |||
61 | #ifdef OutPortByte | ||
62 | #undef OutPortByte | ||
63 | #endif | ||
64 | |||
65 | #ifdef OutPortWord | ||
66 | #undef OutPortWord | ||
67 | #endif | ||
68 | |||
69 | #ifdef OutPortLong | ||
70 | #undef OutPortLong | ||
71 | #endif | ||
72 | |||
73 | #ifdef InPortByte | ||
74 | #undef InPortByte | ||
75 | #endif | ||
76 | |||
77 | #ifdef InPortWord | ||
78 | #undef InPortWord | ||
79 | #endif | ||
80 | |||
81 | #ifdef InPortLong | ||
82 | #undef InPortLong | ||
83 | #endif | ||
84 | |||
85 | /**********************************************************************/ | ||
86 | /* LINUX KERNEL */ | ||
87 | /**********************************************************************/ | ||
88 | |||
89 | #ifdef SIS_LINUX_KERNEL | ||
90 | |||
91 | #ifdef CONFIG_FB_SIS_300 | ||
92 | #define SIS300 | ||
93 | #endif | ||
94 | |||
95 | #ifdef CONFIG_FB_SIS_315 | ||
96 | #define SIS315H | ||
97 | #endif | ||
98 | |||
99 | #if !defined(SIS300) && !defined(SIS315H) | ||
100 | #warning Neither CONFIG_FB_SIS_300 nor CONFIG_FB_SIS_315 is set | ||
101 | #warning sisfb will not work! | ||
102 | #endif | ||
103 | |||
104 | #define OutPortByte(p,v) outb((u8)(v),(SISIOADDRESS)(p)) | ||
105 | #define OutPortWord(p,v) outw((u16)(v),(SISIOADDRESS)(p)) | ||
106 | #define OutPortLong(p,v) outl((u32)(v),(SISIOADDRESS)(p)) | ||
107 | #define InPortByte(p) inb((SISIOADDRESS)(p)) | ||
108 | #define InPortWord(p) inw((SISIOADDRESS)(p)) | ||
109 | #define InPortLong(p) inl((SISIOADDRESS)(p)) | ||
110 | #define SiS_SetMemory(MemoryAddress,MemorySize,value) memset_io(MemoryAddress, value, MemorySize) | ||
111 | |||
112 | #endif /* LINUX_KERNEL */ | ||
113 | |||
114 | /**********************************************************************/ | ||
115 | /* XFree86/X.org */ | ||
116 | /**********************************************************************/ | ||
117 | |||
118 | #ifdef SIS_XORG_XF86 | ||
119 | |||
120 | #define SIS300 | ||
121 | #define SIS315H | ||
122 | |||
123 | #define OutPortByte(p,v) outSISREG((IOADDRESS)(p),(CARD8)(v)) | ||
124 | #define OutPortWord(p,v) outSISREGW((IOADDRESS)(p),(CARD16)(v)) | ||
125 | #define OutPortLong(p,v) outSISREGL((IOADDRESS)(p),(CARD32)(v)) | ||
126 | #define InPortByte(p) inSISREG((IOADDRESS)(p)) | ||
127 | #define InPortWord(p) inSISREGW((IOADDRESS)(p)) | ||
128 | #define InPortLong(p) inSISREGL((IOADDRESS)(p)) | ||
129 | #define SiS_SetMemory(MemoryAddress,MemorySize,value) memset(MemoryAddress, value, MemorySize) | ||
130 | |||
131 | #endif /* XF86 */ | ||
132 | |||
133 | #endif /* _OSDEF_H_ */ | ||
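
The deleted osdef.h existed so the same sources could build both as the Linux framebuffer driver and as part of the X.org driver: it derived SIS300/SIS315H from CONFIG_FB_SIS_300/CONFIG_FB_SIS_315 (or defined both unconditionally for X) and hid port I/O behind OutPortByte()-style macros. With the X.org half gone, the files test the CONFIG_ symbols directly and the accessors collapse to plain outb()/inb(). A user-space sketch of the wrapper layer being dropped; the fake_port array and the lower-case function names are illustrative stand-ins, not kernel APIs:

#include <stdint.h>
#include <stdio.h>

/* Fake port space so the sketch runs in user space; the kernel driver now
 * calls outb()/inb() on a SISIOADDRESS with no macro layer in between. */
static uint8_t fake_port[0x10000];

static inline void out_port_byte(uint16_t port, uint8_t val)
{
	fake_port[port] = val;		/* kernel equivalent: outb(val, port) */
}

static inline uint8_t in_port_byte(uint16_t port)
{
	return fake_port[port];		/* kernel equivalent: inb(port) */
}

int main(void)
{
	out_port_byte(0x3c4, 0x05);	/* write an index to a VGA index port */
	printf("port 0x3c4 reads back 0x%02x\n", in_port_byte(0x3c4));
	return 0;
}
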
diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h index 7c5710e3fb56..80d89d37c414 100644 --- a/drivers/video/sis/sis.h +++ b/drivers/video/sis/sis.h | |||
@@ -24,7 +24,6 @@ | |||
24 | #ifndef _SIS_H_ | 24 | #ifndef _SIS_H_ |
25 | #define _SIS_H_ | 25 | #define _SIS_H_ |
26 | 26 | ||
27 | #include "osdef.h" | ||
28 | #include <video/sisfb.h> | 27 | #include <video/sisfb.h> |
29 | 28 | ||
30 | #include "vgatypes.h" | 29 | #include "vgatypes.h" |
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index b52f8e4ef1fd..7e3370f115b6 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c | |||
@@ -60,6 +60,11 @@ | |||
60 | #include "sis.h" | 60 | #include "sis.h" |
61 | #include "sis_main.h" | 61 | #include "sis_main.h" |
62 | 62 | ||
63 | #if !defined(CONFIG_FB_SIS_300) && !defined(CONFIG_FB_SIS_315) | ||
64 | #warning Neither CONFIG_FB_SIS_300 nor CONFIG_FB_SIS_315 is set | ||
65 | #warning sisfb will not work! | ||
66 | #endif | ||
67 | |||
63 | static void sisfb_handle_command(struct sis_video_info *ivideo, | 68 | static void sisfb_handle_command(struct sis_video_info *ivideo, |
64 | struct sisfb_cmd *sisfb_command); | 69 | struct sisfb_cmd *sisfb_command); |
65 | 70 | ||
@@ -4114,14 +4119,6 @@ sisfb_find_rom(struct pci_dev *pdev) | |||
4114 | if(sisfb_check_rom(rom_base, ivideo)) { | 4119 | if(sisfb_check_rom(rom_base, ivideo)) { |
4115 | 4120 | ||
4116 | if((myrombase = vmalloc(65536))) { | 4121 | if((myrombase = vmalloc(65536))) { |
4117 | |||
4118 | /* Work around bug in pci/rom.c: Folks forgot to check | ||
4119 | * whether the size retrieved from the BIOS image eventually | ||
4120 | * is larger than the mapped size | ||
4121 | */ | ||
4122 | if(pci_resource_len(pdev, PCI_ROM_RESOURCE) < romsize) | ||
4123 | romsize = pci_resource_len(pdev, PCI_ROM_RESOURCE); | ||
4124 | |||
4125 | memcpy_fromio(myrombase, rom_base, | 4122 | memcpy_fromio(myrombase, rom_base, |
4126 | (romsize > 65536) ? 65536 : romsize); | 4123 | (romsize > 65536) ? 65536 : romsize); |
4127 | } | 4124 | } |
@@ -4155,23 +4152,6 @@ sisfb_find_rom(struct pci_dev *pdev) | |||
4155 | 4152 | ||
4156 | } | 4153 | } |
4157 | 4154 | ||
4158 | #else | ||
4159 | |||
4160 | pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &temp); | ||
4161 | pci_write_config_dword(pdev, PCI_ROM_ADDRESS, | ||
4162 | (ivideo->video_base & PCI_ROM_ADDRESS_MASK) | PCI_ROM_ADDRESS_ENABLE); | ||
4163 | |||
4164 | rom_base = ioremap(ivideo->video_base, 65536); | ||
4165 | if(rom_base) { | ||
4166 | if(sisfb_check_rom(rom_base, ivideo)) { | ||
4167 | if((myrombase = vmalloc(65536))) | ||
4168 | memcpy_fromio(myrombase, rom_base, 65536); | ||
4169 | } | ||
4170 | iounmap(rom_base); | ||
4171 | } | ||
4172 | |||
4173 | pci_write_config_dword(pdev, PCI_ROM_ADDRESS, temp); | ||
4174 | |||
4175 | #endif | 4155 | #endif |
4176 | 4156 | ||
4177 | return myrombase; | 4157 | return myrombase; |
@@ -4181,6 +4161,9 @@ static void __devinit | |||
4181 | sisfb_post_map_vram(struct sis_video_info *ivideo, unsigned int *mapsize, | 4161 | sisfb_post_map_vram(struct sis_video_info *ivideo, unsigned int *mapsize, |
4182 | unsigned int min) | 4162 | unsigned int min) |
4183 | { | 4163 | { |
4164 | if (*mapsize < (min << 20)) | ||
4165 | return; | ||
4166 | |||
4184 | ivideo->video_vbase = ioremap(ivideo->video_base, (*mapsize)); | 4167 | ivideo->video_vbase = ioremap(ivideo->video_base, (*mapsize)); |
4185 | 4168 | ||
4186 | if(!ivideo->video_vbase) { | 4169 | if(!ivideo->video_vbase) { |
@@ -4514,7 +4497,7 @@ sisfb_post_sis300(struct pci_dev *pdev) | |||
4514 | } else { | 4497 | } else { |
4515 | #endif | 4498 | #endif |
4516 | /* Need to map max FB size for finding out about RAM size */ | 4499 | /* Need to map max FB size for finding out about RAM size */ |
4517 | mapsize = 64 << 20; | 4500 | mapsize = ivideo->video_size; |
4518 | sisfb_post_map_vram(ivideo, &mapsize, 4); | 4501 | sisfb_post_map_vram(ivideo, &mapsize, 4); |
4519 | 4502 | ||
4520 | if(ivideo->video_vbase) { | 4503 | if(ivideo->video_vbase) { |
@@ -4680,7 +4663,7 @@ sisfb_post_xgi_ramsize(struct sis_video_info *ivideo) | |||
4680 | orSISIDXREG(SISSR, 0x20, (0x80 | 0x04)); | 4663 | orSISIDXREG(SISSR, 0x20, (0x80 | 0x04)); |
4681 | 4664 | ||
4682 | /* Need to map max FB size for finding out about RAM size */ | 4665 | /* Need to map max FB size for finding out about RAM size */ |
4683 | mapsize = 256 << 20; | 4666 | mapsize = ivideo->video_size; |
4684 | sisfb_post_map_vram(ivideo, &mapsize, 32); | 4667 | sisfb_post_map_vram(ivideo, &mapsize, 32); |
4685 | 4668 | ||
4686 | if(!ivideo->video_vbase) { | 4669 | if(!ivideo->video_vbase) { |
@@ -5936,6 +5919,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5936 | } | 5919 | } |
5937 | 5920 | ||
5938 | ivideo->video_base = pci_resource_start(pdev, 0); | 5921 | ivideo->video_base = pci_resource_start(pdev, 0); |
5922 | ivideo->video_size = pci_resource_len(pdev, 0); | ||
5939 | ivideo->mmio_base = pci_resource_start(pdev, 1); | 5923 | ivideo->mmio_base = pci_resource_start(pdev, 1); |
5940 | ivideo->mmio_size = pci_resource_len(pdev, 1); | 5924 | ivideo->mmio_size = pci_resource_len(pdev, 1); |
5941 | ivideo->SiS_Pr.RelIO = pci_resource_start(pdev, 2) + 0x30; | 5925 | ivideo->SiS_Pr.RelIO = pci_resource_start(pdev, 2) + 0x30; |
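
Two related changes above: the probe path now records the length of PCI BAR 0 in ivideo->video_size, and sisfb_post_map_vram() bails out early when that size is below the minimum the POST code needs, instead of blindly ioremapping a hard-coded 64 MB or 256 MB window. A user-space sketch of the new guard; the function name, the MB-based min argument and the -1 error value are illustrative simplifications:

#include <stdio.h>

/* Refuse to "map" anything smaller than min_mb megabytes; the kernel code
 * only ioremap()s *mapsize bytes after this check passes. */
static int post_map_vram(unsigned int *mapsize, unsigned int min_mb)
{
	if (*mapsize < (min_mb << 20))
		return -1;		/* BAR smaller than required, give up */
	return 0;
}

int main(void)
{
	unsigned int bar_len = 32 << 20;	/* pretend pci_resource_len() reported 32 MB */

	if (post_map_vram(&bar_len, 4) == 0)
		puts("mapped");
	else
		puts("resource too small");
	return 0;
}
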
diff --git a/drivers/video/sis/vgatypes.h b/drivers/video/sis/vgatypes.h index 81a22eaabfde..12c0dfaf2518 100644 --- a/drivers/video/sis/vgatypes.h +++ b/drivers/video/sis/vgatypes.h | |||
@@ -55,21 +55,10 @@ | |||
55 | 55 | ||
56 | #define SISIOMEMTYPE | 56 | #define SISIOMEMTYPE |
57 | 57 | ||
58 | #ifdef SIS_LINUX_KERNEL | ||
59 | typedef unsigned long SISIOADDRESS; | 58 | typedef unsigned long SISIOADDRESS; |
60 | #include <linux/types.h> /* Need __iomem */ | 59 | #include <linux/types.h> /* Need __iomem */ |
61 | #undef SISIOMEMTYPE | 60 | #undef SISIOMEMTYPE |
62 | #define SISIOMEMTYPE __iomem | 61 | #define SISIOMEMTYPE __iomem |
63 | #endif | ||
64 | |||
65 | #ifdef SIS_XORG_XF86 | ||
66 | #if XF86_VERSION_CURRENT < XF86_VERSION_NUMERIC(4,2,0,0,0) | ||
67 | typedef unsigned long IOADDRESS; | ||
68 | typedef unsigned long SISIOADDRESS; | ||
69 | #else | ||
70 | typedef IOADDRESS SISIOADDRESS; | ||
71 | #endif | ||
72 | #endif | ||
73 | 62 | ||
74 | typedef enum _SIS_CHIP_TYPE { | 63 | typedef enum _SIS_CHIP_TYPE { |
75 | SIS_VGALegacy = 0, | 64 | SIS_VGALegacy = 0, |
diff --git a/drivers/video/sis/vstruct.h b/drivers/video/sis/vstruct.h index bef4aae388d0..ea94d214dcff 100644 --- a/drivers/video/sis/vstruct.h +++ b/drivers/video/sis/vstruct.h | |||
@@ -233,24 +233,15 @@ struct SiS_Private | |||
233 | { | 233 | { |
234 | unsigned char ChipType; | 234 | unsigned char ChipType; |
235 | unsigned char ChipRevision; | 235 | unsigned char ChipRevision; |
236 | #ifdef SIS_XORG_XF86 | ||
237 | PCITAG PciTag; | ||
238 | #endif | ||
239 | #ifdef SIS_LINUX_KERNEL | ||
240 | void *ivideo; | 236 | void *ivideo; |
241 | #endif | ||
242 | unsigned char *VirtualRomBase; | 237 | unsigned char *VirtualRomBase; |
243 | bool UseROM; | 238 | bool UseROM; |
244 | #ifdef SIS_LINUX_KERNEL | ||
245 | unsigned char SISIOMEMTYPE *VideoMemoryAddress; | 239 | unsigned char SISIOMEMTYPE *VideoMemoryAddress; |
246 | unsigned int VideoMemorySize; | 240 | unsigned int VideoMemorySize; |
247 | #endif | ||
248 | SISIOADDRESS IOAddress; | 241 | SISIOADDRESS IOAddress; |
249 | SISIOADDRESS IOAddress2; /* For dual chip XGI volari */ | 242 | SISIOADDRESS IOAddress2; /* For dual chip XGI volari */ |
250 | 243 | ||
251 | #ifdef SIS_LINUX_KERNEL | ||
252 | SISIOADDRESS RelIO; | 244 | SISIOADDRESS RelIO; |
253 | #endif | ||
254 | SISIOADDRESS SiS_P3c4; | 245 | SISIOADDRESS SiS_P3c4; |
255 | SISIOADDRESS SiS_P3d4; | 246 | SISIOADDRESS SiS_P3d4; |
256 | SISIOADDRESS SiS_P3c0; | 247 | SISIOADDRESS SiS_P3c0; |
@@ -280,9 +271,6 @@ struct SiS_Private | |||
280 | unsigned short SiS_IF_DEF_FSTN; | 271 | unsigned short SiS_IF_DEF_FSTN; |
281 | unsigned short SiS_SysFlags; | 272 | unsigned short SiS_SysFlags; |
282 | unsigned char SiS_VGAINFO; | 273 | unsigned char SiS_VGAINFO; |
283 | #ifdef SIS_XORG_XF86 | ||
284 | unsigned short SiS_CP1, SiS_CP2, SiS_CP3, SiS_CP4; | ||
285 | #endif | ||
286 | bool SiS_UseROM; | 274 | bool SiS_UseROM; |
287 | bool SiS_ROMNew; | 275 | bool SiS_ROMNew; |
288 | bool SiS_XGIROM; | 276 | bool SiS_XGIROM; |
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 3a43ebf83a49..efb35aa8309a 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c | |||
@@ -9,19 +9,19 @@ static ssize_t device_show(struct device *_d, | |||
9 | struct device_attribute *attr, char *buf) | 9 | struct device_attribute *attr, char *buf) |
10 | { | 10 | { |
11 | struct virtio_device *dev = container_of(_d,struct virtio_device,dev); | 11 | struct virtio_device *dev = container_of(_d,struct virtio_device,dev); |
12 | return sprintf(buf, "%hu", dev->id.device); | 12 | return sprintf(buf, "0x%04x\n", dev->id.device); |
13 | } | 13 | } |
14 | static ssize_t vendor_show(struct device *_d, | 14 | static ssize_t vendor_show(struct device *_d, |
15 | struct device_attribute *attr, char *buf) | 15 | struct device_attribute *attr, char *buf) |
16 | { | 16 | { |
17 | struct virtio_device *dev = container_of(_d,struct virtio_device,dev); | 17 | struct virtio_device *dev = container_of(_d,struct virtio_device,dev); |
18 | return sprintf(buf, "%hu", dev->id.vendor); | 18 | return sprintf(buf, "0x%04x\n", dev->id.vendor); |
19 | } | 19 | } |
20 | static ssize_t status_show(struct device *_d, | 20 | static ssize_t status_show(struct device *_d, |
21 | struct device_attribute *attr, char *buf) | 21 | struct device_attribute *attr, char *buf) |
22 | { | 22 | { |
23 | struct virtio_device *dev = container_of(_d,struct virtio_device,dev); | 23 | struct virtio_device *dev = container_of(_d,struct virtio_device,dev); |
24 | return sprintf(buf, "0x%08x", dev->config->get_status(dev)); | 24 | return sprintf(buf, "0x%08x\n", dev->config->get_status(dev)); |
25 | } | 25 | } |
26 | static ssize_t modalias_show(struct device *_d, | 26 | static ssize_t modalias_show(struct device *_d, |
27 | struct device_attribute *attr, char *buf) | 27 | struct device_attribute *attr, char *buf) |
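
The sysfs show routines above switch from bare decimal with no terminating newline to zero-padded hex plus a newline, so the device and vendor files read like the values used in modalias matching. A trivial user-space illustration of the before/after formatting; the sample ID values are made up for the demo:

#include <stdio.h>

int main(void)
{
	unsigned short device = 0x0001, vendor = 0x1af4;	/* sample IDs */

	printf("%hu", device);				/* old style: "1", no newline */
	putchar('\n');
	printf("0x%04x\n", (unsigned int)device);	/* new style: "0x0001\n" */
	printf("0x%04x\n", (unsigned int)vendor);	/* new style: "0x1af4\n" */
	return 0;
}
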
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 1475ed6b575f..cc2f73e03475 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -230,9 +230,6 @@ add_head: | |||
230 | pr_debug("Added buffer head %i to %p\n", head, vq); | 230 | pr_debug("Added buffer head %i to %p\n", head, vq); |
231 | END_USE(vq); | 231 | END_USE(vq); |
232 | 232 | ||
233 | /* If we're indirect, we can fit many (assuming not OOM). */ | ||
234 | if (vq->indirect) | ||
235 | return vq->num_free ? vq->vring.num : 0; | ||
236 | return vq->num_free; | 233 | return vq->num_free; |
237 | } | 234 | } |
238 | EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp); | 235 | EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp); |
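
The hunk above drops the special case for indirect descriptors: virtqueue_add_buf_gfp() now always returns the real number of free descriptor slots rather than reporting the whole ring as available whenever anything is still free. A small sketch of the old versus new return value; the struct and field names are simplified stand-ins for the real vring state:

#include <stdio.h>

struct vq {
	int num_free;		/* free descriptor slots */
	int ring_size;		/* total ring entries */
	int indirect;		/* indirect descriptors in use? */
};

static int capacity_old(const struct vq *vq)
{
	if (vq->indirect)
		return vq->num_free ? vq->ring_size : 0;	/* "plenty" while anything is free */
	return vq->num_free;
}

static int capacity_new(const struct vq *vq)
{
	return vq->num_free;		/* always the honest count */
}

int main(void)
{
	struct vq vq = { .num_free = 3, .ring_size = 256, .indirect = 1 };

	printf("old: %d, new: %d\n", capacity_old(&vq), capacity_new(&vq));
	return 0;
}
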
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index eb8a78d77d9d..533a199e7a3f 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
@@ -8,9 +8,12 @@ obj-$(CONFIG_BLOCK) += biomerge.o | |||
8 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o | 8 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o |
9 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o | 9 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o |
10 | obj-$(CONFIG_XEN_BALLOON) += balloon.o | 10 | obj-$(CONFIG_XEN_BALLOON) += balloon.o |
11 | obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o | 11 | obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o |
12 | obj-$(CONFIG_XENFS) += xenfs/ | 12 | obj-$(CONFIG_XENFS) += xenfs/ |
13 | obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o | 13 | obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o |
14 | obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o | 14 | obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o |
15 | obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o | 15 | obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o |
16 | obj-$(CONFIG_XEN_DOM0) += pci.o | 16 | obj-$(CONFIG_XEN_DOM0) += pci.o |
17 | |||
18 | xen-evtchn-y := evtchn.o | ||
19 | |||
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 500290b150bb..2b17ad5b4b32 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <asm/pgtable.h> | 50 | #include <asm/pgtable.h> |
51 | #include <asm/uaccess.h> | 51 | #include <asm/uaccess.h> |
52 | #include <asm/tlb.h> | 52 | #include <asm/tlb.h> |
53 | #include <asm/e820.h> | ||
53 | 54 | ||
54 | #include <asm/xen/hypervisor.h> | 55 | #include <asm/xen/hypervisor.h> |
55 | #include <asm/xen/hypercall.h> | 56 | #include <asm/xen/hypercall.h> |
@@ -119,7 +120,7 @@ static void scrub_page(struct page *page) | |||
119 | } | 120 | } |
120 | 121 | ||
121 | /* balloon_append: add the given page to the balloon. */ | 122 | /* balloon_append: add the given page to the balloon. */ |
122 | static void balloon_append(struct page *page) | 123 | static void __balloon_append(struct page *page) |
123 | { | 124 | { |
124 | /* Lowmem is re-populated first, so highmem pages go at list tail. */ | 125 | /* Lowmem is re-populated first, so highmem pages go at list tail. */ |
125 | if (PageHighMem(page)) { | 126 | if (PageHighMem(page)) { |
@@ -130,7 +131,11 @@ static void balloon_append(struct page *page) | |||
130 | list_add(&page->lru, &ballooned_pages); | 131 | list_add(&page->lru, &ballooned_pages); |
131 | balloon_stats.balloon_low++; | 132 | balloon_stats.balloon_low++; |
132 | } | 133 | } |
134 | } | ||
133 | 135 | ||
136 | static void balloon_append(struct page *page) | ||
137 | { | ||
138 | __balloon_append(page); | ||
134 | totalram_pages--; | 139 | totalram_pages--; |
135 | } | 140 | } |
136 | 141 | ||
@@ -191,7 +196,7 @@ static unsigned long current_target(void) | |||
191 | 196 | ||
192 | static int increase_reservation(unsigned long nr_pages) | 197 | static int increase_reservation(unsigned long nr_pages) |
193 | { | 198 | { |
194 | unsigned long pfn, i, flags; | 199 | unsigned long pfn, i; |
195 | struct page *page; | 200 | struct page *page; |
196 | long rc; | 201 | long rc; |
197 | struct xen_memory_reservation reservation = { | 202 | struct xen_memory_reservation reservation = { |
@@ -203,8 +208,6 @@ static int increase_reservation(unsigned long nr_pages) | |||
203 | if (nr_pages > ARRAY_SIZE(frame_list)) | 208 | if (nr_pages > ARRAY_SIZE(frame_list)) |
204 | nr_pages = ARRAY_SIZE(frame_list); | 209 | nr_pages = ARRAY_SIZE(frame_list); |
205 | 210 | ||
206 | spin_lock_irqsave(&xen_reservation_lock, flags); | ||
207 | |||
208 | page = balloon_first_page(); | 211 | page = balloon_first_page(); |
209 | for (i = 0; i < nr_pages; i++) { | 212 | for (i = 0; i < nr_pages; i++) { |
210 | BUG_ON(page == NULL); | 213 | BUG_ON(page == NULL); |
@@ -247,14 +250,12 @@ static int increase_reservation(unsigned long nr_pages) | |||
247 | balloon_stats.current_pages += rc; | 250 | balloon_stats.current_pages += rc; |
248 | 251 | ||
249 | out: | 252 | out: |
250 | spin_unlock_irqrestore(&xen_reservation_lock, flags); | ||
251 | |||
252 | return rc < 0 ? rc : rc != nr_pages; | 253 | return rc < 0 ? rc : rc != nr_pages; |
253 | } | 254 | } |
254 | 255 | ||
255 | static int decrease_reservation(unsigned long nr_pages) | 256 | static int decrease_reservation(unsigned long nr_pages) |
256 | { | 257 | { |
257 | unsigned long pfn, i, flags; | 258 | unsigned long pfn, i; |
258 | struct page *page; | 259 | struct page *page; |
259 | int need_sleep = 0; | 260 | int need_sleep = 0; |
260 | int ret; | 261 | int ret; |
@@ -292,8 +293,6 @@ static int decrease_reservation(unsigned long nr_pages) | |||
292 | kmap_flush_unused(); | 293 | kmap_flush_unused(); |
293 | flush_tlb_all(); | 294 | flush_tlb_all(); |
294 | 295 | ||
295 | spin_lock_irqsave(&xen_reservation_lock, flags); | ||
296 | |||
297 | /* No more mappings: invalidate P2M and add to balloon. */ | 296 | /* No more mappings: invalidate P2M and add to balloon. */ |
298 | for (i = 0; i < nr_pages; i++) { | 297 | for (i = 0; i < nr_pages; i++) { |
299 | pfn = mfn_to_pfn(frame_list[i]); | 298 | pfn = mfn_to_pfn(frame_list[i]); |
@@ -308,8 +307,6 @@ static int decrease_reservation(unsigned long nr_pages) | |||
308 | 307 | ||
309 | balloon_stats.current_pages -= nr_pages; | 308 | balloon_stats.current_pages -= nr_pages; |
310 | 309 | ||
311 | spin_unlock_irqrestore(&xen_reservation_lock, flags); | ||
312 | |||
313 | return need_sleep; | 310 | return need_sleep; |
314 | } | 311 | } |
315 | 312 | ||
@@ -395,7 +392,7 @@ static struct notifier_block xenstore_notifier; | |||
395 | 392 | ||
396 | static int __init balloon_init(void) | 393 | static int __init balloon_init(void) |
397 | { | 394 | { |
398 | unsigned long pfn; | 395 | unsigned long pfn, extra_pfn_end; |
399 | struct page *page; | 396 | struct page *page; |
400 | 397 | ||
401 | if (!xen_pv_domain()) | 398 | if (!xen_pv_domain()) |
@@ -416,10 +413,15 @@ static int __init balloon_init(void) | |||
416 | register_balloon(&balloon_sysdev); | 413 | register_balloon(&balloon_sysdev); |
417 | 414 | ||
418 | /* Initialise the balloon with excess memory space. */ | 415 | /* Initialise the balloon with excess memory space. */ |
419 | for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { | 416 | extra_pfn_end = min(e820_end_of_ram_pfn(), |
417 | (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size)); | ||
418 | for (pfn = PFN_UP(xen_extra_mem_start); | ||
419 | pfn < extra_pfn_end; | ||
420 | pfn++) { | ||
420 | page = pfn_to_page(pfn); | 421 | page = pfn_to_page(pfn); |
421 | if (!PageReserved(page)) | 422 | /* totalram_pages doesn't include the boot-time |
422 | balloon_append(page); | 423 | balloon extension, so don't subtract from it. */ |
424 | __balloon_append(page); | ||
423 | } | 425 | } |
424 | 426 | ||
425 | target_watch.callback = watch_target; | 427 | target_watch.callback = watch_target; |
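
The balloon_append() split above exists because the boot-time seeding loop now hands the extra-memory pages to the balloon without touching totalram_pages: those pages were never counted as RAM, so only runtime ballooning should decrement the counter. A user-space sketch of the split; the counters and the int "page" are simplifications of the real list handling:

#include <stdio.h>

static unsigned long totalram_pages = 1000;	/* pretend RAM accounting */
static unsigned long balloon_low;		/* pages held by the balloon */

/* Inner helper: only queue the page on the balloon. */
static void __balloon_append(int page)
{
	(void)page;
	balloon_low++;
}

/* Outer helper: queue the page and adjust the RAM accounting. */
static void balloon_append(int page)
{
	__balloon_append(page);
	totalram_pages--;
}

int main(void)
{
	__balloon_append(1);	/* boot-time extra memory: accounting untouched */
	balloon_append(2);	/* runtime ballooning: RAM count drops */
	printf("totalram=%lu ballooned=%lu\n", totalram_pages, balloon_low);
	return 0;
}
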
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 321a0c8346e5..2811bb988ea0 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -278,17 +278,17 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) | |||
278 | cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); | 278 | cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); |
279 | #endif | 279 | #endif |
280 | 280 | ||
281 | __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); | 281 | clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); |
282 | __set_bit(chn, cpu_evtchn_mask(cpu)); | 282 | set_bit(chn, cpu_evtchn_mask(cpu)); |
283 | 283 | ||
284 | irq_info[irq].cpu = cpu; | 284 | irq_info[irq].cpu = cpu; |
285 | } | 285 | } |
286 | 286 | ||
287 | static void init_evtchn_cpu_bindings(void) | 287 | static void init_evtchn_cpu_bindings(void) |
288 | { | 288 | { |
289 | int i; | ||
289 | #ifdef CONFIG_SMP | 290 | #ifdef CONFIG_SMP |
290 | struct irq_desc *desc; | 291 | struct irq_desc *desc; |
291 | int i; | ||
292 | 292 | ||
293 | /* By default all event channels notify CPU#0. */ | 293 | /* By default all event channels notify CPU#0. */ |
294 | for_each_irq_desc(i, desc) { | 294 | for_each_irq_desc(i, desc) { |
@@ -296,7 +296,10 @@ static void init_evtchn_cpu_bindings(void) | |||
296 | } | 296 | } |
297 | #endif | 297 | #endif |
298 | 298 | ||
299 | memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s)); | 299 | for_each_possible_cpu(i) |
300 | memset(cpu_evtchn_mask(i), | ||
301 | (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s)); | ||
302 | |||
300 | } | 303 | } |
301 | 304 | ||
302 | static inline void clear_evtchn(int port) | 305 | static inline void clear_evtchn(int port) |
@@ -752,7 +755,7 @@ int xen_destroy_irq(int irq) | |||
752 | goto out; | 755 | goto out; |
753 | 756 | ||
754 | if (xen_initial_domain()) { | 757 | if (xen_initial_domain()) { |
755 | unmap_irq.pirq = info->u.pirq.gsi; | 758 | unmap_irq.pirq = info->u.pirq.pirq; |
756 | unmap_irq.domid = DOMID_SELF; | 759 | unmap_irq.domid = DOMID_SELF; |
757 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); | 760 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); |
758 | if (rc) { | 761 | if (rc) { |
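
init_evtchn_cpu_bindings() above now initialises the per-CPU event-channel mask for every possible CPU (all ones for CPU 0, which receives everything by default, all zeroes for the rest) instead of memsetting only CPU 0's mask, and the bindings switch to the atomic set_bit()/clear_bit() variants. A user-space sketch of the mask initialisation; NR_CPUS and MASK_BYTES are illustrative constants, not the kernel's:

#include <stdio.h>
#include <string.h>

#define NR_CPUS		4
#define MASK_BYTES	8	/* stand-in for sizeof(struct cpu_evtchn_s) */

static unsigned char cpu_evtchn_mask[NR_CPUS][MASK_BYTES];

int main(void)
{
	int i;

	/* CPU 0 starts with every channel bound to it; the others start empty. */
	for (i = 0; i < NR_CPUS; i++)
		memset(cpu_evtchn_mask[i], (i == 0) ? ~0 : 0, MASK_BYTES);

	for (i = 0; i < NR_CPUS; i++)
		printf("cpu%d mask byte0 = 0x%02x\n", i, cpu_evtchn_mask[i][0]);
	return 0;
}
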
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index fec6ba3c08a8..ef11daf0cafe 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c | |||
@@ -69,20 +69,51 @@ struct per_user_data { | |||
69 | const char *name; | 69 | const char *name; |
70 | }; | 70 | }; |
71 | 71 | ||
72 | /* Who's bound to each port? */ | 72 | /* |
73 | static struct per_user_data *port_user[NR_EVENT_CHANNELS]; | 73 | * Who's bound to each port? This is logically an array of struct |
74 | * per_user_data *, but we encode the current enabled-state in bit 0. | ||
75 | */ | ||
76 | static unsigned long *port_user; | ||
74 | static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */ | 77 | static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */ |
75 | 78 | ||
76 | irqreturn_t evtchn_interrupt(int irq, void *data) | 79 | static inline struct per_user_data *get_port_user(unsigned port) |
80 | { | ||
81 | return (struct per_user_data *)(port_user[port] & ~1); | ||
82 | } | ||
83 | |||
84 | static inline void set_port_user(unsigned port, struct per_user_data *u) | ||
85 | { | ||
86 | port_user[port] = (unsigned long)u; | ||
87 | } | ||
88 | |||
89 | static inline bool get_port_enabled(unsigned port) | ||
90 | { | ||
91 | return port_user[port] & 1; | ||
92 | } | ||
93 | |||
94 | static inline void set_port_enabled(unsigned port, bool enabled) | ||
95 | { | ||
96 | if (enabled) | ||
97 | port_user[port] |= 1; | ||
98 | else | ||
99 | port_user[port] &= ~1; | ||
100 | } | ||
101 | |||
102 | static irqreturn_t evtchn_interrupt(int irq, void *data) | ||
77 | { | 103 | { |
78 | unsigned int port = (unsigned long)data; | 104 | unsigned int port = (unsigned long)data; |
79 | struct per_user_data *u; | 105 | struct per_user_data *u; |
80 | 106 | ||
81 | spin_lock(&port_user_lock); | 107 | spin_lock(&port_user_lock); |
82 | 108 | ||
83 | u = port_user[port]; | 109 | u = get_port_user(port); |
110 | |||
111 | WARN(!get_port_enabled(port), | ||
112 | "Interrupt for port %d, but apparently not enabled; per-user %p\n", | ||
113 | port, u); | ||
84 | 114 | ||
85 | disable_irq_nosync(irq); | 115 | disable_irq_nosync(irq); |
116 | set_port_enabled(port, false); | ||
86 | 117 | ||
87 | if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { | 118 | if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { |
88 | u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port; | 119 | u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port; |
@@ -92,9 +123,8 @@ irqreturn_t evtchn_interrupt(int irq, void *data) | |||
92 | kill_fasync(&u->evtchn_async_queue, | 123 | kill_fasync(&u->evtchn_async_queue, |
93 | SIGIO, POLL_IN); | 124 | SIGIO, POLL_IN); |
94 | } | 125 | } |
95 | } else { | 126 | } else |
96 | u->ring_overflow = 1; | 127 | u->ring_overflow = 1; |
97 | } | ||
98 | 128 | ||
99 | spin_unlock(&port_user_lock); | 129 | spin_unlock(&port_user_lock); |
100 | 130 | ||
@@ -198,9 +228,18 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf, | |||
198 | goto out; | 228 | goto out; |
199 | 229 | ||
200 | spin_lock_irq(&port_user_lock); | 230 | spin_lock_irq(&port_user_lock); |
201 | for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) | 231 | |
202 | if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u)) | 232 | for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) { |
203 | enable_irq(irq_from_evtchn(kbuf[i])); | 233 | unsigned port = kbuf[i]; |
234 | |||
235 | if (port < NR_EVENT_CHANNELS && | ||
236 | get_port_user(port) == u && | ||
237 | !get_port_enabled(port)) { | ||
238 | set_port_enabled(port, true); | ||
239 | enable_irq(irq_from_evtchn(port)); | ||
240 | } | ||
241 | } | ||
242 | |||
204 | spin_unlock_irq(&port_user_lock); | 243 | spin_unlock_irq(&port_user_lock); |
205 | 244 | ||
206 | rc = count; | 245 | rc = count; |
@@ -222,8 +261,9 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) | |||
222 | * interrupt handler yet, and our caller has already | 261 | * interrupt handler yet, and our caller has already |
223 | * serialized bind operations.) | 262 | * serialized bind operations.) |
224 | */ | 263 | */ |
225 | BUG_ON(port_user[port] != NULL); | 264 | BUG_ON(get_port_user(port) != NULL); |
226 | port_user[port] = u; | 265 | set_port_user(port, u); |
266 | set_port_enabled(port, true); /* start enabled */ | ||
227 | 267 | ||
228 | rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, | 268 | rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, |
229 | u->name, (void *)(unsigned long)port); | 269 | u->name, (void *)(unsigned long)port); |
@@ -239,10 +279,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port) | |||
239 | 279 | ||
240 | unbind_from_irqhandler(irq, (void *)(unsigned long)port); | 280 | unbind_from_irqhandler(irq, (void *)(unsigned long)port); |
241 | 281 | ||
242 | /* make sure we unbind the irq handler before clearing the port */ | 282 | set_port_user(port, NULL); |
243 | barrier(); | ||
244 | |||
245 | port_user[port] = NULL; | ||
246 | } | 283 | } |
247 | 284 | ||
248 | static long evtchn_ioctl(struct file *file, | 285 | static long evtchn_ioctl(struct file *file, |
@@ -333,15 +370,17 @@ static long evtchn_ioctl(struct file *file, | |||
333 | spin_lock_irq(&port_user_lock); | 370 | spin_lock_irq(&port_user_lock); |
334 | 371 | ||
335 | rc = -ENOTCONN; | 372 | rc = -ENOTCONN; |
336 | if (port_user[unbind.port] != u) { | 373 | if (get_port_user(unbind.port) != u) { |
337 | spin_unlock_irq(&port_user_lock); | 374 | spin_unlock_irq(&port_user_lock); |
338 | break; | 375 | break; |
339 | } | 376 | } |
340 | 377 | ||
341 | evtchn_unbind_from_user(u, unbind.port); | 378 | disable_irq(irq_from_evtchn(unbind.port)); |
342 | 379 | ||
343 | spin_unlock_irq(&port_user_lock); | 380 | spin_unlock_irq(&port_user_lock); |
344 | 381 | ||
382 | evtchn_unbind_from_user(u, unbind.port); | ||
383 | |||
345 | rc = 0; | 384 | rc = 0; |
346 | break; | 385 | break; |
347 | } | 386 | } |
@@ -355,7 +394,7 @@ static long evtchn_ioctl(struct file *file, | |||
355 | 394 | ||
356 | if (notify.port >= NR_EVENT_CHANNELS) { | 395 | if (notify.port >= NR_EVENT_CHANNELS) { |
357 | rc = -EINVAL; | 396 | rc = -EINVAL; |
358 | } else if (port_user[notify.port] != u) { | 397 | } else if (get_port_user(notify.port) != u) { |
359 | rc = -ENOTCONN; | 398 | rc = -ENOTCONN; |
360 | } else { | 399 | } else { |
361 | notify_remote_via_evtchn(notify.port); | 400 | notify_remote_via_evtchn(notify.port); |
@@ -431,7 +470,7 @@ static int evtchn_open(struct inode *inode, struct file *filp) | |||
431 | 470 | ||
432 | filp->private_data = u; | 471 | filp->private_data = u; |
433 | 472 | ||
434 | return 0; | 473 | return nonseekable_open(inode, filp); |
435 | } | 474 | } |
436 | 475 | ||
437 | static int evtchn_release(struct inode *inode, struct file *filp) | 476 | static int evtchn_release(struct inode *inode, struct file *filp) |
@@ -444,14 +483,21 @@ static int evtchn_release(struct inode *inode, struct file *filp) | |||
444 | free_page((unsigned long)u->ring); | 483 | free_page((unsigned long)u->ring); |
445 | 484 | ||
446 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { | 485 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { |
447 | if (port_user[i] != u) | 486 | if (get_port_user(i) != u) |
448 | continue; | 487 | continue; |
449 | 488 | ||
450 | evtchn_unbind_from_user(port_user[i], i); | 489 | disable_irq(irq_from_evtchn(i)); |
451 | } | 490 | } |
452 | 491 | ||
453 | spin_unlock_irq(&port_user_lock); | 492 | spin_unlock_irq(&port_user_lock); |
454 | 493 | ||
494 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { | ||
495 | if (get_port_user(i) != u) | ||
496 | continue; | ||
497 | |||
498 | evtchn_unbind_from_user(get_port_user(i), i); | ||
499 | } | ||
500 | |||
455 | kfree(u->name); | 501 | kfree(u->name); |
456 | kfree(u); | 502 | kfree(u); |
457 | 503 | ||
@@ -467,12 +513,12 @@ static const struct file_operations evtchn_fops = { | |||
467 | .fasync = evtchn_fasync, | 513 | .fasync = evtchn_fasync, |
468 | .open = evtchn_open, | 514 | .open = evtchn_open, |
469 | .release = evtchn_release, | 515 | .release = evtchn_release, |
470 | .llseek = noop_llseek, | 516 | .llseek = no_llseek, |
471 | }; | 517 | }; |
472 | 518 | ||
473 | static struct miscdevice evtchn_miscdev = { | 519 | static struct miscdevice evtchn_miscdev = { |
474 | .minor = MISC_DYNAMIC_MINOR, | 520 | .minor = MISC_DYNAMIC_MINOR, |
475 | .name = "evtchn", | 521 | .name = "xen/evtchn", |
476 | .fops = &evtchn_fops, | 522 | .fops = &evtchn_fops, |
477 | }; | 523 | }; |
478 | static int __init evtchn_init(void) | 524 | static int __init evtchn_init(void) |
@@ -482,8 +528,11 @@ static int __init evtchn_init(void) | |||
482 | if (!xen_domain()) | 528 | if (!xen_domain()) |
483 | return -ENODEV; | 529 | return -ENODEV; |
484 | 530 | ||
531 | port_user = kcalloc(NR_EVENT_CHANNELS, sizeof(*port_user), GFP_KERNEL); | ||
532 | if (port_user == NULL) | ||
533 | return -ENOMEM; | ||
534 | |||
485 | spin_lock_init(&port_user_lock); | 535 | spin_lock_init(&port_user_lock); |
486 | memset(port_user, 0, sizeof(port_user)); | ||
487 | 536 | ||
488 | /* Create '/dev/misc/evtchn'. */ | 537 | /* Create '/dev/misc/evtchn'. */ |
489 | err = misc_register(&evtchn_miscdev); | 538 | err = misc_register(&evtchn_miscdev); |
@@ -499,6 +548,9 @@ static int __init evtchn_init(void) | |||
499 | 548 | ||
500 | static void __exit evtchn_cleanup(void) | 549 | static void __exit evtchn_cleanup(void) |
501 | { | 550 | { |
551 | kfree(port_user); | ||
552 | port_user = NULL; | ||
553 | |||
502 | misc_deregister(&evtchn_miscdev); | 554 | misc_deregister(&evtchn_miscdev); |
503 | } | 555 | } |
504 | 556 | ||
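
The largest change above replaces the static port_user[] array of pointers with a dynamically allocated array of unsigned longs in which bit 0 carries a per-port "enabled" flag; the flag fits there because struct per_user_data pointers are at least 2-byte aligned. The interrupt handler clears the flag as it disables the IRQ, and evtchn_write() only re-enables ports whose flag is still clear, avoiding unbalanced enable_irq() calls. A user-space sketch of the tagging helpers; the single-slot state and the simplified struct are illustrative:

#include <stdio.h>

struct per_user_data {
	const char *name;	/* the real struct carries ring buffers etc. */
};

static unsigned long slot;	/* pointer with "enabled" squeezed into bit 0 */

static struct per_user_data *get_port_user(void)
{
	return (struct per_user_data *)(slot & ~1UL);
}

static void set_port_user(struct per_user_data *u)
{
	slot = (unsigned long)u;
}

static int get_port_enabled(void)
{
	return slot & 1;
}

static void set_port_enabled(int enabled)
{
	if (enabled)
		slot |= 1;
	else
		slot &= ~1UL;
}

int main(void)
{
	struct per_user_data u = { "demo" };

	set_port_user(&u);
	set_port_enabled(1);
	printf("user=%s enabled=%d\n", get_port_user()->name, get_port_enabled());
	set_port_enabled(0);
	printf("user=%s enabled=%d\n", get_port_user()->name, get_port_enabled());
	return 0;
}
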
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c index f80be7f6eb95..dbd3b16fd131 100644 --- a/drivers/xen/xenfs/privcmd.c +++ b/drivers/xen/xenfs/privcmd.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/mman.h> | 15 | #include <linux/mman.h> |
16 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
17 | #include <linux/swap.h> | 17 | #include <linux/swap.h> |
18 | #include <linux/smp_lock.h> | ||
19 | #include <linux/highmem.h> | 18 | #include <linux/highmem.h> |
20 | #include <linux/pagemap.h> | 19 | #include <linux/pagemap.h> |
21 | #include <linux/seq_file.h> | 20 | #include <linux/seq_file.h> |
@@ -266,9 +265,7 @@ static int mmap_return_errors(void *data, void *state) | |||
266 | xen_pfn_t *mfnp = data; | 265 | xen_pfn_t *mfnp = data; |
267 | struct mmap_batch_state *st = state; | 266 | struct mmap_batch_state *st = state; |
268 | 267 | ||
269 | put_user(*mfnp, st->user++); | 268 | return put_user(*mfnp, st->user++); |
270 | |||
271 | return 0; | ||
272 | } | 269 | } |
273 | 270 | ||
274 | static struct vm_operations_struct privcmd_vm_ops; | 271 | static struct vm_operations_struct privcmd_vm_ops; |
@@ -323,10 +320,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata) | |||
323 | up_write(&mm->mmap_sem); | 320 | up_write(&mm->mmap_sem); |
324 | 321 | ||
325 | if (state.err > 0) { | 322 | if (state.err > 0) { |
326 | ret = 0; | ||
327 | |||
328 | state.user = m.arr; | 323 | state.user = m.arr; |
329 | traverse_pages(m.num, sizeof(xen_pfn_t), | 324 | ret = traverse_pages(m.num, sizeof(xen_pfn_t), |
330 | &pagelist, | 325 | &pagelist, |
331 | mmap_return_errors, &state); | 326 | mmap_return_errors, &state); |
332 | } | 327 | } |
@@ -384,8 +379,9 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) | |||
384 | if (xen_feature(XENFEAT_auto_translated_physmap)) | 379 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
385 | return -ENOSYS; | 380 | return -ENOSYS; |
386 | 381 | ||
387 | /* DONTCOPY is essential for Xen as copy_page_range is broken. */ | 382 | /* DONTCOPY is essential for Xen because copy_page_range doesn't know |
388 | vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY; | 383 | * how to recreate these mappings */ |
384 | vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP; | ||
389 | vma->vm_ops = &privcmd_vm_ops; | 385 | vma->vm_ops = &privcmd_vm_ops; |
390 | vma->vm_private_data = NULL; | 386 | vma->vm_private_data = NULL; |
391 | 387 | ||
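
mmap_return_errors() above now returns put_user()'s status and privcmd_ioctl_mmap_batch() forwards traverse_pages()'s result, so a failed copy-out to user space surfaces as the ioctl's return value instead of being silently ignored. A user-space sketch of that error-propagation pattern; the function names, the -14 stand-in for -EFAULT and the stop-on-first-error loop are illustrative:

#include <stdio.h>

#define EFAULT_ERR (-14)	/* stand-in for -EFAULT */

/* Per-entry callback: 0 on success, negative error on failure, mirroring
 * put_user()'s convention. */
static int copy_out(int value, int should_fail)
{
	if (should_fail)
		return EFAULT_ERR;
	printf("copied %d\n", value);
	return 0;
}

/* The walker forwards the callback's status instead of discarding it. */
static int traverse(const int *vals, int n, int should_fail)
{
	int i, ret = 0;

	for (i = 0; i < n && ret == 0; i++)
		ret = copy_out(vals[i], should_fail);
	return ret;
}

int main(void)
{
	int vals[] = { 1, 2, 3 };

	printf("ok run   -> %d\n", traverse(vals, 3, 0));
	printf("fail run -> %d\n", traverse(vals, 3, 1));
	return 0;
}
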
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c index f6339d11d59c..1aa389719846 100644 --- a/drivers/xen/xenfs/super.c +++ b/drivers/xen/xenfs/super.c | |||
@@ -12,8 +12,6 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
14 | #include <linux/magic.h> | 14 | #include <linux/magic.h> |
15 | #include <linux/mm.h> | ||
16 | #include <linux/backing-dev.h> | ||
17 | 15 | ||
18 | #include <xen/xen.h> | 16 | #include <xen/xen.h> |
19 | 17 | ||
@@ -24,28 +22,12 @@ | |||
24 | MODULE_DESCRIPTION("Xen filesystem"); | 22 | MODULE_DESCRIPTION("Xen filesystem"); |
25 | MODULE_LICENSE("GPL"); | 23 | MODULE_LICENSE("GPL"); |
26 | 24 | ||
27 | static int xenfs_set_page_dirty(struct page *page) | ||
28 | { | ||
29 | return !TestSetPageDirty(page); | ||
30 | } | ||
31 | |||
32 | static const struct address_space_operations xenfs_aops = { | ||
33 | .set_page_dirty = xenfs_set_page_dirty, | ||
34 | }; | ||
35 | |||
36 | static struct backing_dev_info xenfs_backing_dev_info = { | ||
37 | .ra_pages = 0, /* No readahead */ | ||
38 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, | ||
39 | }; | ||
40 | |||
41 | static struct inode *xenfs_make_inode(struct super_block *sb, int mode) | 25 | static struct inode *xenfs_make_inode(struct super_block *sb, int mode) |
42 | { | 26 | { |
43 | struct inode *ret = new_inode(sb); | 27 | struct inode *ret = new_inode(sb); |
44 | 28 | ||
45 | if (ret) { | 29 | if (ret) { |
46 | ret->i_mode = mode; | 30 | ret->i_mode = mode; |
47 | ret->i_mapping->a_ops = &xenfs_aops; | ||
48 | ret->i_mapping->backing_dev_info = &xenfs_backing_dev_info; | ||
49 | ret->i_uid = ret->i_gid = 0; | 31 | ret->i_uid = ret->i_gid = 0; |
50 | ret->i_blocks = 0; | 32 | ret->i_blocks = 0; |
51 | ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; | 33 | ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; |
@@ -121,9 +103,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent) | |||
121 | return rc; | 103 | return rc; |
122 | } | 104 | } |
123 | 105 | ||
124 | static int xenfs_mount(struct file_system_type *fs_type, | 106 | static struct dentry *xenfs_mount(struct file_system_type *fs_type, |
125 | int flags, const char *dev_name, | 107 | int flags, const char *dev_name, |
126 | void *data) | 108 | void *data) |
127 | { | 109 | { |
128 | return mount_single(fs_type, flags, data, xenfs_fill_super); | 110 | return mount_single(fs_type, flags, data, xenfs_fill_super); |
129 | } | 111 | } |
@@ -137,25 +119,11 @@ static struct file_system_type xenfs_type = { | |||
137 | 119 | ||
138 | static int __init xenfs_init(void) | 120 | static int __init xenfs_init(void) |
139 | { | 121 | { |
140 | int err; | 122 | if (xen_domain()) |
141 | if (!xen_domain()) { | 123 | return register_filesystem(&xenfs_type); |
142 | printk(KERN_INFO "xenfs: not registering filesystem on non-xen platform\n"); | ||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | err = register_filesystem(&xenfs_type); | ||
147 | if (err) { | ||
148 | printk(KERN_ERR "xenfs: Unable to register filesystem!\n"); | ||
149 | goto out; | ||
150 | } | ||
151 | |||
152 | err = bdi_init(&xenfs_backing_dev_info); | ||
153 | if (err) | ||
154 | unregister_filesystem(&xenfs_type); | ||
155 | |||
156 | out: | ||
157 | 124 | ||
158 | return err; | 125 | printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n"); |
126 | return 0; | ||
159 | } | 127 | } |
160 | 128 | ||
161 | static void __exit xenfs_exit(void) | 129 | static void __exit xenfs_exit(void) |
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c index cafc50454292..e0c84725d3e9 100644 --- a/drivers/zorro/proc.c +++ b/drivers/zorro/proc.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/proc_fs.h> | 13 | #include <linux/proc_fs.h> |
14 | #include <linux/seq_file.h> | 14 | #include <linux/seq_file.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/smp_lock.h> | ||
17 | #include <asm/uaccess.h> | 16 | #include <asm/uaccess.h> |
18 | #include <asm/amigahw.h> | 17 | #include <asm/amigahw.h> |
19 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 06e8ff12b97c..4230252fd689 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/kmod.h> | 12 | #include <linux/kmod.h> |
13 | #include <linux/major.h> | 13 | #include <linux/major.h> |
14 | #include <linux/smp_lock.h> | ||
15 | #include <linux/device_cgroup.h> | 14 | #include <linux/device_cgroup.h> |
16 | #include <linux/highmem.h> | 15 | #include <linux/highmem.h> |
17 | #include <linux/blkdev.h> | 16 | #include <linux/blkdev.h> |
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index e9c874abc9e1..561438b6a50c 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
@@ -204,7 +204,7 @@ static int readpage_nounlock(struct file *filp, struct page *page) | |||
204 | err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, | 204 | err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, |
205 | page->index << PAGE_CACHE_SHIFT, &len, | 205 | page->index << PAGE_CACHE_SHIFT, &len, |
206 | ci->i_truncate_seq, ci->i_truncate_size, | 206 | ci->i_truncate_seq, ci->i_truncate_size, |
207 | &page, 1); | 207 | &page, 1, 0); |
208 | if (err == -ENOENT) | 208 | if (err == -ENOENT) |
209 | err = 0; | 209 | err = 0; |
210 | if (err < 0) { | 210 | if (err < 0) { |
@@ -287,7 +287,7 @@ static int ceph_readpages(struct file *file, struct address_space *mapping, | |||
287 | rc = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, | 287 | rc = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, |
288 | offset, &len, | 288 | offset, &len, |
289 | ci->i_truncate_seq, ci->i_truncate_size, | 289 | ci->i_truncate_seq, ci->i_truncate_size, |
290 | pages, nr_pages); | 290 | pages, nr_pages, 0); |
291 | if (rc == -ENOENT) | 291 | if (rc == -ENOENT) |
292 | rc = 0; | 292 | rc = 0; |
293 | if (rc < 0) | 293 | if (rc < 0) |
@@ -774,7 +774,7 @@ get_more_pages: | |||
774 | snapc, do_sync, | 774 | snapc, do_sync, |
775 | ci->i_truncate_seq, | 775 | ci->i_truncate_seq, |
776 | ci->i_truncate_size, | 776 | ci->i_truncate_size, |
777 | &inode->i_mtime, true, 1); | 777 | &inode->i_mtime, true, 1, 0); |
778 | max_pages = req->r_num_pages; | 778 | max_pages = req->r_num_pages; |
779 | 779 | ||
780 | alloc_page_vec(fsc, req); | 780 | alloc_page_vec(fsc, req); |
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 98ab13e2b71d..60d27bc9eb83 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -1430,8 +1430,8 @@ static int try_nonblocking_invalidate(struct inode *inode) | |||
1430 | invalidating_gen == ci->i_rdcache_gen) { | 1430 | invalidating_gen == ci->i_rdcache_gen) { |
1431 | /* success. */ | 1431 | /* success. */ |
1432 | dout("try_nonblocking_invalidate %p success\n", inode); | 1432 | dout("try_nonblocking_invalidate %p success\n", inode); |
1433 | ci->i_rdcache_gen = 0; | 1433 | /* save any racing async invalidate some trouble */ |
1434 | ci->i_rdcache_revoking = 0; | 1434 | ci->i_rdcache_revoking = ci->i_rdcache_gen - 1; |
1435 | return 0; | 1435 | return 0; |
1436 | } | 1436 | } |
1437 | dout("try_nonblocking_invalidate %p failed\n", inode); | 1437 | dout("try_nonblocking_invalidate %p failed\n", inode); |
@@ -2273,8 +2273,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | |||
2273 | { | 2273 | { |
2274 | struct ceph_inode_info *ci = ceph_inode(inode); | 2274 | struct ceph_inode_info *ci = ceph_inode(inode); |
2275 | int mds = session->s_mds; | 2275 | int mds = session->s_mds; |
2276 | unsigned seq = le32_to_cpu(grant->seq); | 2276 | int seq = le32_to_cpu(grant->seq); |
2277 | unsigned issue_seq = le32_to_cpu(grant->issue_seq); | ||
2278 | int newcaps = le32_to_cpu(grant->caps); | 2277 | int newcaps = le32_to_cpu(grant->caps); |
2279 | int issued, implemented, used, wanted, dirty; | 2278 | int issued, implemented, used, wanted, dirty; |
2280 | u64 size = le64_to_cpu(grant->size); | 2279 | u64 size = le64_to_cpu(grant->size); |
@@ -2286,8 +2285,8 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | |||
2286 | int revoked_rdcache = 0; | 2285 | int revoked_rdcache = 0; |
2287 | int queue_invalidate = 0; | 2286 | int queue_invalidate = 0; |
2288 | 2287 | ||
2289 | dout("handle_cap_grant inode %p cap %p mds%d seq %u/%u %s\n", | 2288 | dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n", |
2290 | inode, cap, mds, seq, issue_seq, ceph_cap_string(newcaps)); | 2289 | inode, cap, mds, seq, ceph_cap_string(newcaps)); |
2291 | dout(" size %llu max_size %llu, i_size %llu\n", size, max_size, | 2290 | dout(" size %llu max_size %llu, i_size %llu\n", size, max_size, |
2292 | inode->i_size); | 2291 | inode->i_size); |
2293 | 2292 | ||
@@ -2383,7 +2382,6 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | |||
2383 | } | 2382 | } |
2384 | 2383 | ||
2385 | cap->seq = seq; | 2384 | cap->seq = seq; |
2386 | cap->issue_seq = issue_seq; | ||
2387 | 2385 | ||
2388 | /* file layout may have changed */ | 2386 | /* file layout may have changed */ |
2389 | ci->i_layout = grant->layout; | 2387 | ci->i_layout = grant->layout; |
@@ -2691,6 +2689,11 @@ static void handle_cap_import(struct ceph_mds_client *mdsc, | |||
2691 | NULL /* no caps context */); | 2689 | NULL /* no caps context */); |
2692 | try_flush_caps(inode, session, NULL); | 2690 | try_flush_caps(inode, session, NULL); |
2693 | up_read(&mdsc->snap_rwsem); | 2691 | up_read(&mdsc->snap_rwsem); |
2692 | |||
2693 | /* make sure we re-request max_size, if necessary */ | ||
2694 | spin_lock(&inode->i_lock); | ||
2695 | ci->i_requested_max_size = 0; | ||
2696 | spin_unlock(&inode->i_lock); | ||
2694 | } | 2697 | } |
2695 | 2698 | ||
2696 | /* | 2699 | /* |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index e0a2dc6fcafc..7d447af84ec4 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -336,7 +336,10 @@ more: | |||
336 | if (req->r_reply_info.dir_end) { | 336 | if (req->r_reply_info.dir_end) { |
337 | kfree(fi->last_name); | 337 | kfree(fi->last_name); |
338 | fi->last_name = NULL; | 338 | fi->last_name = NULL; |
339 | fi->next_offset = 2; | 339 | if (ceph_frag_is_rightmost(frag)) |
340 | fi->next_offset = 2; | ||
341 | else | ||
342 | fi->next_offset = 0; | ||
340 | } else { | 343 | } else { |
341 | rinfo = &req->r_reply_info; | 344 | rinfo = &req->r_reply_info; |
342 | err = note_last_dentry(fi, | 345 | err = note_last_dentry(fi, |
@@ -355,18 +358,22 @@ more: | |||
355 | u64 pos = ceph_make_fpos(frag, off); | 358 | u64 pos = ceph_make_fpos(frag, off); |
356 | struct ceph_mds_reply_inode *in = | 359 | struct ceph_mds_reply_inode *in = |
357 | rinfo->dir_in[off - fi->offset].in; | 360 | rinfo->dir_in[off - fi->offset].in; |
361 | struct ceph_vino vino; | ||
362 | ino_t ino; | ||
363 | |||
358 | dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n", | 364 | dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n", |
359 | off, off - fi->offset, rinfo->dir_nr, pos, | 365 | off, off - fi->offset, rinfo->dir_nr, pos, |
360 | rinfo->dir_dname_len[off - fi->offset], | 366 | rinfo->dir_dname_len[off - fi->offset], |
361 | rinfo->dir_dname[off - fi->offset], in); | 367 | rinfo->dir_dname[off - fi->offset], in); |
362 | BUG_ON(!in); | 368 | BUG_ON(!in); |
363 | ftype = le32_to_cpu(in->mode) >> 12; | 369 | ftype = le32_to_cpu(in->mode) >> 12; |
370 | vino.ino = le64_to_cpu(in->ino); | ||
371 | vino.snap = le64_to_cpu(in->snapid); | ||
372 | ino = ceph_vino_to_ino(vino); | ||
364 | if (filldir(dirent, | 373 | if (filldir(dirent, |
365 | rinfo->dir_dname[off - fi->offset], | 374 | rinfo->dir_dname[off - fi->offset], |
366 | rinfo->dir_dname_len[off - fi->offset], | 375 | rinfo->dir_dname_len[off - fi->offset], |
367 | pos, | 376 | pos, ino, ftype) < 0) { |
368 | le64_to_cpu(in->ino), | ||
369 | ftype) < 0) { | ||
370 | dout("filldir stopping us...\n"); | 377 | dout("filldir stopping us...\n"); |
371 | return 0; | 378 | return 0; |
372 | } | 379 | } |
@@ -414,6 +421,7 @@ static void reset_readdir(struct ceph_file_info *fi) | |||
414 | fi->last_readdir = NULL; | 421 | fi->last_readdir = NULL; |
415 | } | 422 | } |
416 | kfree(fi->last_name); | 423 | kfree(fi->last_name); |
424 | fi->last_name = NULL; | ||
417 | fi->next_offset = 2; /* compensate for . and .. */ | 425 | fi->next_offset = 2; /* compensate for . and .. */ |
418 | if (fi->dentry) { | 426 | if (fi->dentry) { |
419 | dput(fi->dentry); | 427 | dput(fi->dentry); |
diff --git a/fs/ceph/file.c b/fs/ceph/file.c index e77c28cf3690..8d79b8912e31 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c | |||
@@ -154,11 +154,13 @@ int ceph_open(struct inode *inode, struct file *file) | |||
154 | } | 154 | } |
155 | 155 | ||
156 | /* | 156 | /* |
157 | * No need to block if we have any caps. Update wanted set | 157 | * No need to block if we have caps on the auth MDS (for |
158 | * write) or any MDS (for read). Update wanted set | ||
158 | * asynchronously. | 159 | * asynchronously. |
159 | */ | 160 | */ |
160 | spin_lock(&inode->i_lock); | 161 | spin_lock(&inode->i_lock); |
161 | if (__ceph_is_any_real_caps(ci)) { | 162 | if (__ceph_is_any_real_caps(ci) && |
163 | (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) { | ||
162 | int mds_wanted = __ceph_caps_mds_wanted(ci); | 164 | int mds_wanted = __ceph_caps_mds_wanted(ci); |
163 | int issued = __ceph_caps_issued(ci, NULL); | 165 | int issued = __ceph_caps_issued(ci, NULL); |
164 | 166 | ||
@@ -280,11 +282,12 @@ int ceph_release(struct inode *inode, struct file *file) | |||
280 | static int striped_read(struct inode *inode, | 282 | static int striped_read(struct inode *inode, |
281 | u64 off, u64 len, | 283 | u64 off, u64 len, |
282 | struct page **pages, int num_pages, | 284 | struct page **pages, int num_pages, |
283 | int *checkeof) | 285 | int *checkeof, bool align_to_pages) |
284 | { | 286 | { |
285 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); | 287 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); |
286 | struct ceph_inode_info *ci = ceph_inode(inode); | 288 | struct ceph_inode_info *ci = ceph_inode(inode); |
287 | u64 pos, this_len; | 289 | u64 pos, this_len; |
290 | int io_align, page_align; | ||
288 | int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */ | 291 | int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */ |
289 | int left, pages_left; | 292 | int left, pages_left; |
290 | int read; | 293 | int read; |
@@ -300,14 +303,19 @@ static int striped_read(struct inode *inode, | |||
300 | page_pos = pages; | 303 | page_pos = pages; |
301 | pages_left = num_pages; | 304 | pages_left = num_pages; |
302 | read = 0; | 305 | read = 0; |
306 | io_align = off & ~PAGE_MASK; | ||
303 | 307 | ||
304 | more: | 308 | more: |
309 | if (align_to_pages) | ||
310 | page_align = (pos - io_align) & ~PAGE_MASK; | ||
311 | else | ||
312 | page_align = pos & ~PAGE_MASK; | ||
305 | this_len = left; | 313 | this_len = left; |
306 | ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), | 314 | ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), |
307 | &ci->i_layout, pos, &this_len, | 315 | &ci->i_layout, pos, &this_len, |
308 | ci->i_truncate_seq, | 316 | ci->i_truncate_seq, |
309 | ci->i_truncate_size, | 317 | ci->i_truncate_size, |
310 | page_pos, pages_left); | 318 | page_pos, pages_left, page_align); |
311 | hit_stripe = this_len < left; | 319 | hit_stripe = this_len < left; |
312 | was_short = ret >= 0 && ret < this_len; | 320 | was_short = ret >= 0 && ret < this_len; |
313 | if (ret == -ENOENT) | 321 | if (ret == -ENOENT) |
@@ -374,26 +382,25 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data, | |||
374 | dout("sync_read on file %p %llu~%u %s\n", file, off, len, | 382 | dout("sync_read on file %p %llu~%u %s\n", file, off, len, |
375 | (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); | 383 | (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); |
376 | 384 | ||
377 | if (file->f_flags & O_DIRECT) { | 385 | if (file->f_flags & O_DIRECT) |
378 | pages = ceph_get_direct_page_vector(data, num_pages, off, len); | 386 | pages = ceph_get_direct_page_vector(data, num_pages); |
379 | 387 | else | |
380 | /* | ||
381 | * flush any page cache pages in this range. this | ||
382 | * will make concurrent normal and O_DIRECT io slow, | ||
383 | * but it will at least behave sensibly when they are | ||
384 | * in sequence. | ||
385 | */ | ||
386 | } else { | ||
387 | pages = ceph_alloc_page_vector(num_pages, GFP_NOFS); | 388 | pages = ceph_alloc_page_vector(num_pages, GFP_NOFS); |
388 | } | ||
389 | if (IS_ERR(pages)) | 389 | if (IS_ERR(pages)) |
390 | return PTR_ERR(pages); | 390 | return PTR_ERR(pages); |
391 | 391 | ||
392 | /* | ||
393 | * flush any page cache pages in this range. this | ||
394 | * will make concurrent normal and sync io slow, | ||
395 | * but it will at least behave sensibly when they are | ||
396 | * in sequence. | ||
397 | */ | ||
392 | ret = filemap_write_and_wait(inode->i_mapping); | 398 | ret = filemap_write_and_wait(inode->i_mapping); |
393 | if (ret < 0) | 399 | if (ret < 0) |
394 | goto done; | 400 | goto done; |
395 | 401 | ||
396 | ret = striped_read(inode, off, len, pages, num_pages, checkeof); | 402 | ret = striped_read(inode, off, len, pages, num_pages, checkeof, |
403 | file->f_flags & O_DIRECT); | ||
397 | 404 | ||
398 | if (ret >= 0 && (file->f_flags & O_DIRECT) == 0) | 405 | if (ret >= 0 && (file->f_flags & O_DIRECT) == 0) |
399 | ret = ceph_copy_page_vector_to_user(pages, data, off, ret); | 406 | ret = ceph_copy_page_vector_to_user(pages, data, off, ret); |
@@ -448,6 +455,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, | |||
448 | int flags; | 455 | int flags; |
449 | int do_sync = 0; | 456 | int do_sync = 0; |
450 | int check_caps = 0; | 457 | int check_caps = 0; |
458 | int page_align, io_align; | ||
451 | int ret; | 459 | int ret; |
452 | struct timespec mtime = CURRENT_TIME; | 460 | struct timespec mtime = CURRENT_TIME; |
453 | 461 | ||
@@ -462,6 +470,8 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, | |||
462 | else | 470 | else |
463 | pos = *offset; | 471 | pos = *offset; |
464 | 472 | ||
473 | io_align = pos & ~PAGE_MASK; | ||
474 | |||
465 | ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left); | 475 | ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left); |
466 | if (ret < 0) | 476 | if (ret < 0) |
467 | return ret; | 477 | return ret; |
@@ -486,20 +496,26 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, | |||
486 | */ | 496 | */ |
487 | more: | 497 | more: |
488 | len = left; | 498 | len = left; |
499 | if (file->f_flags & O_DIRECT) | ||
500 | /* write from beginning of first page, regardless of | ||
501 | io alignment */ | ||
502 | page_align = (pos - io_align) & ~PAGE_MASK; | ||
503 | else | ||
504 | page_align = pos & ~PAGE_MASK; | ||
489 | req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, | 505 | req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, |
490 | ceph_vino(inode), pos, &len, | 506 | ceph_vino(inode), pos, &len, |
491 | CEPH_OSD_OP_WRITE, flags, | 507 | CEPH_OSD_OP_WRITE, flags, |
492 | ci->i_snap_realm->cached_context, | 508 | ci->i_snap_realm->cached_context, |
493 | do_sync, | 509 | do_sync, |
494 | ci->i_truncate_seq, ci->i_truncate_size, | 510 | ci->i_truncate_seq, ci->i_truncate_size, |
495 | &mtime, false, 2); | 511 | &mtime, false, 2, page_align); |
496 | if (!req) | 512 | if (!req) |
497 | return -ENOMEM; | 513 | return -ENOMEM; |
498 | 514 | ||
499 | num_pages = calc_pages_for(pos, len); | 515 | num_pages = calc_pages_for(pos, len); |
500 | 516 | ||
501 | if (file->f_flags & O_DIRECT) { | 517 | if (file->f_flags & O_DIRECT) { |
502 | pages = ceph_get_direct_page_vector(data, num_pages, pos, len); | 518 | pages = ceph_get_direct_page_vector(data, num_pages); |
503 | if (IS_ERR(pages)) { | 519 | if (IS_ERR(pages)) { |
504 | ret = PTR_ERR(pages); | 520 | ret = PTR_ERR(pages); |
505 | goto out; | 521 | goto out; |
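Note on the fs/ceph/file.c hunks above: striped_read() and ceph_sync_write() now pass an explicit page alignment to the OSD layer. For O_DIRECT the offset within the page vector is measured from the start of the I/O (so the first chunk begins at page offset 0), while buffered I/O uses the file position's own in-page offset. A small userspace sketch of that arithmetic, assuming a 4 KiB page size and hypothetical names:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* O_DIRECT: measure the in-page offset relative to the I/O's starting
 * offset (first chunk begins at page offset 0 in the page vector);
 * buffered I/O: just the in-page offset of the current file position. */
static uint64_t page_align(uint64_t pos, uint64_t io_align, int direct)
{
    if (direct)
        return (pos - io_align) & ~PAGE_MASK;
    return pos & ~PAGE_MASK;
}

int main(void)
{
    uint64_t off = 3 * PAGE_SIZE + 512;       /* original I/O offset      */
    uint64_t io_align = off & ~PAGE_MASK;     /* 512                      */
    uint64_t pos = off + PAGE_SIZE;           /* position after one chunk */

    printf("direct=%llu buffered=%llu\n",
           (unsigned long long)page_align(pos, io_align, 1),
           (unsigned long long)page_align(pos, io_align, 0));
    return 0;
}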
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 1d6a45b5a04c..bf1286588f26 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -2,7 +2,6 @@ | |||
2 | 2 | ||
3 | #include <linux/module.h> | 3 | #include <linux/module.h> |
4 | #include <linux/fs.h> | 4 | #include <linux/fs.h> |
5 | #include <linux/smp_lock.h> | ||
6 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
7 | #include <linux/string.h> | 6 | #include <linux/string.h> |
8 | #include <linux/uaccess.h> | 7 | #include <linux/uaccess.h> |
@@ -471,7 +470,9 @@ void ceph_fill_file_time(struct inode *inode, int issued, | |||
471 | 470 | ||
472 | if (issued & (CEPH_CAP_FILE_EXCL| | 471 | if (issued & (CEPH_CAP_FILE_EXCL| |
473 | CEPH_CAP_FILE_WR| | 472 | CEPH_CAP_FILE_WR| |
474 | CEPH_CAP_FILE_BUFFER)) { | 473 | CEPH_CAP_FILE_BUFFER| |
474 | CEPH_CAP_AUTH_EXCL| | ||
475 | CEPH_CAP_XATTR_EXCL)) { | ||
475 | if (timespec_compare(ctime, &inode->i_ctime) > 0) { | 476 | if (timespec_compare(ctime, &inode->i_ctime) > 0) { |
476 | dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", | 477 | dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", |
477 | inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, | 478 | inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, |
@@ -511,7 +512,7 @@ void ceph_fill_file_time(struct inode *inode, int issued, | |||
511 | warn = 1; | 512 | warn = 1; |
512 | } | 513 | } |
513 | } else { | 514 | } else { |
514 | /* we have no write caps; whatever the MDS says is true */ | 515 | /* we have no write|excl caps; whatever the MDS says is true */ |
515 | if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) { | 516 | if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) { |
516 | inode->i_ctime = *ctime; | 517 | inode->i_ctime = *ctime; |
517 | inode->i_mtime = *mtime; | 518 | inode->i_mtime = *mtime; |
@@ -567,12 +568,17 @@ static int fill_inode(struct inode *inode, | |||
567 | 568 | ||
568 | /* | 569 | /* |
569 | * provided version will be odd if inode value is projected, | 570 | * provided version will be odd if inode value is projected, |
570 | * even if stable. skip the update if we have a newer info | 571 | * even if stable. skip the update if we have newer stable |
571 | * (e.g., due to inode info racing form multiple MDSs), or if | 572 | * info (ours>=theirs, e.g. due to racing mds replies), unless |
572 | * we are getting projected (unstable) inode info. | 573 | * we are getting projected (unstable) info (in which case the |
574 | * version is odd, and we want ours>theirs). | ||
575 | * us them | ||
576 | * 2 2 skip | ||
577 | * 3 2 skip | ||
578 | * 3 3 update | ||
573 | */ | 579 | */ |
574 | if (le64_to_cpu(info->version) > 0 && | 580 | if (le64_to_cpu(info->version) > 0 && |
575 | (ci->i_version & ~1) > le64_to_cpu(info->version)) | 581 | (ci->i_version & ~1) >= le64_to_cpu(info->version)) |
576 | goto no_change; | 582 | goto no_change; |
577 | 583 | ||
578 | issued = __ceph_caps_issued(ci, &implemented); | 584 | issued = __ceph_caps_issued(ci, &implemented); |
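Note on the fill_inode() hunk above: the version check now skips incoming info whenever our stable version is at least as new (ours >= theirs), matching the us/them table in the comment, while projected (odd) versions still force an update. A standalone sketch of that comparison with simplified types:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* ours:   our cached i_version (bit 0 set means "projected"/unstable)
 * theirs: the version in the incoming reply (0 means none was sent)
 * Returns true when the incoming info should be ignored.             */
static bool skip_update(uint64_t ours, uint64_t theirs)
{
    if (theirs == 0)
        return false;                  /* no version info: always apply   */
    return (ours & ~1ULL) >= theirs;   /* our stable version is as new    */
}

int main(void)
{
    printf("%d %d %d\n",
           skip_update(2, 2),   /* 1: skip   */
           skip_update(3, 2),   /* 1: skip   */
           skip_update(3, 3));  /* 0: update */
    return 0;
}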
@@ -606,7 +612,14 @@ static int fill_inode(struct inode *inode, | |||
606 | le32_to_cpu(info->time_warp_seq), | 612 | le32_to_cpu(info->time_warp_seq), |
607 | &ctime, &mtime, &atime); | 613 | &ctime, &mtime, &atime); |
608 | 614 | ||
609 | ci->i_max_size = le64_to_cpu(info->max_size); | 615 | /* only update max_size on auth cap */ |
616 | if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) && | ||
617 | ci->i_max_size != le64_to_cpu(info->max_size)) { | ||
618 | dout("max_size %lld -> %llu\n", ci->i_max_size, | ||
619 | le64_to_cpu(info->max_size)); | ||
620 | ci->i_max_size = le64_to_cpu(info->max_size); | ||
621 | } | ||
622 | |||
610 | ci->i_layout = info->layout; | 623 | ci->i_layout = info->layout; |
611 | inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; | 624 | inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; |
612 | 625 | ||
@@ -1055,7 +1068,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
1055 | ininfo = rinfo->targeti.in; | 1068 | ininfo = rinfo->targeti.in; |
1056 | vino.ino = le64_to_cpu(ininfo->ino); | 1069 | vino.ino = le64_to_cpu(ininfo->ino); |
1057 | vino.snap = le64_to_cpu(ininfo->snapid); | 1070 | vino.snap = le64_to_cpu(ininfo->snapid); |
1058 | if (!dn->d_inode) { | 1071 | in = dn->d_inode; |
1072 | if (!in) { | ||
1059 | in = ceph_get_inode(sb, vino); | 1073 | in = ceph_get_inode(sb, vino); |
1060 | if (IS_ERR(in)) { | 1074 | if (IS_ERR(in)) { |
1061 | pr_err("fill_trace bad get_inode " | 1075 | pr_err("fill_trace bad get_inode " |
@@ -1386,11 +1400,8 @@ static void ceph_invalidate_work(struct work_struct *work) | |||
1386 | spin_lock(&inode->i_lock); | 1400 | spin_lock(&inode->i_lock); |
1387 | dout("invalidate_pages %p gen %d revoking %d\n", inode, | 1401 | dout("invalidate_pages %p gen %d revoking %d\n", inode, |
1388 | ci->i_rdcache_gen, ci->i_rdcache_revoking); | 1402 | ci->i_rdcache_gen, ci->i_rdcache_revoking); |
1389 | if (ci->i_rdcache_gen == 0 || | 1403 | if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { |
1390 | ci->i_rdcache_revoking != ci->i_rdcache_gen) { | ||
1391 | BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen); | ||
1392 | /* nevermind! */ | 1404 | /* nevermind! */ |
1393 | ci->i_rdcache_revoking = 0; | ||
1394 | spin_unlock(&inode->i_lock); | 1405 | spin_unlock(&inode->i_lock); |
1395 | goto out; | 1406 | goto out; |
1396 | } | 1407 | } |
@@ -1400,15 +1411,16 @@ static void ceph_invalidate_work(struct work_struct *work) | |||
1400 | ceph_invalidate_nondirty_pages(inode->i_mapping); | 1411 | ceph_invalidate_nondirty_pages(inode->i_mapping); |
1401 | 1412 | ||
1402 | spin_lock(&inode->i_lock); | 1413 | spin_lock(&inode->i_lock); |
1403 | if (orig_gen == ci->i_rdcache_gen) { | 1414 | if (orig_gen == ci->i_rdcache_gen && |
1415 | orig_gen == ci->i_rdcache_revoking) { | ||
1404 | dout("invalidate_pages %p gen %d successful\n", inode, | 1416 | dout("invalidate_pages %p gen %d successful\n", inode, |
1405 | ci->i_rdcache_gen); | 1417 | ci->i_rdcache_gen); |
1406 | ci->i_rdcache_gen = 0; | 1418 | ci->i_rdcache_revoking--; |
1407 | ci->i_rdcache_revoking = 0; | ||
1408 | check = 1; | 1419 | check = 1; |
1409 | } else { | 1420 | } else { |
1410 | dout("invalidate_pages %p gen %d raced, gen now %d\n", | 1421 | dout("invalidate_pages %p gen %d raced, now %d revoking %d\n", |
1411 | inode, orig_gen, ci->i_rdcache_gen); | 1422 | inode, orig_gen, ci->i_rdcache_gen, |
1423 | ci->i_rdcache_revoking); | ||
1412 | } | 1424 | } |
1413 | spin_unlock(&inode->i_lock); | 1425 | spin_unlock(&inode->i_lock); |
1414 | 1426 | ||
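Note on the invalidate hunks above (and the matching try_nonblocking_invalidate() change in fs/ceph/caps.c): i_rdcache_gen is no longer reset to zero; instead i_rdcache_revoking records which generation a queued invalidation targets. A compact model of the worker's bookkeeping, with hypothetical struct and field names:

#include <stdio.h>

struct rdcache {
    unsigned int gen;       /* bumped each time FILE_CACHE is granted     */
    unsigned int revoking;  /* gen value the queued invalidation targets  */
};

/* Async worker: after dropping cached pages, decide whether the work
 * really completed or raced with new grants / a newer revoke.        */
static int finish_invalidate(struct rdcache *rc, unsigned int orig_gen)
{
    if (orig_gen == rc->gen && orig_gen == rc->revoking) {
        rc->revoking--;     /* mark this revocation as handled       */
        return 1;           /* success: safe to release the caps     */
    }
    return 0;               /* raced; leave state for a later pass   */
}

int main(void)
{
    struct rdcache rc = { .gen = 4, .revoking = 4 };

    printf("clean=%d ", finish_invalidate(&rc, 4));   /* 1 */
    rc.gen = 5;                                       /* new FILE_CACHE grant */
    printf("raced=%d\n", finish_invalidate(&rc, 4));  /* 0 */
    return 0;
}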
@@ -1739,7 +1751,7 @@ int ceph_do_getattr(struct inode *inode, int mask) | |||
1739 | return 0; | 1751 | return 0; |
1740 | } | 1752 | } |
1741 | 1753 | ||
1742 | dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask)); | 1754 | dout("do_getattr inode %p mask %s mode 0%o\n", inode, ceph_cap_string(mask), inode->i_mode); |
1743 | if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) | 1755 | if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) |
1744 | return 0; | 1756 | return 0; |
1745 | 1757 | ||
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 3142b15940c2..098b18508479 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -6,7 +6,6 @@ | |||
6 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
7 | #include <linux/debugfs.h> | 7 | #include <linux/debugfs.h> |
8 | #include <linux/seq_file.h> | 8 | #include <linux/seq_file.h> |
9 | #include <linux/smp_lock.h> | ||
10 | 9 | ||
11 | #include "super.h" | 10 | #include "super.h" |
12 | #include "mds_client.h" | 11 | #include "mds_client.h" |
@@ -529,6 +528,9 @@ static void __register_request(struct ceph_mds_client *mdsc, | |||
529 | ceph_mdsc_get_request(req); | 528 | ceph_mdsc_get_request(req); |
530 | __insert_request(mdsc, req); | 529 | __insert_request(mdsc, req); |
531 | 530 | ||
531 | req->r_uid = current_fsuid(); | ||
532 | req->r_gid = current_fsgid(); | ||
533 | |||
532 | if (dir) { | 534 | if (dir) { |
533 | struct ceph_inode_info *ci = ceph_inode(dir); | 535 | struct ceph_inode_info *ci = ceph_inode(dir); |
534 | 536 | ||
@@ -1588,8 +1590,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | |||
1588 | 1590 | ||
1589 | head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); | 1591 | head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); |
1590 | head->op = cpu_to_le32(req->r_op); | 1592 | head->op = cpu_to_le32(req->r_op); |
1591 | head->caller_uid = cpu_to_le32(current_fsuid()); | 1593 | head->caller_uid = cpu_to_le32(req->r_uid); |
1592 | head->caller_gid = cpu_to_le32(current_fsgid()); | 1594 | head->caller_gid = cpu_to_le32(req->r_gid); |
1593 | head->args = req->r_args; | 1595 | head->args = req->r_args; |
1594 | 1596 | ||
1595 | ceph_encode_filepath(&p, end, ino1, path1); | 1597 | ceph_encode_filepath(&p, end, ino1, path1); |
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index d66d63c72355..9341fd4f1432 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h | |||
@@ -170,6 +170,8 @@ struct ceph_mds_request { | |||
170 | 170 | ||
171 | union ceph_mds_request_args r_args; | 171 | union ceph_mds_request_args r_args; |
172 | int r_fmode; /* file mode, if expecting cap */ | 172 | int r_fmode; /* file mode, if expecting cap */ |
173 | uid_t r_uid; | ||
174 | gid_t r_gid; | ||
173 | 175 | ||
174 | /* for choosing which mds to send this request to */ | 176 | /* for choosing which mds to send this request to */ |
175 | int r_direct_mode; | 177 | int r_direct_mode; |
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 1886294e12f7..7f01728a4657 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -293,9 +293,7 @@ struct ceph_inode_info { | |||
293 | int i_rd_ref, i_rdcache_ref, i_wr_ref; | 293 | int i_rd_ref, i_rdcache_ref, i_wr_ref; |
294 | int i_wrbuffer_ref, i_wrbuffer_ref_head; | 294 | int i_wrbuffer_ref, i_wrbuffer_ref_head; |
295 | u32 i_shared_gen; /* increment each time we get FILE_SHARED */ | 295 | u32 i_shared_gen; /* increment each time we get FILE_SHARED */ |
296 | u32 i_rdcache_gen; /* we increment this each time we get | 296 | u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */ |
297 | FILE_CACHE. If it's non-zero, we | ||
298 | _may_ have cached pages. */ | ||
299 | u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */ | 297 | u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */ |
300 | 298 | ||
301 | struct list_head i_unsafe_writes; /* uncommitted sync writes */ | 299 | struct list_head i_unsafe_writes; /* uncommitted sync writes */ |
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 410ed188faa1..a60579b007b0 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/compiler.h> | 19 | #include <linux/compiler.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
22 | #include <linux/smp_lock.h> | ||
23 | #include <linux/ioctl.h> | 22 | #include <linux/ioctl.h> |
24 | #include <linux/if.h> | 23 | #include <linux/if.h> |
25 | #include <linux/if_bridge.h> | 24 | #include <linux/if_bridge.h> |
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index 253732382d37..2720178b7718 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/key.h> | 28 | #include <linux/key.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/seq_file.h> | 30 | #include <linux/seq_file.h> |
31 | #include <linux/smp_lock.h> | ||
32 | #include <linux/file.h> | 31 | #include <linux/file.h> |
33 | #include <linux/crypto.h> | 32 | #include <linux/crypto.h> |
34 | #include "ecryptfs_kernel.h" | 33 | #include "ecryptfs_kernel.h" |
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 2fedaf8b5012..acf8695fa8f0 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/blkdev.h> | 28 | #include <linux/blkdev.h> |
29 | #include <linux/parser.h> | 29 | #include <linux/parser.h> |
30 | #include <linux/smp_lock.h> | ||
31 | #include <linux/buffer_head.h> | 30 | #include <linux/buffer_head.h> |
32 | #include <linux/exportfs.h> | 31 | #include <linux/exportfs.h> |
33 | #include <linux/vfs.h> | 32 | #include <linux/vfs.h> |
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index bf5ae883b1bd..eb3bc2fe647e 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c | |||
@@ -331,6 +331,30 @@ mext_out: | |||
331 | return err; | 331 | return err; |
332 | } | 332 | } |
333 | 333 | ||
334 | case FITRIM: | ||
335 | { | ||
336 | struct super_block *sb = inode->i_sb; | ||
337 | struct fstrim_range range; | ||
338 | int ret = 0; | ||
339 | |||
340 | if (!capable(CAP_SYS_ADMIN)) | ||
341 | return -EPERM; | ||
342 | |||
343 | if (copy_from_user(&range, (struct fstrim_range *)arg, | ||
344 | sizeof(range))) | ||
345 | return -EFAULT; | ||
346 | |||
347 | ret = ext4_trim_fs(sb, &range); | ||
348 | if (ret < 0) | ||
349 | return ret; | ||
350 | |||
351 | if (copy_to_user((struct fstrim_range *)arg, &range, | ||
352 | sizeof(range))) | ||
353 | return -EFAULT; | ||
354 | |||
355 | return 0; | ||
356 | } | ||
357 | |||
334 | default: | 358 | default: |
335 | return -ENOTTY; | 359 | return -ENOTTY; |
336 | } | 360 | } |
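Note on the fs/ext4/ioctl.c hunk above: ext4 now handles FITRIM directly in its ioctl switch rather than through the generic ->trim_fs superblock hook, which later hunks in this series remove from fs/ext4/super.c and fs/ioctl.c. For reference, a minimal userspace caller looks roughly like this; error handling is pared down and the mount point path is an assumption:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <limits.h>
#include <sys/ioctl.h>
#include <linux/fs.h>      /* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/mnt";   /* hypothetical mount */
    struct fstrim_range range = {
        .start  = 0,
        .len    = ULLONG_MAX,   /* trim the whole filesystem */
        .minlen = 0,
    };
    int fd = open(path, O_RDONLY);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (ioctl(fd, FITRIM, &range) < 0) {     /* needs CAP_SYS_ADMIN */
        perror("ioctl(FITRIM)");
        close(fd);
        return 1;
    }
    /* On success the kernel updates range.len with the bytes trimmed. */
    printf("trimmed %llu bytes on %s\n", (unsigned long long)range.len, path);
    close(fd);
    return 0;
}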
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 7f5451cd1d38..beacce11ac50 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
@@ -237,8 +237,6 @@ static void ext4_end_bio(struct bio *bio, int error) | |||
237 | } while (bh != head); | 237 | } while (bh != head); |
238 | } | 238 | } |
239 | 239 | ||
240 | put_io_page(io_end->pages[i]); | ||
241 | |||
242 | /* | 240 | /* |
243 | * If this is a partial write which happened to make | 241 | * If this is a partial write which happened to make |
244 | * all buffers uptodate then we can optimize away a | 242 | * all buffers uptodate then we can optimize away a |
@@ -248,6 +246,8 @@ static void ext4_end_bio(struct bio *bio, int error) | |||
248 | */ | 246 | */ |
249 | if (!partial_write) | 247 | if (!partial_write) |
250 | SetPageUptodate(page); | 248 | SetPageUptodate(page); |
249 | |||
250 | put_io_page(io_end->pages[i]); | ||
251 | } | 251 | } |
252 | io_end->num_io_pages = 0; | 252 | io_end->num_io_pages = 0; |
253 | inode = io_end->inode; | 253 | inode = io_end->inode; |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 61182fe6254e..e32195d6aac3 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -1197,7 +1197,6 @@ static const struct super_operations ext4_sops = { | |||
1197 | .quota_write = ext4_quota_write, | 1197 | .quota_write = ext4_quota_write, |
1198 | #endif | 1198 | #endif |
1199 | .bdev_try_to_free_page = bdev_try_to_free_page, | 1199 | .bdev_try_to_free_page = bdev_try_to_free_page, |
1200 | .trim_fs = ext4_trim_fs | ||
1201 | }; | 1200 | }; |
1202 | 1201 | ||
1203 | static const struct super_operations ext4_nojournal_sops = { | 1202 | static const struct super_operations ext4_nojournal_sops = { |
@@ -2799,9 +2798,6 @@ static void ext4_clear_request_list(void) | |||
2799 | struct ext4_li_request *elr; | 2798 | struct ext4_li_request *elr; |
2800 | 2799 | ||
2801 | mutex_lock(&ext4_li_info->li_list_mtx); | 2800 | mutex_lock(&ext4_li_info->li_list_mtx); |
2802 | if (list_empty(&ext4_li_info->li_request_list)) | ||
2803 | return; | ||
2804 | |||
2805 | list_for_each_safe(pos, n, &ext4_li_info->li_request_list) { | 2801 | list_for_each_safe(pos, n, &ext4_li_info->li_request_list) { |
2806 | elr = list_entry(pos, struct ext4_li_request, | 2802 | elr = list_entry(pos, struct ext4_li_request, |
2807 | lr_request); | 2803 | lr_request); |
@@ -3268,13 +3264,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3268 | * Test whether we have more sectors than will fit in sector_t, | 3264 | * Test whether we have more sectors than will fit in sector_t, |
3269 | * and whether the max offset is addressable by the page cache. | 3265 | * and whether the max offset is addressable by the page cache. |
3270 | */ | 3266 | */ |
3271 | ret = generic_check_addressable(sb->s_blocksize_bits, | 3267 | err = generic_check_addressable(sb->s_blocksize_bits, |
3272 | ext4_blocks_count(es)); | 3268 | ext4_blocks_count(es)); |
3273 | if (ret) { | 3269 | if (err) { |
3274 | ext4_msg(sb, KERN_ERR, "filesystem" | 3270 | ext4_msg(sb, KERN_ERR, "filesystem" |
3275 | " too large to mount safely on this system"); | 3271 | " too large to mount safely on this system"); |
3276 | if (sizeof(sector_t) < 8) | 3272 | if (sizeof(sector_t) < 8) |
3277 | ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled"); | 3273 | ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled"); |
3274 | ret = err; | ||
3278 | goto failed_mount; | 3275 | goto failed_mount; |
3279 | } | 3276 | } |
3280 | 3277 | ||
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index c8224587123f..9242d294fe90 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -134,6 +134,7 @@ EXPORT_SYMBOL_GPL(fuse_do_open); | |||
134 | void fuse_finish_open(struct inode *inode, struct file *file) | 134 | void fuse_finish_open(struct inode *inode, struct file *file) |
135 | { | 135 | { |
136 | struct fuse_file *ff = file->private_data; | 136 | struct fuse_file *ff = file->private_data; |
137 | struct fuse_conn *fc = get_fuse_conn(inode); | ||
137 | 138 | ||
138 | if (ff->open_flags & FOPEN_DIRECT_IO) | 139 | if (ff->open_flags & FOPEN_DIRECT_IO) |
139 | file->f_op = &fuse_direct_io_file_operations; | 140 | file->f_op = &fuse_direct_io_file_operations; |
@@ -141,6 +142,15 @@ void fuse_finish_open(struct inode *inode, struct file *file) | |||
141 | invalidate_inode_pages2(inode->i_mapping); | 142 | invalidate_inode_pages2(inode->i_mapping); |
142 | if (ff->open_flags & FOPEN_NONSEEKABLE) | 143 | if (ff->open_flags & FOPEN_NONSEEKABLE) |
143 | nonseekable_open(inode, file); | 144 | nonseekable_open(inode, file); |
145 | if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { | ||
146 | struct fuse_inode *fi = get_fuse_inode(inode); | ||
147 | |||
148 | spin_lock(&fc->lock); | ||
149 | fi->attr_version = ++fc->attr_version; | ||
150 | i_size_write(inode, 0); | ||
151 | spin_unlock(&fc->lock); | ||
152 | fuse_invalidate_attr(inode); | ||
153 | } | ||
144 | } | 154 | } |
145 | 155 | ||
146 | int fuse_open_common(struct inode *inode, struct file *file, bool isdir) | 156 | int fuse_open_common(struct inode *inode, struct file *file, bool isdir) |
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c index 06d582732d34..5ab3839dfcb9 100644 --- a/fs/gfs2/export.c +++ b/fs/gfs2/export.c | |||
@@ -138,10 +138,8 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, | |||
138 | struct gfs2_inum_host *inum) | 138 | struct gfs2_inum_host *inum) |
139 | { | 139 | { |
140 | struct gfs2_sbd *sdp = sb->s_fs_info; | 140 | struct gfs2_sbd *sdp = sb->s_fs_info; |
141 | struct gfs2_holder i_gh; | ||
142 | struct inode *inode; | 141 | struct inode *inode; |
143 | struct dentry *dentry; | 142 | struct dentry *dentry; |
144 | int error; | ||
145 | 143 | ||
146 | inode = gfs2_ilookup(sb, inum->no_addr); | 144 | inode = gfs2_ilookup(sb, inum->no_addr); |
147 | if (inode) { | 145 | if (inode) { |
@@ -152,52 +150,16 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, | |||
152 | goto out_inode; | 150 | goto out_inode; |
153 | } | 151 | } |
154 | 152 | ||
155 | error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops, | 153 | inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino, |
156 | LM_ST_SHARED, LM_FLAG_ANY, &i_gh); | 154 | GFS2_BLKST_DINODE); |
157 | if (error) | 155 | if (IS_ERR(inode)) |
158 | return ERR_PTR(error); | 156 | return ERR_CAST(inode); |
159 | |||
160 | error = gfs2_check_blk_type(sdp, inum->no_addr, GFS2_BLKST_DINODE); | ||
161 | if (error) | ||
162 | goto fail; | ||
163 | |||
164 | inode = gfs2_inode_lookup(sb, DT_UNKNOWN, inum->no_addr, 0); | ||
165 | if (IS_ERR(inode)) { | ||
166 | error = PTR_ERR(inode); | ||
167 | goto fail; | ||
168 | } | ||
169 | |||
170 | error = gfs2_inode_refresh(GFS2_I(inode)); | ||
171 | if (error) { | ||
172 | iput(inode); | ||
173 | goto fail; | ||
174 | } | ||
175 | |||
176 | /* Pick up the works we bypass in gfs2_inode_lookup */ | ||
177 | if (inode->i_state & I_NEW) | ||
178 | gfs2_set_iop(inode); | ||
179 | |||
180 | if (GFS2_I(inode)->i_no_formal_ino != inum->no_formal_ino) { | ||
181 | iput(inode); | ||
182 | goto fail; | ||
183 | } | ||
184 | |||
185 | error = -EIO; | ||
186 | if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM) { | ||
187 | iput(inode); | ||
188 | goto fail; | ||
189 | } | ||
190 | |||
191 | gfs2_glock_dq_uninit(&i_gh); | ||
192 | 157 | ||
193 | out_inode: | 158 | out_inode: |
194 | dentry = d_obtain_alias(inode); | 159 | dentry = d_obtain_alias(inode); |
195 | if (!IS_ERR(dentry)) | 160 | if (!IS_ERR(dentry)) |
196 | dentry->d_op = &gfs2_dops; | 161 | dentry->d_op = &gfs2_dops; |
197 | return dentry; | 162 | return dentry; |
198 | fail: | ||
199 | gfs2_glock_dq_uninit(&i_gh); | ||
200 | return ERR_PTR(error); | ||
201 | } | 163 | } |
202 | 164 | ||
203 | static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, | 165 | static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 87778857f099..f92c17704169 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -686,21 +686,20 @@ static void delete_work_func(struct work_struct *work) | |||
686 | { | 686 | { |
687 | struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); | 687 | struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); |
688 | struct gfs2_sbd *sdp = gl->gl_sbd; | 688 | struct gfs2_sbd *sdp = gl->gl_sbd; |
689 | struct gfs2_inode *ip = NULL; | 689 | struct gfs2_inode *ip; |
690 | struct inode *inode; | 690 | struct inode *inode; |
691 | u64 no_addr = 0; | 691 | u64 no_addr = gl->gl_name.ln_number; |
692 | |||
693 | ip = gl->gl_object; | ||
694 | /* Note: Unsafe to dereference ip as we don't hold right refs/locks */ | ||
692 | 695 | ||
693 | spin_lock(&gl->gl_spin); | ||
694 | ip = (struct gfs2_inode *)gl->gl_object; | ||
695 | if (ip) | 696 | if (ip) |
696 | no_addr = ip->i_no_addr; | ||
697 | spin_unlock(&gl->gl_spin); | ||
698 | if (ip) { | ||
699 | inode = gfs2_ilookup(sdp->sd_vfs, no_addr); | 697 | inode = gfs2_ilookup(sdp->sd_vfs, no_addr); |
700 | if (inode) { | 698 | else |
701 | d_prune_aliases(inode); | 699 | inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED); |
702 | iput(inode); | 700 | if (inode && !IS_ERR(inode)) { |
703 | } | 701 | d_prune_aliases(inode); |
702 | iput(inode); | ||
704 | } | 703 | } |
705 | gfs2_glock_put(gl); | 704 | gfs2_glock_put(gl); |
706 | } | 705 | } |
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 06370f8bd8cf..e1213f7f9217 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c | |||
@@ -73,49 +73,6 @@ static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr) | |||
73 | return iget5_locked(sb, hash, iget_test, iget_set, &no_addr); | 73 | return iget5_locked(sb, hash, iget_test, iget_set, &no_addr); |
74 | } | 74 | } |
75 | 75 | ||
76 | struct gfs2_skip_data { | ||
77 | u64 no_addr; | ||
78 | int skipped; | ||
79 | }; | ||
80 | |||
81 | static int iget_skip_test(struct inode *inode, void *opaque) | ||
82 | { | ||
83 | struct gfs2_inode *ip = GFS2_I(inode); | ||
84 | struct gfs2_skip_data *data = opaque; | ||
85 | |||
86 | if (ip->i_no_addr == data->no_addr) { | ||
87 | if (inode->i_state & (I_FREEING|I_WILL_FREE)){ | ||
88 | data->skipped = 1; | ||
89 | return 0; | ||
90 | } | ||
91 | return 1; | ||
92 | } | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | static int iget_skip_set(struct inode *inode, void *opaque) | ||
97 | { | ||
98 | struct gfs2_inode *ip = GFS2_I(inode); | ||
99 | struct gfs2_skip_data *data = opaque; | ||
100 | |||
101 | if (data->skipped) | ||
102 | return 1; | ||
103 | inode->i_ino = (unsigned long)(data->no_addr); | ||
104 | ip->i_no_addr = data->no_addr; | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | static struct inode *gfs2_iget_skip(struct super_block *sb, | ||
109 | u64 no_addr) | ||
110 | { | ||
111 | struct gfs2_skip_data data; | ||
112 | unsigned long hash = (unsigned long)no_addr; | ||
113 | |||
114 | data.no_addr = no_addr; | ||
115 | data.skipped = 0; | ||
116 | return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data); | ||
117 | } | ||
118 | |||
119 | /** | 76 | /** |
120 | * GFS2 lookup code fills in vfs inode contents based on info obtained | 77 | * GFS2 lookup code fills in vfs inode contents based on info obtained |
121 | * from directory entry inside gfs2_inode_lookup(). This has caused issues | 78 | * from directory entry inside gfs2_inode_lookup(). This has caused issues |
@@ -243,93 +200,54 @@ fail: | |||
243 | return ERR_PTR(error); | 200 | return ERR_PTR(error); |
244 | } | 201 | } |
245 | 202 | ||
246 | /** | 203 | struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr, |
247 | * gfs2_process_unlinked_inode - Lookup an unlinked inode for reclamation | 204 | u64 *no_formal_ino, unsigned int blktype) |
248 | * and try to reclaim it by doing iput. | ||
249 | * | ||
250 | * This function assumes no rgrp locks are currently held. | ||
251 | * | ||
252 | * @sb: The super block | ||
253 | * no_addr: The inode number | ||
254 | * | ||
255 | */ | ||
256 | |||
257 | void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr) | ||
258 | { | 205 | { |
259 | struct gfs2_sbd *sdp; | 206 | struct super_block *sb = sdp->sd_vfs; |
260 | struct gfs2_inode *ip; | 207 | struct gfs2_holder i_gh; |
261 | struct gfs2_glock *io_gl = NULL; | ||
262 | int error; | ||
263 | struct gfs2_holder gh; | ||
264 | struct inode *inode; | 208 | struct inode *inode; |
209 | int error; | ||
265 | 210 | ||
266 | inode = gfs2_iget_skip(sb, no_addr); | 211 | error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops, |
267 | 212 | LM_ST_SHARED, LM_FLAG_ANY, &i_gh); | |
268 | if (!inode) | 213 | if (error) |
269 | return; | 214 | return ERR_PTR(error); |
270 | |||
271 | /* If it's not a new inode, someone's using it, so leave it alone. */ | ||
272 | if (!(inode->i_state & I_NEW)) { | ||
273 | iput(inode); | ||
274 | return; | ||
275 | } | ||
276 | |||
277 | ip = GFS2_I(inode); | ||
278 | sdp = GFS2_SB(inode); | ||
279 | ip->i_no_formal_ino = -1; | ||
280 | 215 | ||
281 | error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); | 216 | error = gfs2_check_blk_type(sdp, no_addr, blktype); |
282 | if (unlikely(error)) | 217 | if (error) |
283 | goto fail; | 218 | goto fail; |
284 | ip->i_gl->gl_object = ip; | ||
285 | 219 | ||
286 | error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl); | 220 | inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0); |
287 | if (unlikely(error)) | 221 | if (IS_ERR(inode)) |
288 | goto fail_put; | 222 | goto fail; |
289 | |||
290 | set_bit(GIF_INVALID, &ip->i_flags); | ||
291 | error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, LM_FLAG_TRY | GL_EXACT, | ||
292 | &ip->i_iopen_gh); | ||
293 | if (unlikely(error)) | ||
294 | goto fail_iopen; | ||
295 | 223 | ||
296 | ip->i_iopen_gh.gh_gl->gl_object = ip; | 224 | error = gfs2_inode_refresh(GFS2_I(inode)); |
297 | gfs2_glock_put(io_gl); | 225 | if (error) |
298 | io_gl = NULL; | 226 | goto fail_iput; |
299 | 227 | ||
300 | inode->i_mode = DT2IF(DT_UNKNOWN); | 228 | /* Pick up the works we bypass in gfs2_inode_lookup */ |
229 | if (inode->i_state & I_NEW) | ||
230 | gfs2_set_iop(inode); | ||
301 | 231 | ||
302 | /* | 232 | /* Two extra checks for NFS only */ |
303 | * We must read the inode in order to work out its type in | 233 | if (no_formal_ino) { |
304 | * this case. Note that this doesn't happen often as we normally | 234 | error = -ESTALE; |
305 | * know the type beforehand. This code path only occurs during | 235 | if (GFS2_I(inode)->i_no_formal_ino != *no_formal_ino) |
306 | * unlinked inode recovery (where it is safe to do this glock, | 236 | goto fail_iput; |
307 | * which is not true in the general case). | ||
308 | */ | ||
309 | error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY, | ||
310 | &gh); | ||
311 | if (unlikely(error)) | ||
312 | goto fail_glock; | ||
313 | 237 | ||
314 | /* Inode is now uptodate */ | 238 | error = -EIO; |
315 | gfs2_glock_dq_uninit(&gh); | 239 | if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM) |
316 | gfs2_set_iop(inode); | 240 | goto fail_iput; |
317 | 241 | ||
318 | /* The iput will cause it to be deleted. */ | 242 | error = 0; |
319 | iput(inode); | 243 | } |
320 | return; | ||
321 | 244 | ||
322 | fail_glock: | ||
323 | gfs2_glock_dq(&ip->i_iopen_gh); | ||
324 | fail_iopen: | ||
325 | if (io_gl) | ||
326 | gfs2_glock_put(io_gl); | ||
327 | fail_put: | ||
328 | ip->i_gl->gl_object = NULL; | ||
329 | gfs2_glock_put(ip->i_gl); | ||
330 | fail: | 245 | fail: |
331 | iget_failed(inode); | 246 | gfs2_glock_dq_uninit(&i_gh); |
332 | return; | 247 | return error ? ERR_PTR(error) : inode; |
248 | fail_iput: | ||
249 | iput(inode); | ||
250 | goto fail; | ||
333 | } | 251 | } |
334 | 252 | ||
335 | static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) | 253 | static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) |
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index 6720d7d5fbc6..d8499fadcc53 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h | |||
@@ -99,7 +99,9 @@ err: | |||
99 | extern void gfs2_set_iop(struct inode *inode); | 99 | extern void gfs2_set_iop(struct inode *inode); |
100 | extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, | 100 | extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, |
101 | u64 no_addr, u64 no_formal_ino); | 101 | u64 no_addr, u64 no_formal_ino); |
102 | extern void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr); | 102 | extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr, |
103 | u64 *no_formal_ino, | ||
104 | unsigned int blktype); | ||
103 | extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr); | 105 | extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr); |
104 | 106 | ||
105 | extern int gfs2_inode_refresh(struct gfs2_inode *ip); | 107 | extern int gfs2_inode_refresh(struct gfs2_inode *ip); |
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index bef3ab6cf5c1..33c8407b876f 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c | |||
@@ -963,17 +963,18 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al) | |||
963 | * The inode, if one has been found, in inode. | 963 | * The inode, if one has been found, in inode. |
964 | */ | 964 | */ |
965 | 965 | ||
966 | static u64 try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, | 966 | static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip) |
967 | u64 skip) | ||
968 | { | 967 | { |
969 | u32 goal = 0, block; | 968 | u32 goal = 0, block; |
970 | u64 no_addr; | 969 | u64 no_addr; |
971 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 970 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
972 | unsigned int n; | 971 | unsigned int n; |
972 | struct gfs2_glock *gl; | ||
973 | struct gfs2_inode *ip; | ||
974 | int error; | ||
975 | int found = 0; | ||
973 | 976 | ||
974 | for(;;) { | 977 | while (goal < rgd->rd_data) { |
975 | if (goal >= rgd->rd_data) | ||
976 | break; | ||
977 | down_write(&sdp->sd_log_flush_lock); | 978 | down_write(&sdp->sd_log_flush_lock); |
978 | n = 1; | 979 | n = 1; |
979 | block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, | 980 | block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, |
@@ -990,11 +991,32 @@ static u64 try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, | |||
990 | if (no_addr == skip) | 991 | if (no_addr == skip) |
991 | continue; | 992 | continue; |
992 | *last_unlinked = no_addr; | 993 | *last_unlinked = no_addr; |
993 | return no_addr; | 994 | |
995 | error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl); | ||
996 | if (error) | ||
997 | continue; | ||
998 | |||
999 | /* If the inode is already in cache, we can ignore it here | ||
1000 | * because the existing inode disposal code will deal with | ||
1001 | * it when all refs have gone away. Accessing gl_object like | ||
1002 | * this is not safe in general. Here it is ok because we do | ||
1003 | * not dereference the pointer, and we only need an approx | ||
1004 | * answer to whether it is NULL or not. | ||
1005 | */ | ||
1006 | ip = gl->gl_object; | ||
1007 | |||
1008 | if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0) | ||
1009 | gfs2_glock_put(gl); | ||
1010 | else | ||
1011 | found++; | ||
1012 | |||
1013 | /* Limit reclaim to sensible number of tasks */ | ||
1014 | if (found > 2*NR_CPUS) | ||
1015 | return; | ||
994 | } | 1016 | } |
995 | 1017 | ||
996 | rgd->rd_flags &= ~GFS2_RDF_CHECK; | 1018 | rgd->rd_flags &= ~GFS2_RDF_CHECK; |
997 | return 0; | 1019 | return; |
998 | } | 1020 | } |
999 | 1021 | ||
1000 | /** | 1022 | /** |
@@ -1075,11 +1097,9 @@ static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd) | |||
1075 | * Try to acquire rgrp in way which avoids contending with others. | 1097 | * Try to acquire rgrp in way which avoids contending with others. |
1076 | * | 1098 | * |
1077 | * Returns: errno | 1099 | * Returns: errno |
1078 | * unlinked: the block address of an unlinked block to be reclaimed | ||
1079 | */ | 1100 | */ |
1080 | 1101 | ||
1081 | static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked, | 1102 | static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked) |
1082 | u64 *last_unlinked) | ||
1083 | { | 1103 | { |
1084 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1104 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1085 | struct gfs2_rgrpd *rgd, *begin = NULL; | 1105 | struct gfs2_rgrpd *rgd, *begin = NULL; |
@@ -1089,7 +1109,6 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked, | |||
1089 | int loops = 0; | 1109 | int loops = 0; |
1090 | int error, rg_locked; | 1110 | int error, rg_locked; |
1091 | 1111 | ||
1092 | *unlinked = 0; | ||
1093 | rgd = gfs2_blk2rgrpd(sdp, ip->i_goal); | 1112 | rgd = gfs2_blk2rgrpd(sdp, ip->i_goal); |
1094 | 1113 | ||
1095 | while (rgd) { | 1114 | while (rgd) { |
@@ -1106,17 +1125,10 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked, | |||
1106 | case 0: | 1125 | case 0: |
1107 | if (try_rgrp_fit(rgd, al)) | 1126 | if (try_rgrp_fit(rgd, al)) |
1108 | goto out; | 1127 | goto out; |
1109 | /* If the rg came in already locked, there's no | 1128 | if (rgd->rd_flags & GFS2_RDF_CHECK) |
1110 | way we can recover from a failed try_rgrp_unlink | 1129 | try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr); |
1111 | because that would require an iput which can only | ||
1112 | happen after the rgrp is unlocked. */ | ||
1113 | if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK) | ||
1114 | *unlinked = try_rgrp_unlink(rgd, last_unlinked, | ||
1115 | ip->i_no_addr); | ||
1116 | if (!rg_locked) | 1130 | if (!rg_locked) |
1117 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 1131 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
1118 | if (*unlinked) | ||
1119 | return -EAGAIN; | ||
1120 | /* fall through */ | 1132 | /* fall through */ |
1121 | case GLR_TRYFAILED: | 1133 | case GLR_TRYFAILED: |
1122 | rgd = recent_rgrp_next(rgd); | 1134 | rgd = recent_rgrp_next(rgd); |
@@ -1145,13 +1157,10 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked, | |||
1145 | case 0: | 1157 | case 0: |
1146 | if (try_rgrp_fit(rgd, al)) | 1158 | if (try_rgrp_fit(rgd, al)) |
1147 | goto out; | 1159 | goto out; |
1148 | if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK) | 1160 | if (rgd->rd_flags & GFS2_RDF_CHECK) |
1149 | *unlinked = try_rgrp_unlink(rgd, last_unlinked, | 1161 | try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr); |
1150 | ip->i_no_addr); | ||
1151 | if (!rg_locked) | 1162 | if (!rg_locked) |
1152 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 1163 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
1153 | if (*unlinked) | ||
1154 | return -EAGAIN; | ||
1155 | break; | 1164 | break; |
1156 | 1165 | ||
1157 | case GLR_TRYFAILED: | 1166 | case GLR_TRYFAILED: |
@@ -1204,12 +1213,12 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex, | |||
1204 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1213 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1205 | struct gfs2_alloc *al = ip->i_alloc; | 1214 | struct gfs2_alloc *al = ip->i_alloc; |
1206 | int error = 0; | 1215 | int error = 0; |
1207 | u64 last_unlinked = NO_BLOCK, unlinked; | 1216 | u64 last_unlinked = NO_BLOCK; |
1217 | int tries = 0; | ||
1208 | 1218 | ||
1209 | if (gfs2_assert_warn(sdp, al->al_requested)) | 1219 | if (gfs2_assert_warn(sdp, al->al_requested)) |
1210 | return -EINVAL; | 1220 | return -EINVAL; |
1211 | 1221 | ||
1212 | try_again: | ||
1213 | if (hold_rindex) { | 1222 | if (hold_rindex) { |
1214 | /* We need to hold the rindex unless the inode we're using is | 1223 | /* We need to hold the rindex unless the inode we're using is |
1215 | the rindex itself, in which case it's already held. */ | 1224 | the rindex itself, in which case it's already held. */ |
@@ -1218,31 +1227,23 @@ try_again: | |||
1218 | else if (!sdp->sd_rgrps) /* We may not have the rindex read | 1227 | else if (!sdp->sd_rgrps) /* We may not have the rindex read |
1219 | in, so: */ | 1228 | in, so: */ |
1220 | error = gfs2_ri_update_special(ip); | 1229 | error = gfs2_ri_update_special(ip); |
1230 | if (error) | ||
1231 | return error; | ||
1221 | } | 1232 | } |
1222 | 1233 | ||
1223 | if (error) | 1234 | do { |
1224 | return error; | 1235 | error = get_local_rgrp(ip, &last_unlinked); |
1236 | /* If there is no space, flushing the log may release some */ | ||
1237 | if (error) | ||
1238 | gfs2_log_flush(sdp, NULL); | ||
1239 | } while (error && tries++ < 3); | ||
1225 | 1240 | ||
1226 | /* Find an rgrp suitable for allocation. If it encounters any unlinked | ||
1227 | dinodes along the way, error will equal -EAGAIN and unlinked will | ||
1228 | contains it block address. We then need to look up that inode and | ||
1229 | try to free it, and try the allocation again. */ | ||
1230 | error = get_local_rgrp(ip, &unlinked, &last_unlinked); | ||
1231 | if (error) { | 1241 | if (error) { |
1232 | if (hold_rindex && ip != GFS2_I(sdp->sd_rindex)) | 1242 | if (hold_rindex && ip != GFS2_I(sdp->sd_rindex)) |
1233 | gfs2_glock_dq_uninit(&al->al_ri_gh); | 1243 | gfs2_glock_dq_uninit(&al->al_ri_gh); |
1234 | if (error != -EAGAIN) | 1244 | return error; |
1235 | return error; | ||
1236 | |||
1237 | gfs2_process_unlinked_inode(ip->i_inode.i_sb, unlinked); | ||
1238 | /* regardless of whether or not gfs2_process_unlinked_inode | ||
1239 | was successful, we don't want to repeat it again. */ | ||
1240 | last_unlinked = unlinked; | ||
1241 | gfs2_log_flush(sdp, NULL); | ||
1242 | error = 0; | ||
1243 | |||
1244 | goto try_again; | ||
1245 | } | 1245 | } |
1246 | |||
1246 | /* no error, so we have the rgrp set in the inode's allocation. */ | 1247 | /* no error, so we have the rgrp set in the inode's allocation. */ |
1247 | al->al_file = file; | 1248 | al->al_file = file; |
1248 | al->al_line = line; | 1249 | al->al_line = line; |
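The rewritten tail of gfs2_inplace_reserve_i() above replaces the old try_again/goto path with a bounded do/while: when get_local_rgrp() cannot find space, the journal is flushed (which may release pinned blocks) and the lookup is retried up to three more times before the error is returned. A minimal, userspace-compilable sketch of that retry-with-recovery pattern follows; try_allocate() and flush_log() are hypothetical stand-ins for get_local_rgrp() and gfs2_log_flush(), not GFS2 APIs.

/*
 * Hedged sketch of the bounded retry loop introduced above.
 * try_allocate() and flush_log() are invented stand-ins, not kernel calls.
 */
#include <stdio.h>
#include <errno.h>

static int attempts_until_success = 3;   /* pretend the third flush frees space */

static int try_allocate(void)
{
    if (attempts_until_success-- > 0)
        return -ENOSPC;                  /* no resource group fits yet */
    return 0;
}

static void flush_log(void)
{
    printf("flushing log to release pinned blocks\n");
}

int main(void)
{
    int error, tries = 0;

    do {
        error = try_allocate();
        /* If there is no space, flushing the log may release some. */
        if (error)
            flush_log();
    } while (error && tries++ < 3);

    printf("result: %d after %d retries\n", error, tries);
    return error ? 1 : 0;
}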
diff --git a/fs/ioctl.c b/fs/ioctl.c index e92fdbb3bc3a..d6cc16476620 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c | |||
@@ -6,7 +6,6 @@ | |||
6 | 6 | ||
7 | #include <linux/syscalls.h> | 7 | #include <linux/syscalls.h> |
8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
9 | #include <linux/smp_lock.h> | ||
10 | #include <linux/capability.h> | 9 | #include <linux/capability.h> |
11 | #include <linux/file.h> | 10 | #include <linux/file.h> |
12 | #include <linux/fs.h> | 11 | #include <linux/fs.h> |
@@ -530,41 +529,6 @@ static int ioctl_fsthaw(struct file *filp) | |||
530 | return thaw_super(sb); | 529 | return thaw_super(sb); |
531 | } | 530 | } |
532 | 531 | ||
533 | static int ioctl_fstrim(struct file *filp, void __user *argp) | ||
534 | { | ||
535 | struct super_block *sb = filp->f_path.dentry->d_inode->i_sb; | ||
536 | struct fstrim_range range; | ||
537 | int ret = 0; | ||
538 | |||
539 | if (!capable(CAP_SYS_ADMIN)) | ||
540 | return -EPERM; | ||
541 | |||
542 | /* If filesystem doesn't support trim feature, return. */ | ||
543 | if (sb->s_op->trim_fs == NULL) | ||
544 | return -EOPNOTSUPP; | ||
545 | |||
546 | /* If a blockdevice-backed filesystem isn't specified, return EINVAL. */ | ||
547 | if (sb->s_bdev == NULL) | ||
548 | return -EINVAL; | ||
549 | |||
550 | if (argp == NULL) { | ||
551 | range.start = 0; | ||
552 | range.len = ULLONG_MAX; | ||
553 | range.minlen = 0; | ||
554 | } else if (copy_from_user(&range, argp, sizeof(range))) | ||
555 | return -EFAULT; | ||
556 | |||
557 | ret = sb->s_op->trim_fs(sb, &range); | ||
558 | if (ret < 0) | ||
559 | return ret; | ||
560 | |||
561 | if ((argp != NULL) && | ||
562 | (copy_to_user(argp, &range, sizeof(range)))) | ||
563 | return -EFAULT; | ||
564 | |||
565 | return 0; | ||
566 | } | ||
567 | |||
568 | /* | 532 | /* |
569 | * When you add any new common ioctls to the switches above and below | 533 | * When you add any new common ioctls to the switches above and below |
570 | * please update compat_sys_ioctl() too. | 534 | * please update compat_sys_ioctl() too. |
@@ -615,10 +579,6 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, | |||
615 | error = ioctl_fsthaw(filp); | 579 | error = ioctl_fsthaw(filp); |
616 | break; | 580 | break; |
617 | 581 | ||
618 | case FITRIM: | ||
619 | error = ioctl_fstrim(filp, argp); | ||
620 | break; | ||
621 | |||
622 | case FS_IOC_FIEMAP: | 582 | case FS_IOC_FIEMAP: |
623 | return ioctl_fiemap(filp, arg); | 583 | return ioctl_fiemap(filp, arg); |
624 | 584 | ||
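The fs/ioctl.c hunk above removes the generic ioctl_fstrim() handler and the FITRIM dispatch from the common VFS code; the fstrim_range structure itself remains (it is only retyped to __u64 in the include/linux/fs.h hunk later in this diff), so a filesystem that wires up FITRIM in its own ioctl handler still sees the same user-visible contract. A hedged userspace sketch of a FITRIM caller, assuming the target kernel's <linux/fs.h> exports FITRIM and struct fstrim_range and that the mounted filesystem honours the request:

/*
 * Hedged userspace sketch: issuing FITRIM against a mounted filesystem.
 * Assumes <linux/fs.h> provides FITRIM and struct fstrim_range and that
 * the filesystem implements the ioctl; error handling kept minimal.
 */
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
    struct fstrim_range range;
    int fd;

    if (argc != 2) {
        fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
        return 1;
    }

    fd = open(argv[1], O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    memset(&range, 0, sizeof(range));
    range.start = 0;
    range.len = ULLONG_MAX;   /* ask to trim the whole filesystem */
    range.minlen = 0;         /* no minimum extent size */

    if (ioctl(fd, FITRIM, &range) < 0) {
        perror("FITRIM");
        close(fd);
        return 1;
    }

    /* On success the kernel writes back how many bytes were discarded. */
    printf("%llu bytes trimmed\n", (unsigned long long)range.len);
    close(fd);
    return 0;
}

On success range.len is updated with the number of bytes actually discarded, which is how trimming tools report their results.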
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index c590d155c095..f837ba953529 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -899,6 +899,14 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev, | |||
899 | 899 | ||
900 | /* journal descriptor can store up to n blocks -bzzz */ | 900 | /* journal descriptor can store up to n blocks -bzzz */ |
901 | journal->j_blocksize = blocksize; | 901 | journal->j_blocksize = blocksize; |
902 | journal->j_dev = bdev; | ||
903 | journal->j_fs_dev = fs_dev; | ||
904 | journal->j_blk_offset = start; | ||
905 | journal->j_maxlen = len; | ||
906 | bdevname(journal->j_dev, journal->j_devname); | ||
907 | p = journal->j_devname; | ||
908 | while ((p = strchr(p, '/'))) | ||
909 | *p = '!'; | ||
902 | jbd2_stats_proc_init(journal); | 910 | jbd2_stats_proc_init(journal); |
903 | n = journal->j_blocksize / sizeof(journal_block_tag_t); | 911 | n = journal->j_blocksize / sizeof(journal_block_tag_t); |
904 | journal->j_wbufsize = n; | 912 | journal->j_wbufsize = n; |
@@ -908,14 +916,6 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev, | |||
908 | __func__); | 916 | __func__); |
909 | goto out_err; | 917 | goto out_err; |
910 | } | 918 | } |
911 | journal->j_dev = bdev; | ||
912 | journal->j_fs_dev = fs_dev; | ||
913 | journal->j_blk_offset = start; | ||
914 | journal->j_maxlen = len; | ||
915 | bdevname(journal->j_dev, journal->j_devname); | ||
916 | p = journal->j_devname; | ||
917 | while ((p = strchr(p, '/'))) | ||
918 | *p = '!'; | ||
919 | 919 | ||
920 | bh = __getblk(journal->j_dev, start, journal->j_blocksize); | 920 | bh = __getblk(journal->j_dev, start, journal->j_blocksize); |
921 | if (!bh) { | 921 | if (!bh) { |
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index d5bb86866e6c..25509eb28fd7 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/sunrpc/clnt.h> | 14 | #include <linux/sunrpc/clnt.h> |
15 | #include <linux/sunrpc/svc.h> | 15 | #include <linux/sunrpc/svc.h> |
16 | #include <linux/lockd/lockd.h> | 16 | #include <linux/lockd/lockd.h> |
17 | #include <linux/smp_lock.h> | ||
18 | #include <linux/kthread.h> | 17 | #include <linux/kthread.h> |
19 | 18 | ||
20 | #define NLMDBG_FACILITY NLMDBG_CLIENT | 19 | #define NLMDBG_FACILITY NLMDBG_CLIENT |
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 47ea1e1925b8..332c54cf75e0 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c | |||
@@ -7,7 +7,6 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/smp_lock.h> | ||
11 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
12 | #include <linux/types.h> | 11 | #include <linux/types.h> |
13 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 25e21e4023b2..ed0c59fe23ce 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c | |||
@@ -124,7 +124,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni) | |||
124 | continue; | 124 | continue; |
125 | if (host->h_server != ni->server) | 125 | if (host->h_server != ni->server) |
126 | continue; | 126 | continue; |
127 | if (ni->server && | 127 | if (ni->server && ni->src_len != 0 && |
128 | !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap)) | 128 | !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap)) |
129 | continue; | 129 | continue; |
130 | 130 | ||
@@ -167,6 +167,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni) | |||
167 | host->h_addrlen = ni->salen; | 167 | host->h_addrlen = ni->salen; |
168 | rpc_set_port(nlm_addr(host), 0); | 168 | rpc_set_port(nlm_addr(host), 0); |
169 | memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len); | 169 | memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len); |
170 | host->h_srcaddrlen = ni->src_len; | ||
170 | host->h_version = ni->version; | 171 | host->h_version = ni->version; |
171 | host->h_proto = ni->protocol; | 172 | host->h_proto = ni->protocol; |
172 | host->h_rpcclnt = NULL; | 173 | host->h_rpcclnt = NULL; |
@@ -238,9 +239,6 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, | |||
238 | const char *hostname, | 239 | const char *hostname, |
239 | int noresvport) | 240 | int noresvport) |
240 | { | 241 | { |
241 | const struct sockaddr source = { | ||
242 | .sa_family = AF_UNSPEC, | ||
243 | }; | ||
244 | struct nlm_lookup_host_info ni = { | 242 | struct nlm_lookup_host_info ni = { |
245 | .server = 0, | 243 | .server = 0, |
246 | .sap = sap, | 244 | .sap = sap, |
@@ -249,8 +247,6 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, | |||
249 | .version = version, | 247 | .version = version, |
250 | .hostname = hostname, | 248 | .hostname = hostname, |
251 | .hostname_len = strlen(hostname), | 249 | .hostname_len = strlen(hostname), |
252 | .src_sap = &source, | ||
253 | .src_len = sizeof(source), | ||
254 | .noresvport = noresvport, | 250 | .noresvport = noresvport, |
255 | }; | 251 | }; |
256 | 252 | ||
@@ -357,7 +353,6 @@ nlm_bind_host(struct nlm_host *host) | |||
357 | .protocol = host->h_proto, | 353 | .protocol = host->h_proto, |
358 | .address = nlm_addr(host), | 354 | .address = nlm_addr(host), |
359 | .addrsize = host->h_addrlen, | 355 | .addrsize = host->h_addrlen, |
360 | .saddress = nlm_srcaddr(host), | ||
361 | .timeout = &timeparms, | 356 | .timeout = &timeparms, |
362 | .servername = host->h_name, | 357 | .servername = host->h_name, |
363 | .program = &nlm_program, | 358 | .program = &nlm_program, |
@@ -376,6 +371,8 @@ nlm_bind_host(struct nlm_host *host) | |||
376 | args.flags |= RPC_CLNT_CREATE_HARDRTRY; | 371 | args.flags |= RPC_CLNT_CREATE_HARDRTRY; |
377 | if (host->h_noresvport) | 372 | if (host->h_noresvport) |
378 | args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; | 373 | args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; |
374 | if (host->h_srcaddrlen) | ||
375 | args.saddress = nlm_srcaddr(host); | ||
379 | 376 | ||
380 | clnt = rpc_create(&args); | 377 | clnt = rpc_create(&args); |
381 | if (!IS_ERR(clnt)) | 378 | if (!IS_ERR(clnt)) |
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index a336e832475d..38d261192453 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c | |||
@@ -9,7 +9,6 @@ | |||
9 | 9 | ||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/time.h> | 11 | #include <linux/time.h> |
12 | #include <linux/smp_lock.h> | ||
13 | #include <linux/lockd/lockd.h> | 12 | #include <linux/lockd/lockd.h> |
14 | #include <linux/lockd/share.h> | 13 | #include <linux/lockd/share.h> |
15 | 14 | ||
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index c462d346acbd..ef5659b211e9 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/smp_lock.h> | ||
29 | #include <linux/sunrpc/clnt.h> | 28 | #include <linux/sunrpc/clnt.h> |
30 | #include <linux/sunrpc/svc.h> | 29 | #include <linux/sunrpc/svc.h> |
31 | #include <linux/lockd/nlm.h> | 30 | #include <linux/lockd/nlm.h> |
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index c3069f38d602..0caea5310ac3 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c | |||
@@ -9,7 +9,6 @@ | |||
9 | 9 | ||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/time.h> | 11 | #include <linux/time.h> |
12 | #include <linux/smp_lock.h> | ||
13 | #include <linux/lockd/lockd.h> | 12 | #include <linux/lockd/lockd.h> |
14 | #include <linux/lockd/share.h> | 13 | #include <linux/lockd/share.h> |
15 | 14 | ||
diff --git a/fs/locks.c b/fs/locks.c index 0e62dd35d088..8729347bcd1a 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
@@ -122,7 +122,6 @@ | |||
122 | #include <linux/module.h> | 122 | #include <linux/module.h> |
123 | #include <linux/security.h> | 123 | #include <linux/security.h> |
124 | #include <linux/slab.h> | 124 | #include <linux/slab.h> |
125 | #include <linux/smp_lock.h> | ||
126 | #include <linux/syscalls.h> | 125 | #include <linux/syscalls.h> |
127 | #include <linux/time.h> | 126 | #include <linux/time.h> |
128 | #include <linux/rcupdate.h> | 127 | #include <linux/rcupdate.h> |
diff --git a/fs/namespace.c b/fs/namespace.c index 8a415c9c5e55..3dbfc072ec70 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/percpu.h> | 15 | #include <linux/percpu.h> |
16 | #include <linux/smp_lock.h> | ||
17 | #include <linux/init.h> | 16 | #include <linux/init.h> |
18 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
19 | #include <linux/acct.h> | 18 | #include <linux/acct.h> |
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index aac8832e919e..f22b12e7d337 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
20 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
21 | #include <asm/byteorder.h> | 21 | #include <asm/byteorder.h> |
22 | #include <linux/smp_lock.h> | ||
23 | 22 | ||
24 | #include <linux/ncp_fs.h> | 23 | #include <linux/ncp_fs.h> |
25 | 24 | ||
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index 6c754f70c529..cb50aaf981df 100644 --- a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/vmalloc.h> | 18 | #include <linux/vmalloc.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/smp_lock.h> | ||
21 | 20 | ||
22 | #include <linux/ncp_fs.h> | 21 | #include <linux/ncp_fs.h> |
23 | #include "ncplib_kernel.h" | 22 | #include "ncplib_kernel.h" |
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index d290545aa0c4..8fb93b604e73 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/vmalloc.h> | 27 | #include <linux/vmalloc.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/smp_lock.h> | ||
30 | #include <linux/vfs.h> | 29 | #include <linux/vfs.h> |
31 | #include <linux/mount.h> | 30 | #include <linux/mount.h> |
32 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c index c2a1f9a155c3..d40a547e3377 100644 --- a/fs/ncpfs/ioctl.c +++ b/fs/ncpfs/ioctl.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/mount.h> | 17 | #include <linux/mount.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/highuid.h> | 19 | #include <linux/highuid.h> |
20 | #include <linux/smp_lock.h> | ||
21 | #include <linux/vmalloc.h> | 20 | #include <linux/vmalloc.h> |
22 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
23 | 22 | ||
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index aeec017fe814..93a8b3bd69e3 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/completion.h> | 9 | #include <linux/completion.h> |
10 | #include <linux/ip.h> | 10 | #include <linux/ip.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/smp_lock.h> | ||
13 | #include <linux/sunrpc/svc.h> | 12 | #include <linux/sunrpc/svc.h> |
14 | #include <linux/sunrpc/svcsock.h> | 13 | #include <linux/sunrpc/svcsock.h> |
15 | #include <linux/nfs_fs.h> | 14 | #include <linux/nfs_fs.h> |
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 232a7eead33a..1fd62fc49be3 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/smp_lock.h> | ||
15 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
16 | 15 | ||
17 | #include <linux/nfs4.h> | 16 | #include <linux/nfs4.h> |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 07ac3847e562..662df2a5fad5 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/mount.h> | 34 | #include <linux/mount.h> |
35 | #include <linux/sched.h> | 35 | #include <linux/sched.h> |
36 | #include <linux/vmalloc.h> | 36 | #include <linux/vmalloc.h> |
37 | #include <linux/kmemleak.h> | ||
37 | 38 | ||
38 | #include "delegation.h" | 39 | #include "delegation.h" |
39 | #include "iostat.h" | 40 | #include "iostat.h" |
@@ -194,9 +195,13 @@ typedef struct { | |||
194 | static | 195 | static |
195 | struct nfs_cache_array *nfs_readdir_get_array(struct page *page) | 196 | struct nfs_cache_array *nfs_readdir_get_array(struct page *page) |
196 | { | 197 | { |
198 | void *ptr; | ||
197 | if (page == NULL) | 199 | if (page == NULL) |
198 | return ERR_PTR(-EIO); | 200 | return ERR_PTR(-EIO); |
199 | return (struct nfs_cache_array *)kmap(page); | 201 | ptr = kmap(page); |
202 | if (ptr == NULL) | ||
203 | return ERR_PTR(-ENOMEM); | ||
204 | return ptr; | ||
200 | } | 205 | } |
201 | 206 | ||
202 | static | 207 | static |
@@ -213,6 +218,9 @@ int nfs_readdir_clear_array(struct page *page, gfp_t mask) | |||
213 | { | 218 | { |
214 | struct nfs_cache_array *array = nfs_readdir_get_array(page); | 219 | struct nfs_cache_array *array = nfs_readdir_get_array(page); |
215 | int i; | 220 | int i; |
221 | |||
222 | if (IS_ERR(array)) | ||
223 | return PTR_ERR(array); | ||
216 | for (i = 0; i < array->size; i++) | 224 | for (i = 0; i < array->size; i++) |
217 | kfree(array->array[i].string.name); | 225 | kfree(array->array[i].string.name); |
218 | nfs_readdir_release_array(page); | 226 | nfs_readdir_release_array(page); |
@@ -231,6 +239,11 @@ int nfs_readdir_make_qstr(struct qstr *string, const char *name, unsigned int le | |||
231 | string->name = kmemdup(name, len, GFP_KERNEL); | 239 | string->name = kmemdup(name, len, GFP_KERNEL); |
232 | if (string->name == NULL) | 240 | if (string->name == NULL) |
233 | return -ENOMEM; | 241 | return -ENOMEM; |
242 | /* | ||
243 | * Avoid a kmemleak false positive. The pointer to the name is stored | ||
244 | * in a page cache page which kmemleak does not scan. | ||
245 | */ | ||
246 | kmemleak_not_leak(string->name); | ||
234 | string->hash = full_name_hash(name, len); | 247 | string->hash = full_name_hash(name, len); |
235 | return 0; | 248 | return 0; |
236 | } | 249 | } |
@@ -244,7 +257,7 @@ int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page) | |||
244 | 257 | ||
245 | if (IS_ERR(array)) | 258 | if (IS_ERR(array)) |
246 | return PTR_ERR(array); | 259 | return PTR_ERR(array); |
247 | ret = -EIO; | 260 | ret = -ENOSPC; |
248 | if (array->size >= MAX_READDIR_ARRAY) | 261 | if (array->size >= MAX_READDIR_ARRAY) |
249 | goto out; | 262 | goto out; |
250 | 263 | ||
@@ -255,9 +268,9 @@ int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page) | |||
255 | if (ret) | 268 | if (ret) |
256 | goto out; | 269 | goto out; |
257 | array->last_cookie = entry->cookie; | 270 | array->last_cookie = entry->cookie; |
271 | array->size++; | ||
258 | if (entry->eof == 1) | 272 | if (entry->eof == 1) |
259 | array->eof_index = array->size; | 273 | array->eof_index = array->size; |
260 | array->size++; | ||
261 | out: | 274 | out: |
262 | nfs_readdir_release_array(page); | 275 | nfs_readdir_release_array(page); |
263 | return ret; | 276 | return ret; |
@@ -272,7 +285,7 @@ int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descri | |||
272 | if (diff < 0) | 285 | if (diff < 0) |
273 | goto out_eof; | 286 | goto out_eof; |
274 | if (diff >= array->size) { | 287 | if (diff >= array->size) { |
275 | if (array->eof_index > 0) | 288 | if (array->eof_index >= 0) |
276 | goto out_eof; | 289 | goto out_eof; |
277 | desc->current_index += array->size; | 290 | desc->current_index += array->size; |
278 | return -EAGAIN; | 291 | return -EAGAIN; |
@@ -281,8 +294,6 @@ int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descri | |||
281 | index = (unsigned int)diff; | 294 | index = (unsigned int)diff; |
282 | *desc->dir_cookie = array->array[index].cookie; | 295 | *desc->dir_cookie = array->array[index].cookie; |
283 | desc->cache_entry_index = index; | 296 | desc->cache_entry_index = index; |
284 | if (index == array->eof_index) | ||
285 | desc->eof = 1; | ||
286 | return 0; | 297 | return 0; |
287 | out_eof: | 298 | out_eof: |
288 | desc->eof = 1; | 299 | desc->eof = 1; |
@@ -296,17 +307,17 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des | |||
296 | int status = -EAGAIN; | 307 | int status = -EAGAIN; |
297 | 308 | ||
298 | for (i = 0; i < array->size; i++) { | 309 | for (i = 0; i < array->size; i++) { |
299 | if (i == array->eof_index) { | ||
300 | desc->eof = 1; | ||
301 | status = -EBADCOOKIE; | ||
302 | } | ||
303 | if (array->array[i].cookie == *desc->dir_cookie) { | 310 | if (array->array[i].cookie == *desc->dir_cookie) { |
304 | desc->cache_entry_index = i; | 311 | desc->cache_entry_index = i; |
305 | status = 0; | 312 | status = 0; |
306 | break; | 313 | goto out; |
307 | } | 314 | } |
308 | } | 315 | } |
309 | 316 | if (i == array->eof_index) { | |
317 | desc->eof = 1; | ||
318 | status = -EBADCOOKIE; | ||
319 | } | ||
320 | out: | ||
310 | return status; | 321 | return status; |
311 | } | 322 | } |
312 | 323 | ||
@@ -449,7 +460,7 @@ out: | |||
449 | 460 | ||
450 | /* Perform conversion from xdr to cache array */ | 461 | /* Perform conversion from xdr to cache array */ |
451 | static | 462 | static |
452 | void nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, | 463 | int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, |
453 | void *xdr_page, struct page *page, unsigned int buflen) | 464 | void *xdr_page, struct page *page, unsigned int buflen) |
454 | { | 465 | { |
455 | struct xdr_stream stream; | 466 | struct xdr_stream stream; |
@@ -471,21 +482,29 @@ void nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *e | |||
471 | 482 | ||
472 | do { | 483 | do { |
473 | status = xdr_decode(desc, entry, &stream); | 484 | status = xdr_decode(desc, entry, &stream); |
474 | if (status != 0) | 485 | if (status != 0) { |
486 | if (status == -EAGAIN) | ||
487 | status = 0; | ||
475 | break; | 488 | break; |
489 | } | ||
476 | 490 | ||
477 | if (nfs_readdir_add_to_array(entry, page) == -1) | ||
478 | break; | ||
479 | if (desc->plus == 1) | 491 | if (desc->plus == 1) |
480 | nfs_prime_dcache(desc->file->f_path.dentry, entry); | 492 | nfs_prime_dcache(desc->file->f_path.dentry, entry); |
493 | |||
494 | status = nfs_readdir_add_to_array(entry, page); | ||
495 | if (status != 0) | ||
496 | break; | ||
481 | } while (!entry->eof); | 497 | } while (!entry->eof); |
482 | 498 | ||
483 | if (status == -EBADCOOKIE && entry->eof) { | 499 | if (status == -EBADCOOKIE && entry->eof) { |
484 | array = nfs_readdir_get_array(page); | 500 | array = nfs_readdir_get_array(page); |
485 | array->eof_index = array->size - 1; | 501 | if (!IS_ERR(array)) { |
486 | status = 0; | 502 | array->eof_index = array->size; |
487 | nfs_readdir_release_array(page); | 503 | status = 0; |
504 | nfs_readdir_release_array(page); | ||
505 | } | ||
488 | } | 506 | } |
507 | return status; | ||
489 | } | 508 | } |
490 | 509 | ||
491 | static | 510 | static |
@@ -537,7 +556,7 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, | |||
537 | struct nfs_entry entry; | 556 | struct nfs_entry entry; |
538 | struct file *file = desc->file; | 557 | struct file *file = desc->file; |
539 | struct nfs_cache_array *array; | 558 | struct nfs_cache_array *array; |
540 | int status = 0; | 559 | int status = -ENOMEM; |
541 | unsigned int array_size = ARRAY_SIZE(pages); | 560 | unsigned int array_size = ARRAY_SIZE(pages); |
542 | 561 | ||
543 | entry.prev_cookie = 0; | 562 | entry.prev_cookie = 0; |
@@ -549,6 +568,10 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, | |||
549 | goto out; | 568 | goto out; |
550 | 569 | ||
551 | array = nfs_readdir_get_array(page); | 570 | array = nfs_readdir_get_array(page); |
571 | if (IS_ERR(array)) { | ||
572 | status = PTR_ERR(array); | ||
573 | goto out; | ||
574 | } | ||
552 | memset(array, 0, sizeof(struct nfs_cache_array)); | 575 | memset(array, 0, sizeof(struct nfs_cache_array)); |
553 | array->eof_index = -1; | 576 | array->eof_index = -1; |
554 | 577 | ||
@@ -556,12 +579,19 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, | |||
556 | if (!pages_ptr) | 579 | if (!pages_ptr) |
557 | goto out_release_array; | 580 | goto out_release_array; |
558 | do { | 581 | do { |
582 | unsigned int pglen; | ||
559 | status = nfs_readdir_xdr_filler(pages, desc, &entry, file, inode); | 583 | status = nfs_readdir_xdr_filler(pages, desc, &entry, file, inode); |
560 | 584 | ||
561 | if (status < 0) | 585 | if (status < 0) |
562 | break; | 586 | break; |
563 | nfs_readdir_page_filler(desc, &entry, pages_ptr, page, array_size * PAGE_SIZE); | 587 | pglen = status; |
564 | } while (array->eof_index < 0 && array->size < MAX_READDIR_ARRAY); | 588 | status = nfs_readdir_page_filler(desc, &entry, pages_ptr, page, pglen); |
589 | if (status < 0) { | ||
590 | if (status == -ENOSPC) | ||
591 | status = 0; | ||
592 | break; | ||
593 | } | ||
594 | } while (array->eof_index < 0); | ||
565 | 595 | ||
566 | nfs_readdir_free_large_page(pages_ptr, pages, array_size); | 596 | nfs_readdir_free_large_page(pages_ptr, pages, array_size); |
567 | out_release_array: | 597 | out_release_array: |
@@ -582,8 +612,10 @@ static | |||
582 | int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page) | 612 | int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page) |
583 | { | 613 | { |
584 | struct inode *inode = desc->file->f_path.dentry->d_inode; | 614 | struct inode *inode = desc->file->f_path.dentry->d_inode; |
615 | int ret; | ||
585 | 616 | ||
586 | if (nfs_readdir_xdr_to_array(desc, page, inode) < 0) | 617 | ret = nfs_readdir_xdr_to_array(desc, page, inode); |
618 | if (ret < 0) | ||
587 | goto error; | 619 | goto error; |
588 | SetPageUptodate(page); | 620 | SetPageUptodate(page); |
589 | 621 | ||
@@ -595,7 +627,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page) | |||
595 | return 0; | 627 | return 0; |
596 | error: | 628 | error: |
597 | unlock_page(page); | 629 | unlock_page(page); |
598 | return -EIO; | 630 | return ret; |
599 | } | 631 | } |
600 | 632 | ||
601 | static | 633 | static |
@@ -608,12 +640,8 @@ void cache_page_release(nfs_readdir_descriptor_t *desc) | |||
608 | static | 640 | static |
609 | struct page *get_cache_page(nfs_readdir_descriptor_t *desc) | 641 | struct page *get_cache_page(nfs_readdir_descriptor_t *desc) |
610 | { | 642 | { |
611 | struct page *page; | 643 | return read_cache_page(desc->file->f_path.dentry->d_inode->i_mapping, |
612 | page = read_cache_page(desc->file->f_path.dentry->d_inode->i_mapping, | ||
613 | desc->page_index, (filler_t *)nfs_readdir_filler, desc); | 644 | desc->page_index, (filler_t *)nfs_readdir_filler, desc); |
614 | if (IS_ERR(page)) | ||
615 | desc->eof = 1; | ||
616 | return page; | ||
617 | } | 645 | } |
618 | 646 | ||
619 | /* | 647 | /* |
@@ -639,8 +667,10 @@ int find_cache_page(nfs_readdir_descriptor_t *desc) | |||
639 | static inline | 667 | static inline |
640 | int readdir_search_pagecache(nfs_readdir_descriptor_t *desc) | 668 | int readdir_search_pagecache(nfs_readdir_descriptor_t *desc) |
641 | { | 669 | { |
642 | int res = -EAGAIN; | 670 | int res; |
643 | 671 | ||
672 | if (desc->page_index == 0) | ||
673 | desc->current_index = 0; | ||
644 | while (1) { | 674 | while (1) { |
645 | res = find_cache_page(desc); | 675 | res = find_cache_page(desc); |
646 | if (res != -EAGAIN) | 676 | if (res != -EAGAIN) |
@@ -670,6 +700,8 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent, | |||
670 | struct dentry *dentry = NULL; | 700 | struct dentry *dentry = NULL; |
671 | 701 | ||
672 | array = nfs_readdir_get_array(desc->page); | 702 | array = nfs_readdir_get_array(desc->page); |
703 | if (IS_ERR(array)) | ||
704 | return PTR_ERR(array); | ||
673 | 705 | ||
674 | for (i = desc->cache_entry_index; i < array->size; i++) { | 706 | for (i = desc->cache_entry_index; i < array->size; i++) { |
675 | d_type = DT_UNKNOWN; | 707 | d_type = DT_UNKNOWN; |
@@ -685,11 +717,9 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent, | |||
685 | *desc->dir_cookie = array->array[i+1].cookie; | 717 | *desc->dir_cookie = array->array[i+1].cookie; |
686 | else | 718 | else |
687 | *desc->dir_cookie = array->last_cookie; | 719 | *desc->dir_cookie = array->last_cookie; |
688 | if (i == array->eof_index) { | ||
689 | desc->eof = 1; | ||
690 | break; | ||
691 | } | ||
692 | } | 720 | } |
721 | if (i == array->eof_index) | ||
722 | desc->eof = 1; | ||
693 | 723 | ||
694 | nfs_readdir_release_array(desc->page); | 724 | nfs_readdir_release_array(desc->page); |
695 | cache_page_release(desc); | 725 | cache_page_release(desc); |
@@ -1345,12 +1375,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry | |||
1345 | res = NULL; | 1375 | res = NULL; |
1346 | goto out; | 1376 | goto out; |
1347 | /* This turned out not to be a regular file */ | 1377 | /* This turned out not to be a regular file */ |
1348 | case -EISDIR: | ||
1349 | case -ENOTDIR: | 1378 | case -ENOTDIR: |
1350 | goto no_open; | 1379 | goto no_open; |
1351 | case -ELOOP: | 1380 | case -ELOOP: |
1352 | if (!(nd->intent.open.flags & O_NOFOLLOW)) | 1381 | if (!(nd->intent.open.flags & O_NOFOLLOW)) |
1353 | goto no_open; | 1382 | goto no_open; |
1383 | /* case -EISDIR: */ | ||
1354 | /* case -EINVAL: */ | 1384 | /* case -EINVAL: */ |
1355 | default: | 1385 | default: |
1356 | res = ERR_CAST(inode); | 1386 | res = ERR_CAST(inode); |
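Several of the fs/nfs/dir.c hunks above stop assuming kmap() always succeeds: nfs_readdir_get_array() now returns ERR_PTR(-ENOMEM) on failure and every caller checks IS_ERR()/PTR_ERR() before touching the array. The following self-contained sketch re-implements that error-pointer convention in userspace purely for illustration; get_array() is a hypothetical stand-in for nfs_readdir_get_array(), and the ERR_PTR helpers are local copies of the idiom, not the kernel's own definitions.

/*
 * Hedged, userspace-compilable sketch of the ERR_PTR/IS_ERR pattern used
 * by the readdir changes above. The helpers mirror the kernel idiom:
 * small negative errno values are encoded in the top of the address space.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical stand-in for nfs_readdir_get_array(): map a page or fail. */
static void *get_array(int simulate_enomem)
{
    if (simulate_enomem)
        return ERR_PTR(-ENOMEM);
    return malloc(4096);
}

int main(void)
{
    void *array = get_array(1);

    if (IS_ERR(array)) {
        printf("get_array failed: %ld\n", PTR_ERR(array));
        return 1;
    }
    /* ... use the array ... */
    free(array);
    return 0;
}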
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index e6bf45710cc7..2563f765c9b4 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c | |||
@@ -423,7 +423,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy) | |||
423 | struct page **page; | 423 | struct page **page; |
424 | size_t hdrlen; | 424 | size_t hdrlen; |
425 | unsigned int pglen, recvd; | 425 | unsigned int pglen, recvd; |
426 | int status, nr = 0; | 426 | int status; |
427 | 427 | ||
428 | if ((status = ntohl(*p++))) | 428 | if ((status = ntohl(*p++))) |
429 | return nfs_stat_to_errno(status); | 429 | return nfs_stat_to_errno(status); |
@@ -443,7 +443,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy) | |||
443 | if (pglen > recvd) | 443 | if (pglen > recvd) |
444 | pglen = recvd; | 444 | pglen = recvd; |
445 | page = rcvbuf->pages; | 445 | page = rcvbuf->pages; |
446 | return nr; | 446 | return pglen; |
447 | } | 447 | } |
448 | 448 | ||
449 | static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) | 449 | static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) |
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index d9a5e832c257..748dc91a4a14 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c | |||
@@ -555,7 +555,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res | |||
555 | struct page **page; | 555 | struct page **page; |
556 | size_t hdrlen; | 556 | size_t hdrlen; |
557 | u32 recvd, pglen; | 557 | u32 recvd, pglen; |
558 | int status, nr = 0; | 558 | int status; |
559 | 559 | ||
560 | status = ntohl(*p++); | 560 | status = ntohl(*p++); |
561 | /* Decode post_op_attrs */ | 561 | /* Decode post_op_attrs */ |
@@ -586,7 +586,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res | |||
586 | pglen = recvd; | 586 | pglen = recvd; |
587 | page = rcvbuf->pages; | 587 | page = rcvbuf->pages; |
588 | 588 | ||
589 | return nr; | 589 | return pglen; |
590 | } | 590 | } |
591 | 591 | ||
592 | __be32 * | 592 | __be32 * |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 0f24cdf2cb13..6a653ffd8e4e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -2852,8 +2852,10 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, | |||
2852 | nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); | 2852 | nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); |
2853 | res.pgbase = args.pgbase; | 2853 | res.pgbase = args.pgbase; |
2854 | status = nfs4_call_sync(NFS_SERVER(dir), &msg, &args, &res, 0); | 2854 | status = nfs4_call_sync(NFS_SERVER(dir), &msg, &args, &res, 0); |
2855 | if (status == 0) | 2855 | if (status >= 0) { |
2856 | memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); | 2856 | memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); |
2857 | status += args.pgbase; | ||
2858 | } | ||
2857 | 2859 | ||
2858 | nfs_invalidate_atime(dir); | 2860 | nfs_invalidate_atime(dir); |
2859 | 2861 | ||
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index f313c4cce7e4..b7a204ff6fe1 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -4518,7 +4518,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n | |||
4518 | xdr_read_pages(xdr, pglen); | 4518 | xdr_read_pages(xdr, pglen); |
4519 | 4519 | ||
4520 | 4520 | ||
4521 | return 0; | 4521 | return pglen; |
4522 | } | 4522 | } |
4523 | 4523 | ||
4524 | static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req) | 4524 | static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req) |
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 0a42e8f4adcb..3c045044fca2 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -39,7 +39,6 @@ | |||
39 | #include <linux/nfs_mount.h> | 39 | #include <linux/nfs_mount.h> |
40 | #include <linux/nfs4_mount.h> | 40 | #include <linux/nfs4_mount.h> |
41 | #include <linux/lockd/bind.h> | 41 | #include <linux/lockd/bind.h> |
42 | #include <linux/smp_lock.h> | ||
43 | #include <linux/seq_file.h> | 42 | #include <linux/seq_file.h> |
44 | #include <linux/mount.h> | 43 | #include <linux/mount.h> |
45 | #include <linux/mnt_namespace.h> | 44 | #include <linux/mnt_namespace.h> |
@@ -67,6 +66,12 @@ | |||
67 | 66 | ||
68 | #define NFSDBG_FACILITY NFSDBG_VFS | 67 | #define NFSDBG_FACILITY NFSDBG_VFS |
69 | 68 | ||
69 | #ifdef CONFIG_NFS_V3 | ||
70 | #define NFS_DEFAULT_VERSION 3 | ||
71 | #else | ||
72 | #define NFS_DEFAULT_VERSION 2 | ||
73 | #endif | ||
74 | |||
70 | enum { | 75 | enum { |
71 | /* Mount options that take no arguments */ | 76 | /* Mount options that take no arguments */ |
72 | Opt_soft, Opt_hard, | 77 | Opt_soft, Opt_hard, |
@@ -2277,7 +2282,7 @@ static int nfs_get_sb(struct file_system_type *fs_type, | |||
2277 | }; | 2282 | }; |
2278 | int error = -ENOMEM; | 2283 | int error = -ENOMEM; |
2279 | 2284 | ||
2280 | data = nfs_alloc_parsed_mount_data(3); | 2285 | data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION); |
2281 | mntfh = nfs_alloc_fhandle(); | 2286 | mntfh = nfs_alloc_fhandle(); |
2282 | if (data == NULL || mntfh == NULL) | 2287 | if (data == NULL || mntfh == NULL) |
2283 | goto out_free_fh; | 2288 | goto out_free_fh; |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index ad2bfa68d534..116cab970e0f 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -2262,7 +2262,7 @@ nfs4_file_downgrade(struct nfs4_file *fp, unsigned int share_access) | |||
2262 | * Spawn a thread to perform a recall on the delegation represented | 2262 | * Spawn a thread to perform a recall on the delegation represented |
2263 | * by the lease (file_lock) | 2263 | * by the lease (file_lock) |
2264 | * | 2264 | * |
2265 | * Called from break_lease() with lock_kernel() held. | 2265 | * Called from break_lease() with lock_flocks() held. |
2266 | * Note: we assume break_lease will only call this *once* for any given | 2266 | * Note: we assume break_lease will only call this *once* for any given |
2267 | * lease. | 2267 | * lease. |
2268 | */ | 2268 | */ |
@@ -2286,7 +2286,7 @@ void nfsd_break_deleg_cb(struct file_lock *fl) | |||
2286 | list_add_tail(&dp->dl_recall_lru, &del_recall_lru); | 2286 | list_add_tail(&dp->dl_recall_lru, &del_recall_lru); |
2287 | spin_unlock(&recall_lock); | 2287 | spin_unlock(&recall_lock); |
2288 | 2288 | ||
2289 | /* only place dl_time is set. protected by lock_kernel*/ | 2289 | /* only place dl_time is set. protected by lock_flocks*/ |
2290 | dp->dl_time = get_seconds(); | 2290 | dp->dl_time = get_seconds(); |
2291 | 2291 | ||
2292 | /* | 2292 | /* |
@@ -2303,7 +2303,7 @@ void nfsd_break_deleg_cb(struct file_lock *fl) | |||
2303 | /* | 2303 | /* |
2304 | * The file_lock is being reapd. | 2304 | * The file_lock is being reapd. |
2305 | * | 2305 | * |
2306 | * Called by locks_free_lock() with lock_kernel() held. | 2306 | * Called by locks_free_lock() with lock_flocks() held. |
2307 | */ | 2307 | */ |
2308 | static | 2308 | static |
2309 | void nfsd_release_deleg_cb(struct file_lock *fl) | 2309 | void nfsd_release_deleg_cb(struct file_lock *fl) |
@@ -2318,7 +2318,7 @@ void nfsd_release_deleg_cb(struct file_lock *fl) | |||
2318 | } | 2318 | } |
2319 | 2319 | ||
2320 | /* | 2320 | /* |
2321 | * Called from setlease() with lock_kernel() held | 2321 | * Called from setlease() with lock_flocks() held |
2322 | */ | 2322 | */ |
2323 | static | 2323 | static |
2324 | int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try) | 2324 | int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try) |
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index d8408217e3bd..1efea3615589 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -159,7 +159,9 @@ struct ocfs2_lock_res { | |||
159 | char l_name[OCFS2_LOCK_ID_MAX_LEN]; | 159 | char l_name[OCFS2_LOCK_ID_MAX_LEN]; |
160 | unsigned int l_ro_holders; | 160 | unsigned int l_ro_holders; |
161 | unsigned int l_ex_holders; | 161 | unsigned int l_ex_holders; |
162 | unsigned char l_level; | 162 | char l_level; |
163 | char l_requested; | ||
164 | char l_blocking; | ||
163 | 165 | ||
164 | /* Data packed - type enum ocfs2_lock_type */ | 166 | /* Data packed - type enum ocfs2_lock_type */ |
165 | unsigned char l_type; | 167 | unsigned char l_type; |
@@ -169,8 +171,6 @@ struct ocfs2_lock_res { | |||
169 | unsigned char l_action; | 171 | unsigned char l_action; |
170 | /* Data packed - enum type ocfs2_unlock_action */ | 172 | /* Data packed - enum type ocfs2_unlock_action */ |
171 | unsigned char l_unlock_action; | 173 | unsigned char l_unlock_action; |
172 | unsigned char l_requested; | ||
173 | unsigned char l_blocking; | ||
174 | unsigned int l_pending_gen; | 174 | unsigned int l_pending_gen; |
175 | 175 | ||
176 | spinlock_t l_lock; | 176 | spinlock_t l_lock; |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index f02c0ef31578..cfeab7ce3697 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include <linux/mount.h> | 41 | #include <linux/mount.h> |
42 | #include <linux/seq_file.h> | 42 | #include <linux/seq_file.h> |
43 | #include <linux/quotaops.h> | 43 | #include <linux/quotaops.h> |
44 | #include <linux/smp_lock.h> | ||
45 | 44 | ||
46 | #define MLOG_MASK_PREFIX ML_SUPER | 45 | #define MLOG_MASK_PREFIX ML_SUPER |
47 | #include <cluster/masklog.h> | 46 | #include <cluster/masklog.h> |
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 9c2b5f484879..3ddb6068177c 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/limits.h> | 16 | #include <linux/limits.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/sysctl.h> | 19 | #include <linux/sysctl.h> |
21 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
22 | 21 | ||
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index da6b01d70f01..c126c83b9a45 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -706,6 +706,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, | |||
706 | * skip over unmapped regions. | 706 | * skip over unmapped regions. |
707 | */ | 707 | */ |
708 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) | 708 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) |
709 | #define PAGEMAP_WALK_MASK (PMD_MASK) | ||
709 | static ssize_t pagemap_read(struct file *file, char __user *buf, | 710 | static ssize_t pagemap_read(struct file *file, char __user *buf, |
710 | size_t count, loff_t *ppos) | 711 | size_t count, loff_t *ppos) |
711 | { | 712 | { |
@@ -776,7 +777,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, | |||
776 | unsigned long end; | 777 | unsigned long end; |
777 | 778 | ||
778 | pm.pos = 0; | 779 | pm.pos = 0; |
779 | end = start_vaddr + PAGEMAP_WALK_SIZE; | 780 | end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK; |
780 | /* overflow ? */ | 781 | /* overflow ? */ |
781 | if (end < start_vaddr || end > end_vaddr) | 782 | if (end < start_vaddr || end > end_vaddr) |
782 | end = end_vaddr; | 783 | end = end_vaddr; |
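The PAGEMAP_WALK_MASK change above rounds each chunk's end down to a PMD boundary, so one pass of pagemap_read() never straddles a huge-page (PMD) entry when the starting address is unaligned. A small illustration of the masking arithmetic, using a 2 MiB stand-in for PMD_SIZE (the real value is architecture-dependent):

/*
 * Hedged illustration of the boundary rounding introduced above.
 * WALK_SIZE/WALK_MASK are local stand-ins for PMD_SIZE/PMD_MASK.
 */
#include <stdio.h>
#include <stdint.h>

#define WALK_SIZE (1ULL << 21)          /* stand-in for PMD_SIZE: 2 MiB */
#define WALK_MASK (~(WALK_SIZE - 1))    /* stand-in for PMD_MASK */

int main(void)
{
    /* An arbitrary, non-PMD-aligned user virtual address. */
    uint64_t start = 0x7f3a12345000ULL;
    uint64_t naive_end = start + WALK_SIZE;          /* would cross a PMD boundary */
    uint64_t end = (start + WALK_SIZE) & WALK_MASK;  /* clamped to the boundary */

    printf("start:      %#llx\n", (unsigned long long)start);
    printf("naive end:  %#llx\n", (unsigned long long)naive_end);
    printf("masked end: %#llx\n", (unsigned long long)end);
    return 0;
}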
diff --git a/fs/read_write.c b/fs/read_write.c index 431a0ed610c8..5d431bacbea9 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/fcntl.h> | 9 | #include <linux/fcntl.h> |
10 | #include <linux/file.h> | 10 | #include <linux/file.h> |
11 | #include <linux/uio.h> | 11 | #include <linux/uio.h> |
12 | #include <linux/smp_lock.h> | ||
13 | #include <linux/fsnotify.h> | 12 | #include <linux/fsnotify.h> |
14 | #include <linux/security.h> | 13 | #include <linux/security.h> |
15 | #include <linux/module.h> | 14 | #include <linux/module.h> |
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 41656d40dc5c..0bae036831e2 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <linux/reiserfs_acl.h> | 8 | #include <linux/reiserfs_acl.h> |
9 | #include <linux/reiserfs_xattr.h> | 9 | #include <linux/reiserfs_xattr.h> |
10 | #include <linux/exportfs.h> | 10 | #include <linux/exportfs.h> |
11 | #include <linux/smp_lock.h> | ||
12 | #include <linux/pagemap.h> | 11 | #include <linux/pagemap.h> |
13 | #include <linux/highmem.h> | 12 | #include <linux/highmem.h> |
14 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index adf22b485cea..79265fdc317a 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/time.h> | 9 | #include <linux/time.h> |
10 | #include <asm/uaccess.h> | 10 | #include <asm/uaccess.h> |
11 | #include <linux/pagemap.h> | 11 | #include <linux/pagemap.h> |
12 | #include <linux/smp_lock.h> | ||
13 | #include <linux/compat.h> | 12 | #include <linux/compat.h> |
14 | 13 | ||
15 | /* | 14 | /* |
@@ -184,12 +183,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) | |||
184 | return 0; | 183 | return 0; |
185 | } | 184 | } |
186 | 185 | ||
187 | /* we need to make sure nobody is changing the file size beneath | ||
188 | ** us | ||
189 | */ | ||
190 | reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); | ||
191 | depth = reiserfs_write_lock_once(inode->i_sb); | 186 | depth = reiserfs_write_lock_once(inode->i_sb); |
192 | 187 | ||
188 | /* we need to make sure nobody is changing the file size beneath us */ | ||
189 | reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); | ||
190 | |||
193 | write_from = inode->i_size & (blocksize - 1); | 191 | write_from = inode->i_size & (blocksize - 1); |
194 | /* if we are on a block boundary, we are already unpacked. */ | 192 | /* if we are on a block boundary, we are already unpacked. */ |
195 | if (write_from == 0) { | 193 | if (write_from == 0) { |
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 076c8b194682..d31bce1a9f90 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #include <linux/fcntl.h> | 43 | #include <linux/fcntl.h> |
44 | #include <linux/stat.h> | 44 | #include <linux/stat.h> |
45 | #include <linux/string.h> | 45 | #include <linux/string.h> |
46 | #include <linux/smp_lock.h> | ||
47 | #include <linux/buffer_head.h> | 46 | #include <linux/buffer_head.h> |
48 | #include <linux/workqueue.h> | 47 | #include <linux/workqueue.h> |
49 | #include <linux/writeback.h> | 48 | #include <linux/writeback.h> |
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 3bf7a6457f4d..b243117b8752 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/mount.h> | 28 | #include <linux/mount.h> |
29 | #include <linux/namei.h> | 29 | #include <linux/namei.h> |
30 | #include <linux/crc32.h> | 30 | #include <linux/crc32.h> |
31 | #include <linux/smp_lock.h> | ||
32 | 31 | ||
33 | struct file_system_type reiserfs_fs_type; | 32 | struct file_system_type reiserfs_fs_type; |
34 | 33 | ||
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index 01a714119506..bc5590b1a1ac 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h | |||
@@ -80,6 +80,7 @@ struct drm_nouveau_gpuobj_free { | |||
80 | #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 | 80 | #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 |
81 | #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 | 81 | #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 |
82 | #define NOUVEAU_GETPARAM_PTIMER_TIME 14 | 82 | #define NOUVEAU_GETPARAM_PTIMER_TIME 14 |
83 | #define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 | ||
83 | struct drm_nouveau_getparam { | 84 | struct drm_nouveau_getparam { |
84 | uint64_t param; | 85 | uint64_t param; |
85 | uint64_t value; | 86 | uint64_t value; |
@@ -95,6 +96,12 @@ struct drm_nouveau_setparam { | |||
95 | #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) | 96 | #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) |
96 | #define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3) | 97 | #define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3) |
97 | 98 | ||
99 | #define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00 | ||
100 | #define NOUVEAU_GEM_TILE_16BPP 0x00000001 | ||
101 | #define NOUVEAU_GEM_TILE_32BPP 0x00000002 | ||
102 | #define NOUVEAU_GEM_TILE_ZETA 0x00000004 | ||
103 | #define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008 | ||
104 | |||
98 | struct drm_nouveau_gem_info { | 105 | struct drm_nouveau_gem_info { |
99 | uint32_t handle; | 106 | uint32_t handle; |
100 | uint32_t domain; | 107 | uint32_t domain; |
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index f22b2e941686..9e76d35670d2 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h | |||
@@ -227,8 +227,7 @@ extern int ceph_open_session(struct ceph_client *client); | |||
227 | extern void ceph_release_page_vector(struct page **pages, int num_pages); | 227 | extern void ceph_release_page_vector(struct page **pages, int num_pages); |
228 | 228 | ||
229 | extern struct page **ceph_get_direct_page_vector(const char __user *data, | 229 | extern struct page **ceph_get_direct_page_vector(const char __user *data, |
230 | int num_pages, | 230 | int num_pages); |
231 | loff_t off, size_t len); | ||
232 | extern void ceph_put_page_vector(struct page **pages, int num_pages); | 231 | extern void ceph_put_page_vector(struct page **pages, int num_pages); |
233 | extern void ceph_release_page_vector(struct page **pages, int num_pages); | 232 | extern void ceph_release_page_vector(struct page **pages, int num_pages); |
234 | extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); | 233 | extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); |
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 5956d62c3057..a108b425fee2 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h | |||
@@ -82,6 +82,7 @@ struct ceph_msg { | |||
82 | struct ceph_buffer *middle; | 82 | struct ceph_buffer *middle; |
83 | struct page **pages; /* data payload. NOT OWNER. */ | 83 | struct page **pages; /* data payload. NOT OWNER. */ |
84 | unsigned nr_pages; /* size of page array */ | 84 | unsigned nr_pages; /* size of page array */ |
85 | unsigned page_alignment; /* io offset in first page */ | ||
85 | struct ceph_pagelist *pagelist; /* instead of pages */ | 86 | struct ceph_pagelist *pagelist; /* instead of pages */ |
86 | struct list_head list_head; | 87 | struct list_head list_head; |
87 | struct kref kref; | 88 | struct kref kref; |
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 6c91fb032c39..a1af29648fb5 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
@@ -79,6 +79,7 @@ struct ceph_osd_request { | |||
79 | struct ceph_file_layout r_file_layout; | 79 | struct ceph_file_layout r_file_layout; |
80 | struct ceph_snap_context *r_snapc; /* snap context for writes */ | 80 | struct ceph_snap_context *r_snapc; /* snap context for writes */ |
81 | unsigned r_num_pages; /* size of page array (follows) */ | 81 | unsigned r_num_pages; /* size of page array (follows) */ |
82 | unsigned r_page_alignment; /* io offset in first page */ | ||
82 | struct page **r_pages; /* pages for data payload */ | 83 | struct page **r_pages; /* pages for data payload */ |
83 | int r_pages_from_pool; | 84 | int r_pages_from_pool; |
84 | int r_own_pages; /* if true, i own page list */ | 85 | int r_own_pages; /* if true, i own page list */ |
@@ -194,7 +195,8 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, | |||
194 | int do_sync, u32 truncate_seq, | 195 | int do_sync, u32 truncate_seq, |
195 | u64 truncate_size, | 196 | u64 truncate_size, |
196 | struct timespec *mtime, | 197 | struct timespec *mtime, |
197 | bool use_mempool, int num_reply); | 198 | bool use_mempool, int num_reply, |
199 | int page_align); | ||
198 | 200 | ||
199 | static inline void ceph_osdc_get_request(struct ceph_osd_request *req) | 201 | static inline void ceph_osdc_get_request(struct ceph_osd_request *req) |
200 | { | 202 | { |
@@ -218,7 +220,8 @@ extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, | |||
218 | struct ceph_file_layout *layout, | 220 | struct ceph_file_layout *layout, |
219 | u64 off, u64 *plen, | 221 | u64 off, u64 *plen, |
220 | u32 truncate_seq, u64 truncate_size, | 222 | u32 truncate_seq, u64 truncate_size, |
221 | struct page **pages, int nr_pages); | 223 | struct page **pages, int nr_pages, |
224 | int page_align); | ||
222 | 225 | ||
223 | extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, | 226 | extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, |
224 | struct ceph_vino vino, | 227 | struct ceph_vino vino, |
diff --git a/include/linux/fb.h b/include/linux/fb.h index 7fca3dc4e475..d1631d37e9e0 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -1122,6 +1122,7 @@ extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs | |||
1122 | 1122 | ||
1123 | /* drivers/video/fbcmap.c */ | 1123 | /* drivers/video/fbcmap.c */ |
1124 | extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp); | 1124 | extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp); |
1125 | extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags); | ||
1125 | extern void fb_dealloc_cmap(struct fb_cmap *cmap); | 1126 | extern void fb_dealloc_cmap(struct fb_cmap *cmap); |
1126 | extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to); | 1127 | extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to); |
1127 | extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to); | 1128 | extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to); |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 334d68a17108..c9e06cc70dad 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -34,9 +34,9 @@ | |||
34 | #define SEEK_MAX SEEK_END | 34 | #define SEEK_MAX SEEK_END |
35 | 35 | ||
36 | struct fstrim_range { | 36 | struct fstrim_range { |
37 | uint64_t start; | 37 | __u64 start; |
38 | uint64_t len; | 38 | __u64 len; |
39 | uint64_t minlen; | 39 | __u64 minlen; |
40 | }; | 40 | }; |
41 | 41 | ||
42 | /* And dynamically-tunable limits and defaults: */ | 42 | /* And dynamically-tunable limits and defaults: */ |
@@ -1612,7 +1612,6 @@ struct super_operations { | |||
1612 | ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); | 1612 | ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); |
1613 | #endif | 1613 | #endif |
1614 | int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); | 1614 | int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); |
1615 | int (*trim_fs) (struct super_block *, struct fstrim_range *); | ||
1616 | }; | 1615 | }; |
1617 | 1616 | ||
1618 | /* | 1617 | /* |
diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h index fc295d7ea463..781d4671415f 100644 --- a/include/linux/fsl-diu-fb.h +++ b/include/linux/fsl-diu-fb.h | |||
@@ -54,7 +54,6 @@ struct aoi_display_offset { | |||
54 | }; | 54 | }; |
55 | 55 | ||
56 | #define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key) | 56 | #define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key) |
57 | #define MFB_WAIT_FOR_VSYNC _IOW('F', 0x20, u_int32_t) | ||
58 | #define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8) | 57 | #define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8) |
59 | 58 | ||
60 | #define MFB_SET_ALPHA 0x80014d00 | 59 | #define MFB_SET_ALPHA 0x80014d00 |
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 41cb31f14ee3..32f9fd6619b4 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h | |||
@@ -2,9 +2,6 @@ | |||
2 | #define LINUX_HARDIRQ_H | 2 | #define LINUX_HARDIRQ_H |
3 | 3 | ||
4 | #include <linux/preempt.h> | 4 | #include <linux/preempt.h> |
5 | #ifdef CONFIG_PREEMPT | ||
6 | #include <linux/smp_lock.h> | ||
7 | #endif | ||
8 | #include <linux/lockdep.h> | 5 | #include <linux/lockdep.h> |
9 | #include <linux/ftrace_irq.h> | 6 | #include <linux/ftrace_irq.h> |
10 | #include <asm/hardirq.h> | 7 | #include <asm/hardirq.h> |
@@ -97,7 +94,8 @@ | |||
97 | #define in_nmi() (preempt_count() & NMI_MASK) | 94 | #define in_nmi() (preempt_count() & NMI_MASK) |
98 | 95 | ||
99 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL) | 96 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL) |
100 | # define PREEMPT_INATOMIC_BASE kernel_locked() | 97 | # include <linux/sched.h> |
98 | # define PREEMPT_INATOMIC_BASE (current->lock_depth >= 0) | ||
101 | #else | 99 | #else |
102 | # define PREEMPT_INATOMIC_BASE 0 | 100 | # define PREEMPT_INATOMIC_BASE 0 |
103 | #endif | 101 | #endif |
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index e844a0b18695..4bef5c557160 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h | |||
@@ -32,28 +32,6 @@ | |||
32 | */ | 32 | */ |
33 | 33 | ||
34 | /* --- Bit algorithm adapters */ | 34 | /* --- Bit algorithm adapters */ |
35 | #define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ | ||
36 | #define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */ | ||
37 | #define I2C_HW_B_ZR36067 0x010019 /* Zoran-36057/36067 based boards */ | ||
38 | #define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */ | 35 | #define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */ |
39 | #define I2C_HW_B_EM28XX 0x01001f /* em28xx video capture cards */ | ||
40 | #define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */ | ||
41 | #define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */ | ||
42 | #define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */ | ||
43 | #define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */ | ||
44 | #define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */ | ||
45 | |||
46 | /* --- SGI adapters */ | ||
47 | #define I2C_HW_SGI_VINO 0x160000 | ||
48 | |||
49 | /* --- SMBus only adapters */ | ||
50 | #define I2C_HW_SMBUS_W9968CF 0x04000d | ||
51 | #define I2C_HW_SMBUS_OV511 0x04000e /* OV511(+) USB 1.1 webcam ICs */ | ||
52 | #define I2C_HW_SMBUS_OV518 0x04000f /* OV518(+) USB 1.1 webcam ICs */ | ||
53 | #define I2C_HW_SMBUS_CAFE 0x040012 /* Marvell 88ALP01 "CAFE" cam */ | ||
54 | |||
55 | /* --- Miscellaneous adapters */ | ||
56 | #define I2C_HW_SAA7146 0x060000 /* SAA7146 video decoder bus */ | ||
57 | #define I2C_HW_SAA7134 0x090000 /* SAA7134 video decoder bus */ | ||
58 | 36 | ||
59 | #endif /* LINUX_I2C_ID_H */ | 37 | #endif /* LINUX_I2C_ID_H */ |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 889b35abaeda..56cfe23ffb39 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -353,7 +353,7 @@ struct i2c_algorithm { | |||
353 | */ | 353 | */ |
354 | struct i2c_adapter { | 354 | struct i2c_adapter { |
355 | struct module *owner; | 355 | struct module *owner; |
356 | unsigned int id; | 356 | unsigned int id __deprecated; |
357 | unsigned int class; /* classes to allow probing for */ | 357 | unsigned int class; /* classes to allow probing for */ |
358 | const struct i2c_algorithm *algo; /* the algorithm to access the bus */ | 358 | const struct i2c_algorithm *algo; /* the algorithm to access the bus */ |
359 | void *algo_data; | 359 | void *algo_data; |
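[Editor's note] The id field of struct i2c_adapter is only marked __deprecated here rather than removed, so remaining users still build. A hedged sketch of the effect (legacy_is_cx2388x() is a made-up helper, not part of this patch): any code that still reads the field should now trigger a deprecation warning on compilers that honour the attribute for structure members, which is how the last I2C_HW_* users are expected to be flushed out.

	/* Hypothetical driver fragment, shown only to illustrate __deprecated:
	 * reading adapter->id still compiles, but the build now emits a
	 * "'id' is deprecated" warning. */
	static bool legacy_is_cx2388x(struct i2c_adapter *adap)
	{
		return adap->id == I2C_HW_B_CX2388x;
	}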
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index fc3da9e4da19..b6de9a6f7018 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -17,13 +17,11 @@ | |||
17 | #include <linux/bitops.h> | 17 | #include <linux/bitops.h> |
18 | #include <linux/log2.h> | 18 | #include <linux/log2.h> |
19 | #include <linux/typecheck.h> | 19 | #include <linux/typecheck.h> |
20 | #include <linux/printk.h> | ||
20 | #include <linux/dynamic_debug.h> | 21 | #include <linux/dynamic_debug.h> |
21 | #include <asm/byteorder.h> | 22 | #include <asm/byteorder.h> |
22 | #include <asm/bug.h> | 23 | #include <asm/bug.h> |
23 | 24 | ||
24 | extern const char linux_banner[]; | ||
25 | extern const char linux_proc_banner[]; | ||
26 | |||
27 | #define USHRT_MAX ((u16)(~0U)) | 25 | #define USHRT_MAX ((u16)(~0U)) |
28 | #define SHRT_MAX ((s16)(USHRT_MAX>>1)) | 26 | #define SHRT_MAX ((s16)(USHRT_MAX>>1)) |
29 | #define SHRT_MIN ((s16)(-SHRT_MAX - 1)) | 27 | #define SHRT_MIN ((s16)(-SHRT_MAX - 1)) |
@@ -110,31 +108,6 @@ extern const char linux_proc_banner[]; | |||
110 | */ | 108 | */ |
111 | #define lower_32_bits(n) ((u32)(n)) | 109 | #define lower_32_bits(n) ((u32)(n)) |
112 | 110 | ||
113 | #define KERN_EMERG "<0>" /* system is unusable */ | ||
114 | #define KERN_ALERT "<1>" /* action must be taken immediately */ | ||
115 | #define KERN_CRIT "<2>" /* critical conditions */ | ||
116 | #define KERN_ERR "<3>" /* error conditions */ | ||
117 | #define KERN_WARNING "<4>" /* warning conditions */ | ||
118 | #define KERN_NOTICE "<5>" /* normal but significant condition */ | ||
119 | #define KERN_INFO "<6>" /* informational */ | ||
120 | #define KERN_DEBUG "<7>" /* debug-level messages */ | ||
121 | |||
122 | /* Use the default kernel loglevel */ | ||
123 | #define KERN_DEFAULT "<d>" | ||
124 | /* | ||
125 | * Annotation for a "continued" line of log printout (only done after a | ||
126 | * line that had no enclosing \n). Only to be used by core/arch code | ||
127 | * during early bootup (a continued line is not SMP-safe otherwise). | ||
128 | */ | ||
129 | #define KERN_CONT "<c>" | ||
130 | |||
131 | extern int console_printk[]; | ||
132 | |||
133 | #define console_loglevel (console_printk[0]) | ||
134 | #define default_message_loglevel (console_printk[1]) | ||
135 | #define minimum_console_loglevel (console_printk[2]) | ||
136 | #define default_console_loglevel (console_printk[3]) | ||
137 | |||
138 | struct completion; | 111 | struct completion; |
139 | struct pt_regs; | 112 | struct pt_regs; |
140 | struct user; | 113 | struct user; |
@@ -187,11 +160,6 @@ static inline void might_fault(void) | |||
187 | } | 160 | } |
188 | #endif | 161 | #endif |
189 | 162 | ||
190 | struct va_format { | ||
191 | const char *fmt; | ||
192 | va_list *va; | ||
193 | }; | ||
194 | |||
195 | extern struct atomic_notifier_head panic_notifier_list; | 163 | extern struct atomic_notifier_head panic_notifier_list; |
196 | extern long (*panic_blink)(int state); | 164 | extern long (*panic_blink)(int state); |
197 | NORET_TYPE void panic(const char * fmt, ...) | 165 | NORET_TYPE void panic(const char * fmt, ...) |
@@ -245,115 +213,8 @@ extern int func_ptr_is_kernel_text(void *ptr); | |||
245 | struct pid; | 213 | struct pid; |
246 | extern struct pid *session_of_pgrp(struct pid *pgrp); | 214 | extern struct pid *session_of_pgrp(struct pid *pgrp); |
247 | 215 | ||
248 | /* | ||
249 | * FW_BUG | ||
250 | * Add this to a message where you are sure the firmware is buggy or behaves | ||
251 | * really stupid or out of spec. Be aware that the responsible BIOS developer | ||
252 | * should be able to fix this issue or at least get a concrete idea of the | ||
253 | * problem by reading your message without the need of looking at the kernel | ||
254 | * code. | ||
255 | * | ||
256 | * Use it for definite and high priority BIOS bugs. | ||
257 | * | ||
258 | * FW_WARN | ||
259 | * Use it for not that clear (e.g. could the kernel messed up things already?) | ||
260 | * and medium priority BIOS bugs. | ||
261 | * | ||
262 | * FW_INFO | ||
263 | * Use this one if you want to tell the user or vendor about something | ||
264 | * suspicious, but generally harmless related to the firmware. | ||
265 | * | ||
266 | * Use it for information or very low priority BIOS bugs. | ||
267 | */ | ||
268 | #define FW_BUG "[Firmware Bug]: " | ||
269 | #define FW_WARN "[Firmware Warn]: " | ||
270 | #define FW_INFO "[Firmware Info]: " | ||
271 | |||
272 | /* | ||
273 | * HW_ERR | ||
274 | * Add this to a message for hardware errors, so that user can report | ||
275 | * it to hardware vendor instead of LKML or software vendor. | ||
276 | */ | ||
277 | #define HW_ERR "[Hardware Error]: " | ||
278 | |||
279 | #ifdef CONFIG_PRINTK | ||
280 | asmlinkage int vprintk(const char *fmt, va_list args) | ||
281 | __attribute__ ((format (printf, 1, 0))); | ||
282 | asmlinkage int printk(const char * fmt, ...) | ||
283 | __attribute__ ((format (printf, 1, 2))) __cold; | ||
284 | |||
285 | /* | ||
286 | * Please don't use printk_ratelimit(), because it shares ratelimiting state | ||
287 | * with all other unrelated printk_ratelimit() callsites. Instead use | ||
288 | * printk_ratelimited() or plain old __ratelimit(). | ||
289 | */ | ||
290 | extern int __printk_ratelimit(const char *func); | ||
291 | #define printk_ratelimit() __printk_ratelimit(__func__) | ||
292 | extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, | ||
293 | unsigned int interval_msec); | ||
294 | |||
295 | extern int printk_delay_msec; | ||
296 | extern int dmesg_restrict; | ||
297 | |||
298 | /* | ||
299 | * Print a one-time message (analogous to WARN_ONCE() et al): | ||
300 | */ | ||
301 | #define printk_once(x...) ({ \ | ||
302 | static bool __print_once; \ | ||
303 | \ | ||
304 | if (!__print_once) { \ | ||
305 | __print_once = true; \ | ||
306 | printk(x); \ | ||
307 | } \ | ||
308 | }) | ||
309 | |||
310 | void log_buf_kexec_setup(void); | ||
311 | #else | ||
312 | static inline int vprintk(const char *s, va_list args) | ||
313 | __attribute__ ((format (printf, 1, 0))); | ||
314 | static inline int vprintk(const char *s, va_list args) { return 0; } | ||
315 | static inline int printk(const char *s, ...) | ||
316 | __attribute__ ((format (printf, 1, 2))); | ||
317 | static inline int __cold printk(const char *s, ...) { return 0; } | ||
318 | static inline int printk_ratelimit(void) { return 0; } | ||
319 | static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ | ||
320 | unsigned int interval_msec) \ | ||
321 | { return false; } | ||
322 | |||
323 | /* No effect, but we still get type checking even in the !PRINTK case: */ | ||
324 | #define printk_once(x...) printk(x) | ||
325 | |||
326 | static inline void log_buf_kexec_setup(void) | ||
327 | { | ||
328 | } | ||
329 | #endif | ||
330 | |||
331 | /* | ||
332 | * Dummy printk for disabled debugging statements to use whilst maintaining | ||
333 | * gcc's format and side-effect checking. | ||
334 | */ | ||
335 | static inline __attribute__ ((format (printf, 1, 2))) | ||
336 | int no_printk(const char *s, ...) { return 0; } | ||
337 | |||
338 | extern int printk_needs_cpu(int cpu); | ||
339 | extern void printk_tick(void); | ||
340 | |||
341 | extern void asmlinkage __attribute__((format(printf, 1, 2))) | ||
342 | early_printk(const char *fmt, ...); | ||
343 | |||
344 | unsigned long int_sqrt(unsigned long); | 216 | unsigned long int_sqrt(unsigned long); |
345 | 217 | ||
346 | static inline void console_silent(void) | ||
347 | { | ||
348 | console_loglevel = 0; | ||
349 | } | ||
350 | |||
351 | static inline void console_verbose(void) | ||
352 | { | ||
353 | if (console_loglevel) | ||
354 | console_loglevel = 15; | ||
355 | } | ||
356 | |||
357 | extern void bust_spinlocks(int yes); | 218 | extern void bust_spinlocks(int yes); |
358 | extern void wake_up_klogd(void); | 219 | extern void wake_up_klogd(void); |
359 | extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ | 220 | extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ |
@@ -390,22 +251,6 @@ extern enum system_states { | |||
390 | #define TAINT_CRAP 10 | 251 | #define TAINT_CRAP 10 |
391 | #define TAINT_FIRMWARE_WORKAROUND 11 | 252 | #define TAINT_FIRMWARE_WORKAROUND 11 |
392 | 253 | ||
393 | extern void dump_stack(void) __cold; | ||
394 | |||
395 | enum { | ||
396 | DUMP_PREFIX_NONE, | ||
397 | DUMP_PREFIX_ADDRESS, | ||
398 | DUMP_PREFIX_OFFSET | ||
399 | }; | ||
400 | extern void hex_dump_to_buffer(const void *buf, size_t len, | ||
401 | int rowsize, int groupsize, | ||
402 | char *linebuf, size_t linebuflen, bool ascii); | ||
403 | extern void print_hex_dump(const char *level, const char *prefix_str, | ||
404 | int prefix_type, int rowsize, int groupsize, | ||
405 | const void *buf, size_t len, bool ascii); | ||
406 | extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, | ||
407 | const void *buf, size_t len); | ||
408 | |||
409 | extern const char hex_asc[]; | 254 | extern const char hex_asc[]; |
410 | #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] | 255 | #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] |
411 | #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] | 256 | #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] |
@@ -419,94 +264,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte) | |||
419 | 264 | ||
420 | extern int hex_to_bin(char ch); | 265 | extern int hex_to_bin(char ch); |
421 | 266 | ||
422 | #ifndef pr_fmt | ||
423 | #define pr_fmt(fmt) fmt | ||
424 | #endif | ||
425 | |||
426 | #define pr_emerg(fmt, ...) \ | ||
427 | printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) | ||
428 | #define pr_alert(fmt, ...) \ | ||
429 | printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) | ||
430 | #define pr_crit(fmt, ...) \ | ||
431 | printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) | ||
432 | #define pr_err(fmt, ...) \ | ||
433 | printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) | ||
434 | #define pr_warning(fmt, ...) \ | ||
435 | printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) | ||
436 | #define pr_warn pr_warning | ||
437 | #define pr_notice(fmt, ...) \ | ||
438 | printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) | ||
439 | #define pr_info(fmt, ...) \ | ||
440 | printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | ||
441 | #define pr_cont(fmt, ...) \ | ||
442 | printk(KERN_CONT fmt, ##__VA_ARGS__) | ||
443 | |||
444 | /* pr_devel() should produce zero code unless DEBUG is defined */ | ||
445 | #ifdef DEBUG | ||
446 | #define pr_devel(fmt, ...) \ | ||
447 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
448 | #else | ||
449 | #define pr_devel(fmt, ...) \ | ||
450 | ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) | ||
451 | #endif | ||
452 | |||
453 | /* If you are writing a driver, please use dev_dbg instead */ | ||
454 | #if defined(DEBUG) | ||
455 | #define pr_debug(fmt, ...) \ | ||
456 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
457 | #elif defined(CONFIG_DYNAMIC_DEBUG) | ||
458 | /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ | ||
459 | #define pr_debug(fmt, ...) \ | ||
460 | dynamic_pr_debug(fmt, ##__VA_ARGS__) | ||
461 | #else | ||
462 | #define pr_debug(fmt, ...) \ | ||
463 | ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) | ||
464 | #endif | ||
465 | |||
466 | /* | ||
467 | * ratelimited messages with local ratelimit_state, | ||
468 | * no local ratelimit_state used in the !PRINTK case | ||
469 | */ | ||
470 | #ifdef CONFIG_PRINTK | ||
471 | #define printk_ratelimited(fmt, ...) ({ \ | ||
472 | static DEFINE_RATELIMIT_STATE(_rs, \ | ||
473 | DEFAULT_RATELIMIT_INTERVAL, \ | ||
474 | DEFAULT_RATELIMIT_BURST); \ | ||
475 | \ | ||
476 | if (__ratelimit(&_rs)) \ | ||
477 | printk(fmt, ##__VA_ARGS__); \ | ||
478 | }) | ||
479 | #else | ||
480 | /* No effect, but we still get type checking even in the !PRINTK case: */ | ||
481 | #define printk_ratelimited printk | ||
482 | #endif | ||
483 | |||
484 | #define pr_emerg_ratelimited(fmt, ...) \ | ||
485 | printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) | ||
486 | #define pr_alert_ratelimited(fmt, ...) \ | ||
487 | printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) | ||
488 | #define pr_crit_ratelimited(fmt, ...) \ | ||
489 | printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) | ||
490 | #define pr_err_ratelimited(fmt, ...) \ | ||
491 | printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) | ||
492 | #define pr_warning_ratelimited(fmt, ...) \ | ||
493 | printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) | ||
494 | #define pr_warn_ratelimited pr_warning_ratelimited | ||
495 | #define pr_notice_ratelimited(fmt, ...) \ | ||
496 | printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) | ||
497 | #define pr_info_ratelimited(fmt, ...) \ | ||
498 | printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | ||
499 | /* no pr_cont_ratelimited, don't do that... */ | ||
500 | /* If you are writing a driver, please use dev_dbg instead */ | ||
501 | #if defined(DEBUG) | ||
502 | #define pr_debug_ratelimited(fmt, ...) \ | ||
503 | printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
504 | #else | ||
505 | #define pr_debug_ratelimited(fmt, ...) \ | ||
506 | ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \ | ||
507 | ##__VA_ARGS__); 0; }) | ||
508 | #endif | ||
509 | |||
510 | /* | 267 | /* |
511 | * General tracing related utility functions - trace_printk(), | 268 | * General tracing related utility functions - trace_printk(), |
512 | * tracing_on/tracing_off and tracing_start()/tracing_stop | 269 | * tracing_on/tracing_off and tracing_start()/tracing_stop |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 15b77b8dc7e1..d947b1231662 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -986,7 +986,7 @@ extern void ata_host_init(struct ata_host *, struct device *, | |||
986 | unsigned long, struct ata_port_operations *); | 986 | unsigned long, struct ata_port_operations *); |
987 | extern int ata_scsi_detect(struct scsi_host_template *sht); | 987 | extern int ata_scsi_detect(struct scsi_host_template *sht); |
988 | extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); | 988 | extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); |
989 | extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); | 989 | extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
990 | extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, | 990 | extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, |
991 | int cmd, void __user *arg); | 991 | int cmd, void __user *arg); |
992 | extern void ata_sas_port_destroy(struct ata_port *); | 992 | extern void ata_sas_port_destroy(struct ata_port *); |
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index a34dea46b629..2dee05e5119a 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h | |||
@@ -43,6 +43,7 @@ struct nlm_host { | |||
43 | struct sockaddr_storage h_addr; /* peer address */ | 43 | struct sockaddr_storage h_addr; /* peer address */ |
44 | size_t h_addrlen; | 44 | size_t h_addrlen; |
45 | struct sockaddr_storage h_srcaddr; /* our address (optional) */ | 45 | struct sockaddr_storage h_srcaddr; /* our address (optional) */ |
46 | size_t h_srcaddrlen; | ||
46 | struct rpc_clnt *h_rpcclnt; /* RPC client to talk to peer */ | 47 | struct rpc_clnt *h_rpcclnt; /* RPC client to talk to peer */ |
47 | char *h_name; /* remote hostname */ | 48 | char *h_name; /* remote hostname */ |
48 | u32 h_version; /* interface version */ | 49 | u32 h_version; /* interface version */ |
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 1ff81b51b656..dd3c34ebca9a 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #define MARVELL_PHY_ID_88E1118 0x01410e10 | 11 | #define MARVELL_PHY_ID_88E1118 0x01410e10 |
12 | #define MARVELL_PHY_ID_88E1121R 0x01410cb0 | 12 | #define MARVELL_PHY_ID_88E1121R 0x01410cb0 |
13 | #define MARVELL_PHY_ID_88E1145 0x01410cd0 | 13 | #define MARVELL_PHY_ID_88E1145 0x01410cd0 |
14 | #define MARVELL_PHY_ID_88E1149R 0x01410e50 | ||
14 | #define MARVELL_PHY_ID_88E1240 0x01410e30 | 15 | #define MARVELL_PHY_ID_88E1240 0x01410e30 |
15 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 | 16 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 |
16 | 17 | ||
diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h index a95141eafce3..bd581c6fa085 100644 --- a/include/linux/mfd/wm8350/audio.h +++ b/include/linux/mfd/wm8350/audio.h | |||
@@ -522,9 +522,6 @@ | |||
522 | #define WM8350_MCLK_SEL_PLL_32K 3 | 522 | #define WM8350_MCLK_SEL_PLL_32K 3 |
523 | #define WM8350_MCLK_SEL_MCLK 5 | 523 | #define WM8350_MCLK_SEL_MCLK 5 |
524 | 524 | ||
525 | #define WM8350_MCLK_DIR_OUT 0 | ||
526 | #define WM8350_MCLK_DIR_IN 1 | ||
527 | |||
528 | /* clock divider id's */ | 525 | /* clock divider id's */ |
529 | #define WM8350_ADC_CLKDIV 0 | 526 | #define WM8350_ADC_CLKDIV 0 |
530 | #define WM8350_DAC_CLKDIV 1 | 527 | #define WM8350_DAC_CLKDIV 1 |
diff --git a/include/linux/module.h b/include/linux/module.h index b29e7458b966..7575bbbdf2a2 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -517,7 +517,7 @@ static inline void __module_get(struct module *module) | |||
517 | #define symbol_put_addr(p) do { } while(0) | 517 | #define symbol_put_addr(p) do { } while(0) |
518 | 518 | ||
519 | #endif /* CONFIG_MODULE_UNLOAD */ | 519 | #endif /* CONFIG_MODULE_UNLOAD */ |
520 | int use_module(struct module *a, struct module *b); | 520 | int ref_module(struct module *a, struct module *b); |
521 | 521 | ||
522 | /* This is a #define so the string doesn't get put in every .o file */ | 522 | /* This is a #define so the string doesn't get put in every .o file */ |
523 | #define module_name(mod) \ | 523 | #define module_name(mod) \ |
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index bba26684acdc..c66fdb7d6998 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -593,12 +593,6 @@ nfs_fileid_to_ino_t(u64 fileid) | |||
593 | return ino; | 593 | return ino; |
594 | } | 594 | } |
595 | 595 | ||
596 | #define nfs_wait_event(clnt, wq, condition) \ | ||
597 | ({ \ | ||
598 | int __retval = wait_event_killable(wq, condition); \ | ||
599 | __retval; \ | ||
600 | }) | ||
601 | |||
602 | #define NFS_JUKEBOX_RETRY_TIME (5 * HZ) | 596 | #define NFS_JUKEBOX_RETRY_TIME (5 * HZ) |
603 | 597 | ||
604 | #endif /* __KERNEL__ */ | 598 | #endif /* __KERNEL__ */ |
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 5bb13b3db84d..b02195dfc1b0 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h | |||
@@ -59,8 +59,6 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \ | |||
59 | static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \ | 59 | static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \ |
60 | { return test_and_clear_bit(PCG_##lname, &pc->flags); } | 60 | { return test_and_clear_bit(PCG_##lname, &pc->flags); } |
61 | 61 | ||
62 | TESTPCGFLAG(Locked, LOCK) | ||
63 | |||
64 | /* Cache flag is set only once (at allocation) */ | 62 | /* Cache flag is set only once (at allocation) */ |
65 | TESTPCGFLAG(Cache, CACHE) | 63 | TESTPCGFLAG(Cache, CACHE) |
66 | CLEARPCGFLAG(Cache, CACHE) | 64 | CLEARPCGFLAG(Cache, CACHE) |
@@ -104,6 +102,11 @@ static inline void unlock_page_cgroup(struct page_cgroup *pc) | |||
104 | bit_spin_unlock(PCG_LOCK, &pc->flags); | 102 | bit_spin_unlock(PCG_LOCK, &pc->flags); |
105 | } | 103 | } |
106 | 104 | ||
105 | static inline int page_is_cgroup_locked(struct page_cgroup *pc) | ||
106 | { | ||
107 | return bit_spin_is_locked(PCG_LOCK, &pc->flags); | ||
108 | } | ||
109 | |||
107 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ | 110 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ |
108 | struct page_cgroup; | 111 | struct page_cgroup; |
109 | 112 | ||
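[Editor's note] The new page_is_cgroup_locked() helper only reports the state of the lock bit; it is intended for assertions in memcg code paths that must already hold the page_cgroup lock. A hedged illustration (update_page_stat() is a hypothetical name):

	/* Illustrative only: callers that rely on lock_page_cgroup() having
	 * been taken further up the stack can now assert it cheaply. */
	static void update_page_stat(struct page_cgroup *pc, int val)
	{
		VM_BUG_ON(!page_is_cgroup_locked(pc));
		/* ... adjust the per-memcg statistic by val ... */
	}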
diff --git a/include/linux/printk.h b/include/linux/printk.h new file mode 100644 index 000000000000..b772ca5fbdf0 --- /dev/null +++ b/include/linux/printk.h | |||
@@ -0,0 +1,248 @@ | |||
1 | #ifndef __KERNEL_PRINTK__ | ||
2 | #define __KERNEL_PRINTK__ | ||
3 | |||
4 | extern const char linux_banner[]; | ||
5 | extern const char linux_proc_banner[]; | ||
6 | |||
7 | #define KERN_EMERG "<0>" /* system is unusable */ | ||
8 | #define KERN_ALERT "<1>" /* action must be taken immediately */ | ||
9 | #define KERN_CRIT "<2>" /* critical conditions */ | ||
10 | #define KERN_ERR "<3>" /* error conditions */ | ||
11 | #define KERN_WARNING "<4>" /* warning conditions */ | ||
12 | #define KERN_NOTICE "<5>" /* normal but significant condition */ | ||
13 | #define KERN_INFO "<6>" /* informational */ | ||
14 | #define KERN_DEBUG "<7>" /* debug-level messages */ | ||
15 | |||
16 | /* Use the default kernel loglevel */ | ||
17 | #define KERN_DEFAULT "<d>" | ||
18 | /* | ||
19 | * Annotation for a "continued" line of log printout (only done after a | ||
20 | * line that had no enclosing \n). Only to be used by core/arch code | ||
21 | * during early bootup (a continued line is not SMP-safe otherwise). | ||
22 | */ | ||
23 | #define KERN_CONT "<c>" | ||
24 | |||
25 | extern int console_printk[]; | ||
26 | |||
27 | #define console_loglevel (console_printk[0]) | ||
28 | #define default_message_loglevel (console_printk[1]) | ||
29 | #define minimum_console_loglevel (console_printk[2]) | ||
30 | #define default_console_loglevel (console_printk[3]) | ||
31 | |||
32 | struct va_format { | ||
33 | const char *fmt; | ||
34 | va_list *va; | ||
35 | }; | ||
36 | |||
37 | /* | ||
38 | * FW_BUG | ||
39 | * Add this to a message where you are sure the firmware is buggy or behaves | ||
40 | * really stupid or out of spec. Be aware that the responsible BIOS developer | ||
41 | * should be able to fix this issue or at least get a concrete idea of the | ||
42 | * problem by reading your message without the need of looking at the kernel | ||
43 | * code. | ||
44 | * | ||
45 | * Use it for definite and high priority BIOS bugs. | ||
46 | * | ||
47 | * FW_WARN | ||
48 | * Use it for not that clear (e.g. could the kernel messed up things already?) | ||
49 | * and medium priority BIOS bugs. | ||
50 | * | ||
51 | * FW_INFO | ||
52 | * Use this one if you want to tell the user or vendor about something | ||
53 | * suspicious, but generally harmless related to the firmware. | ||
54 | * | ||
55 | * Use it for information or very low priority BIOS bugs. | ||
56 | */ | ||
57 | #define FW_BUG "[Firmware Bug]: " | ||
58 | #define FW_WARN "[Firmware Warn]: " | ||
59 | #define FW_INFO "[Firmware Info]: " | ||
60 | |||
61 | /* | ||
62 | * HW_ERR | ||
63 | * Add this to a message for hardware errors, so that user can report | ||
64 | * it to hardware vendor instead of LKML or software vendor. | ||
65 | */ | ||
66 | #define HW_ERR "[Hardware Error]: " | ||
67 | |||
68 | #ifdef CONFIG_PRINTK | ||
69 | asmlinkage int vprintk(const char *fmt, va_list args) | ||
70 | __attribute__ ((format (printf, 1, 0))); | ||
71 | asmlinkage int printk(const char * fmt, ...) | ||
72 | __attribute__ ((format (printf, 1, 2))) __cold; | ||
73 | |||
74 | /* | ||
75 | * Please don't use printk_ratelimit(), because it shares ratelimiting state | ||
76 | * with all other unrelated printk_ratelimit() callsites. Instead use | ||
77 | * printk_ratelimited() or plain old __ratelimit(). | ||
78 | */ | ||
79 | extern int __printk_ratelimit(const char *func); | ||
80 | #define printk_ratelimit() __printk_ratelimit(__func__) | ||
81 | extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, | ||
82 | unsigned int interval_msec); | ||
83 | |||
84 | extern int printk_delay_msec; | ||
85 | extern int dmesg_restrict; | ||
86 | |||
87 | /* | ||
88 | * Print a one-time message (analogous to WARN_ONCE() et al): | ||
89 | */ | ||
90 | #define printk_once(x...) ({ \ | ||
91 | static bool __print_once; \ | ||
92 | \ | ||
93 | if (!__print_once) { \ | ||
94 | __print_once = true; \ | ||
95 | printk(x); \ | ||
96 | } \ | ||
97 | }) | ||
98 | |||
99 | void log_buf_kexec_setup(void); | ||
100 | #else | ||
101 | static inline int vprintk(const char *s, va_list args) | ||
102 | __attribute__ ((format (printf, 1, 0))); | ||
103 | static inline int vprintk(const char *s, va_list args) { return 0; } | ||
104 | static inline int printk(const char *s, ...) | ||
105 | __attribute__ ((format (printf, 1, 2))); | ||
106 | static inline int __cold printk(const char *s, ...) { return 0; } | ||
107 | static inline int printk_ratelimit(void) { return 0; } | ||
108 | static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ | ||
109 | unsigned int interval_msec) \ | ||
110 | { return false; } | ||
111 | |||
112 | /* No effect, but we still get type checking even in the !PRINTK case: */ | ||
113 | #define printk_once(x...) printk(x) | ||
114 | |||
115 | static inline void log_buf_kexec_setup(void) | ||
116 | { | ||
117 | } | ||
118 | #endif | ||
119 | |||
120 | /* | ||
121 | * Dummy printk for disabled debugging statements to use whilst maintaining | ||
122 | * gcc's format and side-effect checking. | ||
123 | */ | ||
124 | static inline __attribute__ ((format (printf, 1, 2))) | ||
125 | int no_printk(const char *s, ...) { return 0; } | ||
126 | |||
127 | extern int printk_needs_cpu(int cpu); | ||
128 | extern void printk_tick(void); | ||
129 | |||
130 | extern void asmlinkage __attribute__((format(printf, 1, 2))) | ||
131 | early_printk(const char *fmt, ...); | ||
132 | |||
133 | static inline void console_silent(void) | ||
134 | { | ||
135 | console_loglevel = 0; | ||
136 | } | ||
137 | |||
138 | static inline void console_verbose(void) | ||
139 | { | ||
140 | if (console_loglevel) | ||
141 | console_loglevel = 15; | ||
142 | } | ||
143 | |||
144 | extern void dump_stack(void) __cold; | ||
145 | |||
146 | enum { | ||
147 | DUMP_PREFIX_NONE, | ||
148 | DUMP_PREFIX_ADDRESS, | ||
149 | DUMP_PREFIX_OFFSET | ||
150 | }; | ||
151 | extern void hex_dump_to_buffer(const void *buf, size_t len, | ||
152 | int rowsize, int groupsize, | ||
153 | char *linebuf, size_t linebuflen, bool ascii); | ||
154 | extern void print_hex_dump(const char *level, const char *prefix_str, | ||
155 | int prefix_type, int rowsize, int groupsize, | ||
156 | const void *buf, size_t len, bool ascii); | ||
157 | extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, | ||
158 | const void *buf, size_t len); | ||
159 | |||
160 | #ifndef pr_fmt | ||
161 | #define pr_fmt(fmt) fmt | ||
162 | #endif | ||
163 | |||
164 | #define pr_emerg(fmt, ...) \ | ||
165 | printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) | ||
166 | #define pr_alert(fmt, ...) \ | ||
167 | printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) | ||
168 | #define pr_crit(fmt, ...) \ | ||
169 | printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) | ||
170 | #define pr_err(fmt, ...) \ | ||
171 | printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) | ||
172 | #define pr_warning(fmt, ...) \ | ||
173 | printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) | ||
174 | #define pr_warn pr_warning | ||
175 | #define pr_notice(fmt, ...) \ | ||
176 | printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) | ||
177 | #define pr_info(fmt, ...) \ | ||
178 | printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | ||
179 | #define pr_cont(fmt, ...) \ | ||
180 | printk(KERN_CONT fmt, ##__VA_ARGS__) | ||
181 | |||
182 | /* pr_devel() should produce zero code unless DEBUG is defined */ | ||
183 | #ifdef DEBUG | ||
184 | #define pr_devel(fmt, ...) \ | ||
185 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
186 | #else | ||
187 | #define pr_devel(fmt, ...) \ | ||
188 | ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) | ||
189 | #endif | ||
190 | |||
191 | /* If you are writing a driver, please use dev_dbg instead */ | ||
192 | #if defined(DEBUG) | ||
193 | #define pr_debug(fmt, ...) \ | ||
194 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
195 | #elif defined(CONFIG_DYNAMIC_DEBUG) | ||
196 | /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ | ||
197 | #define pr_debug(fmt, ...) \ | ||
198 | dynamic_pr_debug(fmt, ##__VA_ARGS__) | ||
199 | #else | ||
200 | #define pr_debug(fmt, ...) \ | ||
201 | ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) | ||
202 | #endif | ||
203 | |||
204 | /* | ||
205 | * ratelimited messages with local ratelimit_state, | ||
206 | * no local ratelimit_state used in the !PRINTK case | ||
207 | */ | ||
208 | #ifdef CONFIG_PRINTK | ||
209 | #define printk_ratelimited(fmt, ...) ({ \ | ||
210 | static DEFINE_RATELIMIT_STATE(_rs, \ | ||
211 | DEFAULT_RATELIMIT_INTERVAL, \ | ||
212 | DEFAULT_RATELIMIT_BURST); \ | ||
213 | \ | ||
214 | if (__ratelimit(&_rs)) \ | ||
215 | printk(fmt, ##__VA_ARGS__); \ | ||
216 | }) | ||
217 | #else | ||
218 | /* No effect, but we still get type checking even in the !PRINTK case: */ | ||
219 | #define printk_ratelimited printk | ||
220 | #endif | ||
221 | |||
222 | #define pr_emerg_ratelimited(fmt, ...) \ | ||
223 | printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) | ||
224 | #define pr_alert_ratelimited(fmt, ...) \ | ||
225 | printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) | ||
226 | #define pr_crit_ratelimited(fmt, ...) \ | ||
227 | printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) | ||
228 | #define pr_err_ratelimited(fmt, ...) \ | ||
229 | printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) | ||
230 | #define pr_warning_ratelimited(fmt, ...) \ | ||
231 | printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) | ||
232 | #define pr_warn_ratelimited pr_warning_ratelimited | ||
233 | #define pr_notice_ratelimited(fmt, ...) \ | ||
234 | printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) | ||
235 | #define pr_info_ratelimited(fmt, ...) \ | ||
236 | printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | ||
237 | /* no pr_cont_ratelimited, don't do that... */ | ||
238 | /* If you are writing a driver, please use dev_dbg instead */ | ||
239 | #if defined(DEBUG) | ||
240 | #define pr_debug_ratelimited(fmt, ...) \ | ||
241 | printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
242 | #else | ||
243 | #define pr_debug_ratelimited(fmt, ...) \ | ||
244 | ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \ | ||
245 | ##__VA_ARGS__); 0; }) | ||
246 | #endif | ||
247 | |||
248 | #endif | ||
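[Editor's note] include/linux/printk.h is new, but its contents are lifted verbatim from kernel.h (see the matching removals above); since kernel.h now includes it, existing code keeps compiling unchanged. A small hedged sketch of the interface it carries, using a hypothetical "mydrv" prefix:

	/* Hypothetical driver snippet, only to show the macros now housed in
	 * <linux/printk.h>.  pr_fmt() must be defined before the header is
	 * pulled in (directly or via kernel.h) for the prefix to take effect. */
	#define pr_fmt(fmt) "mydrv: " fmt

	#include <linux/printk.h>

	static int mydrv_check_firmware(int status)
	{
		if (status < 0) {
			pr_err(FW_BUG "firmware reported %d, please update the BIOS\n",
			       status);
			return status;
		}
		pr_debug("firmware status %d\n", status); /* compiled out unless DEBUG or dynamic debug */
		return 0;
	}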
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 5ca47e59b727..c21072adbfad 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <asm/unaligned.h> | 22 | #include <asm/unaligned.h> |
23 | #include <linux/bitops.h> | 23 | #include <linux/bitops.h> |
24 | #include <linux/proc_fs.h> | 24 | #include <linux/proc_fs.h> |
25 | #include <linux/smp_lock.h> | ||
26 | #include <linux/buffer_head.h> | 25 | #include <linux/buffer_head.h> |
27 | #include <linux/reiserfs_fs_i.h> | 26 | #include <linux/reiserfs_fs_i.h> |
28 | #include <linux/reiserfs_fs_sb.h> | 27 | #include <linux/reiserfs_fs_sb.h> |
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index d42f274418b8..bbad657a3725 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -6,7 +6,6 @@ | |||
6 | #include <linux/if_link.h> | 6 | #include <linux/if_link.h> |
7 | #include <linux/if_addr.h> | 7 | #include <linux/if_addr.h> |
8 | #include <linux/neighbour.h> | 8 | #include <linux/neighbour.h> |
9 | #include <linux/netdevice.h> | ||
10 | 9 | ||
11 | /* rtnetlink families. Values up to 127 are reserved for real address | 10 | /* rtnetlink families. Values up to 127 are reserved for real address |
12 | * families, values above 128 may be used arbitrarily. | 11 | * families, values above 128 may be used arbitrarily. |
@@ -606,6 +605,7 @@ struct tcamsg { | |||
606 | #ifdef __KERNEL__ | 605 | #ifdef __KERNEL__ |
607 | 606 | ||
608 | #include <linux/mutex.h> | 607 | #include <linux/mutex.h> |
608 | #include <linux/netdevice.h> | ||
609 | 609 | ||
610 | static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str) | 610 | static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str) |
611 | { | 611 | { |
diff --git a/include/linux/sched.h b/include/linux/sched.h index d0036e52a24a..2c79e921a68b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -862,6 +862,7 @@ struct sched_group { | |||
862 | * single CPU. | 862 | * single CPU. |
863 | */ | 863 | */ |
864 | unsigned int cpu_power, cpu_power_orig; | 864 | unsigned int cpu_power, cpu_power_orig; |
865 | unsigned int group_weight; | ||
865 | 866 | ||
866 | /* | 867 | /* |
867 | * The CPUs this group covers. | 868 | * The CPUs this group covers. |
diff --git a/include/linux/security.h b/include/linux/security.h index b8246a8df7d2..fd4d55fb8845 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -77,7 +77,6 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
77 | extern int cap_task_setscheduler(struct task_struct *p); | 77 | extern int cap_task_setscheduler(struct task_struct *p); |
78 | extern int cap_task_setioprio(struct task_struct *p, int ioprio); | 78 | extern int cap_task_setioprio(struct task_struct *p, int ioprio); |
79 | extern int cap_task_setnice(struct task_struct *p, int nice); | 79 | extern int cap_task_setnice(struct task_struct *p, int nice); |
80 | extern int cap_syslog(int type, bool from_file); | ||
81 | extern int cap_vm_enough_memory(struct mm_struct *mm, long pages); | 80 | extern int cap_vm_enough_memory(struct mm_struct *mm, long pages); |
82 | 81 | ||
83 | struct msghdr; | 82 | struct msghdr; |
@@ -1388,7 +1387,7 @@ struct security_operations { | |||
1388 | int (*sysctl) (struct ctl_table *table, int op); | 1387 | int (*sysctl) (struct ctl_table *table, int op); |
1389 | int (*quotactl) (int cmds, int type, int id, struct super_block *sb); | 1388 | int (*quotactl) (int cmds, int type, int id, struct super_block *sb); |
1390 | int (*quota_on) (struct dentry *dentry); | 1389 | int (*quota_on) (struct dentry *dentry); |
1391 | int (*syslog) (int type, bool from_file); | 1390 | int (*syslog) (int type); |
1392 | int (*settime) (struct timespec *ts, struct timezone *tz); | 1391 | int (*settime) (struct timespec *ts, struct timezone *tz); |
1393 | int (*vm_enough_memory) (struct mm_struct *mm, long pages); | 1392 | int (*vm_enough_memory) (struct mm_struct *mm, long pages); |
1394 | 1393 | ||
@@ -1671,7 +1670,7 @@ int security_real_capable_noaudit(struct task_struct *tsk, int cap); | |||
1671 | int security_sysctl(struct ctl_table *table, int op); | 1670 | int security_sysctl(struct ctl_table *table, int op); |
1672 | int security_quotactl(int cmds, int type, int id, struct super_block *sb); | 1671 | int security_quotactl(int cmds, int type, int id, struct super_block *sb); |
1673 | int security_quota_on(struct dentry *dentry); | 1672 | int security_quota_on(struct dentry *dentry); |
1674 | int security_syslog(int type, bool from_file); | 1673 | int security_syslog(int type); |
1675 | int security_settime(struct timespec *ts, struct timezone *tz); | 1674 | int security_settime(struct timespec *ts, struct timezone *tz); |
1676 | int security_vm_enough_memory(long pages); | 1675 | int security_vm_enough_memory(long pages); |
1677 | int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); | 1676 | int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); |
@@ -1901,9 +1900,9 @@ static inline int security_quota_on(struct dentry *dentry) | |||
1901 | return 0; | 1900 | return 0; |
1902 | } | 1901 | } |
1903 | 1902 | ||
1904 | static inline int security_syslog(int type, bool from_file) | 1903 | static inline int security_syslog(int type) |
1905 | { | 1904 | { |
1906 | return cap_syslog(type, from_file); | 1905 | return 0; |
1907 | } | 1906 | } |
1908 | 1907 | ||
1909 | static inline int security_settime(struct timespec *ts, struct timezone *tz) | 1908 | static inline int security_settime(struct timespec *ts, struct timezone *tz) |
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h index cea0c38e7a63..9a52f72527dc 100644 --- a/include/linux/sh_clk.h +++ b/include/linux/sh_clk.h | |||
@@ -19,11 +19,13 @@ struct clk_mapping { | |||
19 | }; | 19 | }; |
20 | 20 | ||
21 | struct clk_ops { | 21 | struct clk_ops { |
22 | #ifdef CONFIG_SH_CLK_CPG_LEGACY | ||
22 | void (*init)(struct clk *clk); | 23 | void (*init)(struct clk *clk); |
24 | #endif | ||
23 | int (*enable)(struct clk *clk); | 25 | int (*enable)(struct clk *clk); |
24 | void (*disable)(struct clk *clk); | 26 | void (*disable)(struct clk *clk); |
25 | unsigned long (*recalc)(struct clk *clk); | 27 | unsigned long (*recalc)(struct clk *clk); |
26 | int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id); | 28 | int (*set_rate)(struct clk *clk, unsigned long rate); |
27 | int (*set_parent)(struct clk *clk, struct clk *parent); | 29 | int (*set_parent)(struct clk *clk, struct clk *parent); |
28 | long (*round_rate)(struct clk *clk, unsigned long rate); | 30 | long (*round_rate)(struct clk *clk, unsigned long rate); |
29 | }; | 31 | }; |
@@ -67,36 +69,6 @@ int clk_register(struct clk *); | |||
67 | void clk_unregister(struct clk *); | 69 | void clk_unregister(struct clk *); |
68 | void clk_enable_init_clocks(void); | 70 | void clk_enable_init_clocks(void); |
69 | 71 | ||
70 | /** | ||
71 | * clk_set_rate_ex - set the clock rate for a clock source, with additional parameter | ||
72 | * @clk: clock source | ||
73 | * @rate: desired clock rate in Hz | ||
74 | * @algo_id: algorithm id to be passed down to ops->set_rate | ||
75 | * | ||
76 | * Returns success (0) or negative errno. | ||
77 | */ | ||
78 | int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id); | ||
79 | |||
80 | enum clk_sh_algo_id { | ||
81 | NO_CHANGE = 0, | ||
82 | |||
83 | IUS_N1_N1, | ||
84 | IUS_322, | ||
85 | IUS_522, | ||
86 | IUS_N11, | ||
87 | |||
88 | SB_N1, | ||
89 | |||
90 | SB3_N1, | ||
91 | SB3_32, | ||
92 | SB3_43, | ||
93 | SB3_54, | ||
94 | |||
95 | BP_N1, | ||
96 | |||
97 | IP_N1, | ||
98 | }; | ||
99 | |||
100 | struct clk_div_mult_table { | 72 | struct clk_div_mult_table { |
101 | unsigned int *divisors; | 73 | unsigned int *divisors; |
102 | unsigned int nr_divisors; | 74 | unsigned int nr_divisors; |
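[Editor's note] With clk_set_rate_ex() and the algo_id argument gone, an SH clock driver's set_rate callback takes just the clock and the target rate. A hedged sketch under made-up myclk_* names:

	/* Sketch only: what a clk_ops instance looks like after this change.
	 * The register poking is elided; the point is the two-argument
	 * set_rate. */
	static unsigned long myclk_recalc(struct clk *clk)
	{
		return 48000000;	/* placeholder rate in Hz */
	}

	static int myclk_set_rate(struct clk *clk, unsigned long rate)
	{
		/* ... program the divider for "rate" ... */
		return 0;
	}

	static struct clk_ops myclk_ops = {
		.recalc		= myclk_recalc,
		.set_rate	= myclk_set_rate,
	};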
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h index f656d1a43dc0..5812fefbcedf 100644 --- a/include/linux/sh_intc.h +++ b/include/linux/sh_intc.h | |||
@@ -79,7 +79,7 @@ struct intc_hw_desc { | |||
79 | unsigned int nr_subgroups; | 79 | unsigned int nr_subgroups; |
80 | }; | 80 | }; |
81 | 81 | ||
82 | #define _INTC_ARRAY(a) a, a == NULL ? 0 : sizeof(a)/sizeof(*a) | 82 | #define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 0 : sizeof(a)/sizeof(*a) |
83 | 83 | ||
84 | #define INTC_HW_DESC(vectors, groups, mask_regs, \ | 84 | #define INTC_HW_DESC(vectors, groups, mask_regs, \ |
85 | prio_regs, sense_regs, ack_regs) \ | 85 | prio_regs, sense_regs, ack_regs) \ |
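[Editor's note] The old _INTC_ARRAY() compared its argument against NULL; when the argument is a real array, newer gcc warns that the address can never be NULL. __same_type() sidesteps that by asking at compile time whether the argument has the same type as NULL. A hedged illustration (the __same_type definition shown is the usual kernel one from its compiler headers, quoted from memory):

	/* Roughly how __same_type() is defined elsewhere in the kernel: */
	#define __same_type(a, b)  __builtin_types_compatible_p(typeof(a), typeof(b))

	/*
	 * For an array argument the types differ, so the element count is used
	 * and no "comparison will always be false" warning is emitted:
	 *
	 *   static struct intc_vect vectors[] = { ... };
	 *   _INTC_ARRAY(vectors) -> vectors, 0 ? 0 : sizeof(vectors)/sizeof(*vectors)
	 *
	 * For a literal NULL the types match, so the count collapses to 0:
	 *
	 *   _INTC_ARRAY(NULL)    -> NULL, 0
	 */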
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index 291f721144c2..3a1988202731 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h | |||
@@ -4,8 +4,6 @@ | |||
4 | #ifdef CONFIG_LOCK_KERNEL | 4 | #ifdef CONFIG_LOCK_KERNEL |
5 | #include <linux/sched.h> | 5 | #include <linux/sched.h> |
6 | 6 | ||
7 | #define kernel_locked() (current->lock_depth >= 0) | ||
8 | |||
9 | extern int __lockfunc __reacquire_kernel_lock(void); | 7 | extern int __lockfunc __reacquire_kernel_lock(void); |
10 | extern void __lockfunc __release_kernel_lock(void); | 8 | extern void __lockfunc __release_kernel_lock(void); |
11 | 9 | ||
@@ -58,7 +56,6 @@ static inline void cycle_kernel_lock(void) | |||
58 | #define lock_kernel() | 56 | #define lock_kernel() |
59 | #define unlock_kernel() | 57 | #define unlock_kernel() |
60 | #define cycle_kernel_lock() do { } while(0) | 58 | #define cycle_kernel_lock() do { } while(0) |
61 | #define kernel_locked() 1 | ||
62 | #endif /* CONFIG_BKL */ | 59 | #endif /* CONFIG_BKL */ |
63 | 60 | ||
64 | #define release_kernel_lock(task) do { } while(0) | 61 | #define release_kernel_lock(task) do { } while(0) |
diff --git a/include/linux/tty.h b/include/linux/tty.h index c7ea9bc8897c..032d79ff1d9d 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/tty_driver.h> | 13 | #include <linux/tty_driver.h> |
14 | #include <linux/tty_ldisc.h> | 14 | #include <linux/tty_ldisc.h> |
15 | #include <linux/mutex.h> | 15 | #include <linux/mutex.h> |
16 | #include <linux/smp_lock.h> | ||
17 | 16 | ||
18 | #include <asm/system.h> | 17 | #include <asm/system.h> |
19 | 18 | ||
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 2a7936d7851d..97b8b7c9b63c 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
@@ -1355,7 +1355,7 @@ enum wiphy_flags { | |||
1355 | WIPHY_FLAG_4ADDR_AP = BIT(5), | 1355 | WIPHY_FLAG_4ADDR_AP = BIT(5), |
1356 | WIPHY_FLAG_4ADDR_STATION = BIT(6), | 1356 | WIPHY_FLAG_4ADDR_STATION = BIT(6), |
1357 | WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7), | 1357 | WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7), |
1358 | WIPHY_FLAG_IBSS_RSN = BIT(7), | 1358 | WIPHY_FLAG_IBSS_RSN = BIT(8), |
1359 | }; | 1359 | }; |
1360 | 1360 | ||
1361 | struct mac_address { | 1361 | struct mac_address { |
diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 55590ab16b3e..6beb1ffc2b7f 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h | |||
@@ -303,7 +303,7 @@ static inline void neigh_confirm(struct neighbour *neigh) | |||
303 | 303 | ||
304 | static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) | 304 | static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) |
305 | { | 305 | { |
306 | unsigned long now = ACCESS_ONCE(jiffies); | 306 | unsigned long now = jiffies; |
307 | 307 | ||
308 | if (neigh->used != now) | 308 | if (neigh->used != now) |
309 | neigh->used = now; | 309 | neigh->used = now; |
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index f986ab7ffe6f..5c4c1678f7be 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h | |||
@@ -1006,8 +1006,7 @@ void fc_fcp_destroy(struct fc_lport *); | |||
1006 | /* | 1006 | /* |
1007 | * SCSI INTERACTION LAYER | 1007 | * SCSI INTERACTION LAYER |
1008 | *****************************/ | 1008 | *****************************/ |
1009 | int fc_queuecommand(struct scsi_cmnd *, | 1009 | int fc_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); |
1010 | void (*done)(struct scsi_cmnd *)); | ||
1011 | int fc_eh_abort(struct scsi_cmnd *); | 1010 | int fc_eh_abort(struct scsi_cmnd *); |
1012 | int fc_eh_device_reset(struct scsi_cmnd *); | 1011 | int fc_eh_device_reset(struct scsi_cmnd *); |
1013 | int fc_eh_host_reset(struct scsi_cmnd *); | 1012 | int fc_eh_host_reset(struct scsi_cmnd *); |
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index ae5196aae1a5..b81d969ddc67 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h | |||
@@ -341,8 +341,7 @@ extern int iscsi_eh_abort(struct scsi_cmnd *sc); | |||
341 | extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); | 341 | extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); |
342 | extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); | 342 | extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); |
343 | extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); | 343 | extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); |
344 | extern int iscsi_queuecommand(struct scsi_cmnd *sc, | 344 | extern int iscsi_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *sc); |
345 | void (*done)(struct scsi_cmnd *)); | ||
346 | 345 | ||
347 | /* | 346 | /* |
348 | * iSCSI host helpers. | 347 | * iSCSI host helpers. |
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 3dec1949f69c..90ce527ecf3d 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h | |||
@@ -621,8 +621,7 @@ int sas_set_phy_speed(struct sas_phy *phy, | |||
621 | int sas_phy_enable(struct sas_phy *phy, int enabled); | 621 | int sas_phy_enable(struct sas_phy *phy, int enabled); |
622 | int sas_phy_reset(struct sas_phy *phy, int hard_reset); | 622 | int sas_phy_reset(struct sas_phy *phy, int hard_reset); |
623 | int sas_queue_up(struct sas_task *task); | 623 | int sas_queue_up(struct sas_task *task); |
624 | extern int sas_queuecommand(struct scsi_cmnd *, | 624 | extern int sas_queuecommand(struct Scsi_Host * ,struct scsi_cmnd *); |
625 | void (*scsi_done)(struct scsi_cmnd *)); | ||
626 | extern int sas_target_alloc(struct scsi_target *); | 625 | extern int sas_target_alloc(struct scsi_target *); |
627 | extern int sas_slave_alloc(struct scsi_device *); | 626 | extern int sas_slave_alloc(struct scsi_device *); |
628 | extern int sas_slave_configure(struct scsi_device *); | 627 | extern int sas_slave_configure(struct scsi_device *); |
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index d0a6a845f204..e7e385842a38 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
@@ -127,8 +127,7 @@ struct scsi_host_template { | |||
127 | * | 127 | * |
128 | * STATUS: REQUIRED | 128 | * STATUS: REQUIRED |
129 | */ | 129 | */ |
130 | int (* queuecommand)(struct scsi_cmnd *, | 130 | int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); |
131 | void (*done)(struct scsi_cmnd *)); | ||
132 | 131 | ||
133 | /* | 132 | /* |
134 | * The transfer functions are used to queue a scsi command to | 133 | * The transfer functions are used to queue a scsi command to |
@@ -505,6 +504,25 @@ struct scsi_host_template { | |||
505 | }; | 504 | }; |
506 | 505 | ||
507 | /* | 506 | /* |
507 | * Temporary #define for host lock push down. Can be removed when all | ||
508 | * drivers have been updated to take advantage of unlocked | ||
509 | * queuecommand. | ||
510 | * | ||
511 | */ | ||
512 | #define DEF_SCSI_QCMD(func_name) \ | ||
513 | int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd) \ | ||
514 | { \ | ||
515 | unsigned long irq_flags; \ | ||
516 | int rc; \ | ||
517 | spin_lock_irqsave(shost->host_lock, irq_flags); \ | ||
518 | scsi_cmd_get_serial(shost, cmd); \ | ||
519 | rc = func_name##_lck (cmd, cmd->scsi_done); \ | ||
520 | spin_unlock_irqrestore(shost->host_lock, irq_flags); \ | ||
521 | return rc; \ | ||
522 | } | ||
523 | |||
524 | |||
525 | /* | ||
508 | * shost state: If you alter this, you also need to alter scsi_sysfs.c | 526 | * shost state: If you alter this, you also need to alter scsi_sysfs.c |
509 | * (for the ascii descriptions) and the state model enforcer: | 527 | * (for the ascii descriptions) and the state model enforcer: |
510 | * scsi_host_set_state() | 528 | * scsi_host_set_state() |
@@ -752,6 +770,7 @@ extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *); | |||
752 | extern void scsi_host_put(struct Scsi_Host *t); | 770 | extern void scsi_host_put(struct Scsi_Host *t); |
753 | extern struct Scsi_Host *scsi_host_lookup(unsigned short); | 771 | extern struct Scsi_Host *scsi_host_lookup(unsigned short); |
754 | extern const char *scsi_host_state_name(enum scsi_host_state); | 772 | extern const char *scsi_host_state_name(enum scsi_host_state); |
773 | extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *); | ||
755 | 774 | ||
756 | extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *); | 775 | extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *); |
757 | 776 | ||
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h index 6316cdabf73f..89d43b3d4cb9 100644 --- a/include/video/da8xx-fb.h +++ b/include/video/da8xx-fb.h | |||
@@ -99,7 +99,6 @@ struct lcd_sync_arg { | |||
99 | #define FBIPUT_COLOR _IOW('F', 6, int) | 99 | #define FBIPUT_COLOR _IOW('F', 6, int) |
100 | #define FBIPUT_HSYNC _IOW('F', 9, int) | 100 | #define FBIPUT_HSYNC _IOW('F', 9, int) |
101 | #define FBIPUT_VSYNC _IOW('F', 10, int) | 101 | #define FBIPUT_VSYNC _IOW('F', 10, int) |
102 | #define FBIO_WAITFORVSYNC _IOW('F', 0x20, u_int32_t) | ||
103 | 102 | ||
104 | #endif /* ifndef DA8XX_FB_H */ | 103 | #endif /* ifndef DA8XX_FB_H */ |
105 | 104 | ||
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h index d7a6c13bde69..eac3ce153719 100644 --- a/include/xen/interface/memory.h +++ b/include/xen/interface/memory.h | |||
@@ -141,6 +141,19 @@ struct xen_machphys_mfn_list { | |||
141 | DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); | 141 | DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); |
142 | 142 | ||
143 | /* | 143 | /* |
144 | * Returns the location in virtual address space of the machine_to_phys | ||
145 | * mapping table. Architectures which do not have a m2p table, or which do not | ||
146 | * map it by default into guest address space, do not implement this command. | ||
147 | * arg == addr of xen_machphys_mapping_t. | ||
148 | */ | ||
149 | #define XENMEM_machphys_mapping 12 | ||
150 | struct xen_machphys_mapping { | ||
151 | unsigned long v_start, v_end; /* Start and end virtual addresses. */ | ||
152 | unsigned long max_mfn; /* Maximum MFN that can be looked up. */ | ||
153 | }; | ||
154 | DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping_t); | ||
155 | |||
156 | /* | ||
144 | * Sets the GPFN at which a particular page appears in the specified guest's | 157 | * Sets the GPFN at which a particular page appears in the specified guest's |
145 | * pseudophysical address space. | 158 | * pseudophysical address space. |
146 | * arg == addr of xen_add_to_physmap_t. | 159 | * arg == addr of xen_add_to_physmap_t. |
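[Editor's note] XENMEM_machphys_mapping lets a guest ask the hypervisor where, and how large, the machine-to-physical table is instead of assuming a fixed virtual address. A hedged sketch of a query (HYPERVISOR_memory_op() is the kernel's usual memory-op hypercall wrapper; names and error handling are simplified):

	/* Illustrative only: discover the M2P table location at boot. */
	static unsigned long m2p_start, m2p_entries;

	static void __init xen_probe_m2p(void)
	{
		struct xen_machphys_mapping mapping;

		if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
			m2p_start   = mapping.v_start;
			m2p_entries = mapping.max_mfn + 1;
		}
		/* else: fall back to the architecture's default M2P location */
	}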
diff --git a/include/xen/page.h b/include/xen/page.h index eaf85fab1263..0be36b976f4b 100644 --- a/include/xen/page.h +++ b/include/xen/page.h | |||
@@ -1 +1,8 @@ | |||
1 | #ifndef _XEN_PAGE_H | ||
2 | #define _XEN_PAGE_H | ||
3 | |||
1 | #include <asm/xen/page.h> | 4 | #include <asm/xen/page.h> |
5 | |||
6 | extern phys_addr_t xen_extra_mem_start, xen_extra_mem_size; | ||
7 | |||
8 | #endif /* _XEN_PAGE_H */ | ||
diff --git a/include/xen/privcmd.h b/include/xen/privcmd.h index b42cdfd92fee..17857fb4d550 100644 --- a/include/xen/privcmd.h +++ b/include/xen/privcmd.h | |||
@@ -34,13 +34,10 @@ | |||
34 | #define __LINUX_PUBLIC_PRIVCMD_H__ | 34 | #define __LINUX_PUBLIC_PRIVCMD_H__ |
35 | 35 | ||
36 | #include <linux/types.h> | 36 | #include <linux/types.h> |
37 | #include <linux/compiler.h> | ||
37 | 38 | ||
38 | typedef unsigned long xen_pfn_t; | 39 | typedef unsigned long xen_pfn_t; |
39 | 40 | ||
40 | #ifndef __user | ||
41 | #define __user | ||
42 | #endif | ||
43 | |||
44 | struct privcmd_hypercall { | 41 | struct privcmd_hypercall { |
45 | __u64 op; | 42 | __u64 op; |
46 | __u64 arg[5]; | 43 | __u64 arg[5]; |
diff --git a/init/Kconfig b/init/Kconfig index 88c10468db46..c9728992a776 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -613,6 +613,19 @@ config CGROUP_MEM_RES_CTLR_SWAP | |||
613 | if boot option "noswapaccount" is set, swap will not be accounted. | 613 | if boot option "noswapaccount" is set, swap will not be accounted. |
614 | Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page | 614 | Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page |
615 | size is 4096bytes, 512k per 1Gbytes of swap. | 615 | size is 4096bytes, 512k per 1Gbytes of swap. |
616 | config CGROUP_MEM_RES_CTLR_SWAP_ENABLED | ||
617 | bool "Memory Resource Controller Swap Extension enabled by default" | ||
618 | depends on CGROUP_MEM_RES_CTLR_SWAP | ||
619 | default y | ||
620 | help | ||
621 | Memory Resource Controller Swap Extension comes with its price in | ||
622 | a bigger memory consumption. General purpose distribution kernels | ||
623 | which want to enable the feature but keep it disabled by default | ||
624 | and let the user enable it by swapaccount boot command line | ||
625 | parameter should have this option unselected. | ||
626 | Those who want to have the feature enabled by default should | ||
627 | select this option (if, for some reason, they need to disable it | ||
628 | then noswapaccount does the trick). | ||
616 | 629 | ||
617 | menuconfig CGROUP_SCHED | 630 | menuconfig CGROUP_SCHED |
618 | bool "Group CPU scheduler" | 631 | bool "Group CPU scheduler" |
diff --git a/init/main.c b/init/main.c index e59af24a0b7c..8646401f7a0e 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/ioport.h> | 21 | #include <linux/ioport.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/smp_lock.h> | ||
24 | #include <linux/initrd.h> | 23 | #include <linux/initrd.h> |
25 | #include <linux/bootmem.h> | 24 | #include <linux/bootmem.h> |
26 | #include <linux/acpi.h> | 25 | #include <linux/acpi.h> |
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 37755d621924..a6e729766821 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c | |||
@@ -82,7 +82,7 @@ static kdbtab_t kdb_base_commands[50]; | |||
82 | #define for_each_kdbcmd(cmd, num) \ | 82 | #define for_each_kdbcmd(cmd, num) \ |
83 | for ((cmd) = kdb_base_commands, (num) = 0; \ | 83 | for ((cmd) = kdb_base_commands, (num) = 0; \ |
84 | num < kdb_max_commands; \ | 84 | num < kdb_max_commands; \ |
85 | num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++, num++) | 85 | num++, num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++) |
86 | 86 | ||
87 | typedef struct _kdbmsg { | 87 | typedef struct _kdbmsg { |
88 | int km_diag; /* kdb diagnostic */ | 88 | int km_diag; /* kdb diagnostic */ |
@@ -646,7 +646,7 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0) | |||
646 | } | 646 | } |
647 | if (!s->usable) | 647 | if (!s->usable) |
648 | return KDB_NOTIMP; | 648 | return KDB_NOTIMP; |
649 | s->command = kmalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB); | 649 | s->command = kzalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB); |
650 | if (!s->command) { | 650 | if (!s->command) { |
651 | kdb_printf("Could not allocate new kdb_defcmd table for %s\n", | 651 | kdb_printf("Could not allocate new kdb_defcmd table for %s\n", |
652 | cmdstr); | 652 | cmdstr); |
@@ -2361,7 +2361,7 @@ static int kdb_pid(int argc, const char **argv) | |||
2361 | */ | 2361 | */ |
2362 | static int kdb_ll(int argc, const char **argv) | 2362 | static int kdb_ll(int argc, const char **argv) |
2363 | { | 2363 | { |
2364 | int diag; | 2364 | int diag = 0; |
2365 | unsigned long addr; | 2365 | unsigned long addr; |
2366 | long offset = 0; | 2366 | long offset = 0; |
2367 | unsigned long va; | 2367 | unsigned long va; |
@@ -2400,20 +2400,21 @@ static int kdb_ll(int argc, const char **argv) | |||
2400 | char buf[80]; | 2400 | char buf[80]; |
2401 | 2401 | ||
2402 | if (KDB_FLAG(CMD_INTERRUPT)) | 2402 | if (KDB_FLAG(CMD_INTERRUPT)) |
2403 | return 0; | 2403 | goto out; |
2404 | 2404 | ||
2405 | sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va); | 2405 | sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va); |
2406 | diag = kdb_parse(buf); | 2406 | diag = kdb_parse(buf); |
2407 | if (diag) | 2407 | if (diag) |
2408 | return diag; | 2408 | goto out; |
2409 | 2409 | ||
2410 | addr = va + linkoffset; | 2410 | addr = va + linkoffset; |
2411 | if (kdb_getword(&va, addr, sizeof(va))) | 2411 | if (kdb_getword(&va, addr, sizeof(va))) |
2412 | return 0; | 2412 | goto out; |
2413 | } | 2413 | } |
2414 | kfree(command); | ||
2415 | 2414 | ||
2416 | return 0; | 2415 | out: |
2416 | kfree(command); | ||
2417 | return diag; | ||
2417 | } | 2418 | } |
2418 | 2419 | ||
2419 | static int kdb_kgdb(int argc, const char **argv) | 2420 | static int kdb_kgdb(int argc, const char **argv) |
@@ -2739,13 +2740,13 @@ int kdb_register_repeat(char *cmd, | |||
2739 | } | 2740 | } |
2740 | if (kdb_commands) { | 2741 | if (kdb_commands) { |
2741 | memcpy(new, kdb_commands, | 2742 | memcpy(new, kdb_commands, |
2742 | kdb_max_commands * sizeof(*new)); | 2743 | (kdb_max_commands - KDB_BASE_CMD_MAX) * sizeof(*new)); |
2743 | kfree(kdb_commands); | 2744 | kfree(kdb_commands); |
2744 | } | 2745 | } |
2745 | memset(new + kdb_max_commands, 0, | 2746 | memset(new + kdb_max_commands, 0, |
2746 | kdb_command_extend * sizeof(*new)); | 2747 | kdb_command_extend * sizeof(*new)); |
2747 | kdb_commands = new; | 2748 | kdb_commands = new; |
2748 | kp = kdb_commands + kdb_max_commands; | 2749 | kp = kdb_commands + kdb_max_commands - KDB_BASE_CMD_MAX; |
2749 | kdb_max_commands += kdb_command_extend; | 2750 | kdb_max_commands += kdb_command_extend; |
2750 | } | 2751 | } |
2751 | 2752 | ||
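The kdb_ll() hunk above replaces three early returns with jumps to a single out: label so the command buffer duplicated earlier in the function is always freed and the last diagnostic code is propagated; the for_each_kdbcmd() tweak in the same file reorders the comma expression so num is incremented before it is compared against KDB_BASE_CMD_MAX, letting the walk switch from the static table to the dynamic kdb_commands table on the right iteration. A minimal userspace sketch of the single-exit cleanup pattern (function and variable names here are illustrative, not taken from kdb):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Walk a NULL-terminated list of arguments, "parsing" each one.  Every
     * exit path funnels through the out: label so the duplicated command
     * string is freed exactly once and the last status is returned. */
    static int run_on_list(const char *cmdstr, const char *const *items)
    {
        int diag = 0;                    /* 0 == success, as in kdb */
        char *command = strdup(cmdstr);  /* analogous to kdb's command copy */

        if (!command)
            return -1;

        for (; *items; items++) {
            char buf[80];

            snprintf(buf, sizeof(buf), "%s %s\n", command, *items);
            diag = fputs(buf, stdout) < 0 ? -1 : 0;  /* stand-in for kdb_parse() */
            if (diag)
                goto out;                /* a bare "return diag" would leak */
        }
    out:
        free(command);                   /* single exit: freed on every path */
        return diag;
    }

    int main(void)
    {
        const char *const addrs[] = { "0x1000", "0x2000", NULL };
        return run_on_list("md", addrs);
    }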
diff --git a/kernel/futex.c b/kernel/futex.c index 6c683b37f2ce..40a8777a27d0 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -2489,7 +2489,8 @@ void exit_robust_list(struct task_struct *curr) | |||
2489 | { | 2489 | { |
2490 | struct robust_list_head __user *head = curr->robust_list; | 2490 | struct robust_list_head __user *head = curr->robust_list; |
2491 | struct robust_list __user *entry, *next_entry, *pending; | 2491 | struct robust_list __user *entry, *next_entry, *pending; |
2492 | unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip; | 2492 | unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
2493 | unsigned int uninitialized_var(next_pi); | ||
2493 | unsigned long futex_offset; | 2494 | unsigned long futex_offset; |
2494 | int rc; | 2495 | int rc; |
2495 | 2496 | ||
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 06da4dfc339b..a7934ac75e5b 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c | |||
@@ -49,7 +49,8 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
49 | { | 49 | { |
50 | struct compat_robust_list_head __user *head = curr->compat_robust_list; | 50 | struct compat_robust_list_head __user *head = curr->compat_robust_list; |
51 | struct robust_list __user *entry, *next_entry, *pending; | 51 | struct robust_list __user *entry, *next_entry, *pending; |
52 | unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip; | 52 | unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
53 | unsigned int uninitialized_var(next_pi); | ||
53 | compat_uptr_t uentry, next_uentry, upending; | 54 | compat_uptr_t uentry, next_uentry, upending; |
54 | compat_long_t futex_offset; | 55 | compat_long_t futex_offset; |
55 | int rc; | 56 | int rc; |
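Both futex hunks wrap next_pi in uninitialized_var(), the kernel idiom for silencing a false-positive "may be used uninitialized" warning without turning the warning off globally. In the GCC flavour the macro simply self-initializes the variable; the definition below matches the common one, but check include/linux/compiler-gcc.h in the tree at hand:

    #include <stdio.h>

    /* Same trick the kernel uses: "x = x" convinces GCC the variable has a
     * value without generating extra code, so the uninitialized-use warning
     * stays quiet on paths the compiler cannot prove are unreachable. */
    #define uninitialized_var(x) x = x

    static int fetch(int key, int *val)
    {
        if (key < 0)
            return -1;              /* only failure path */
        *val = key * 2;
        return 0;
    }

    int main(void)
    {
        int uninitialized_var(v);

        /* v is only read after fetch() succeeds, but across more involved
         * control flow (as in exit_robust_list) GCC cannot always see that. */
        if (fetch(21, &v) == 0)
            printf("%d\n", v);
        return 0;
    }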
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index c7a8f453919e..aeaa7f846821 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c | |||
@@ -121,10 +121,10 @@ static inline int pm_qos_get_value(struct pm_qos_object *o) | |||
121 | 121 | ||
122 | switch (o->type) { | 122 | switch (o->type) { |
123 | case PM_QOS_MIN: | 123 | case PM_QOS_MIN: |
124 | return plist_last(&o->requests)->prio; | 124 | return plist_first(&o->requests)->prio; |
125 | 125 | ||
126 | case PM_QOS_MAX: | 126 | case PM_QOS_MAX: |
127 | return plist_first(&o->requests)->prio; | 127 | return plist_last(&o->requests)->prio; |
128 | 128 | ||
129 | default: | 129 | default: |
130 | /* runtime check for not using enum */ | 130 | /* runtime check for not using enum */ |
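The pm_qos fix depends on the ordering guarantee of a priority-sorted list: plist keeps its nodes in ascending prio order, so the smallest request sits at the head (plist_first) and the largest at the tail (plist_last). The old code had the two lookups swapped. A plain-C illustration of the intended aggregation, with the sorted plist reduced to a sorted array:

    #include <stdio.h>

    enum pm_qos_type { PM_QOS_MIN, PM_QOS_MAX };

    /* Requests kept in ascending order, as a plist would keep them. */
    static int aggregate(enum pm_qos_type type, const int *sorted, int n)
    {
        switch (type) {
        case PM_QOS_MIN:
            return sorted[0];      /* head == smallest == plist_first() */
        case PM_QOS_MAX:
            return sorted[n - 1];  /* tail == largest  == plist_last()  */
        }
        return -1;
    }

    int main(void)
    {
        int latency_us[] = { 10, 25, 100 };   /* already sorted */

        /* A MIN-type constraint such as cpu_dma_latency must honour the
         * strictest, i.e. smallest, request; a MAX-type one the largest. */
        printf("min=%d max=%d\n",
               aggregate(PM_QOS_MIN, latency_us, 3),
               aggregate(PM_QOS_MAX, latency_us, 3));
        return 0;
    }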
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 29bff6117abc..a5aff3ebad38 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
@@ -246,9 +246,13 @@ config PM_OPS | |||
246 | depends on PM_SLEEP || PM_RUNTIME | 246 | depends on PM_SLEEP || PM_RUNTIME |
247 | default y | 247 | default y |
248 | 248 | ||
249 | config ARCH_HAS_OPP | ||
250 | bool | ||
251 | |||
249 | config PM_OPP | 252 | config PM_OPP |
250 | bool "Operating Performance Point (OPP) Layer library" | 253 | bool "Operating Performance Point (OPP) Layer library" |
251 | depends on PM | 254 | depends on PM |
255 | depends on ARCH_HAS_OPP | ||
252 | ---help--- | 256 | ---help--- |
253 | SOCs have a standard set of tuples consisting of frequency and | 257 | SOCs have a standard set of tuples consisting of frequency and |
254 | voltage pairs that the device will support per voltage domain. This | 258 | voltage pairs that the device will support per voltage domain. This |
diff --git a/kernel/printk.c b/kernel/printk.c index 38e7d5868d60..9a2264fc42ca 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -274,7 +274,20 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) | |||
274 | char c; | 274 | char c; |
275 | int error = 0; | 275 | int error = 0; |
276 | 276 | ||
277 | error = security_syslog(type, from_file); | 277 | /* |
278 | * If this is from /proc/kmsg we only do the capabilities checks | ||
279 | * at open time. | ||
280 | */ | ||
281 | if (type == SYSLOG_ACTION_OPEN || !from_file) { | ||
282 | if (dmesg_restrict && !capable(CAP_SYS_ADMIN)) | ||
283 | return -EPERM; | ||
284 | if ((type != SYSLOG_ACTION_READ_ALL && | ||
285 | type != SYSLOG_ACTION_SIZE_BUFFER) && | ||
286 | !capable(CAP_SYS_ADMIN)) | ||
287 | return -EPERM; | ||
288 | } | ||
289 | |||
290 | error = security_syslog(type); | ||
278 | if (error) | 291 | if (error) |
279 | return error; | 292 | return error; |
280 | 293 | ||
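The printk hunk moves the dmesg_restrict / CAP_SYS_ADMIN checks out of the LSM hook and into do_syslog() itself, applying them at open time for /proc/kmsg and on every call otherwise; security_syslog() consequently loses its from_file argument. Seen from userspace, an unprivileged syslog(2) read fails with EPERM once kernel.dmesg_restrict is 1. A small probe of that behaviour (the action constants follow syslog(2); klogctl is the glibc wrapper):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/klog.h>

    #define SYSLOG_ACTION_READ_ALL    3   /* read all messages in the buffer */
    #define SYSLOG_ACTION_SIZE_BUFFER 10  /* return size of the log buffer   */

    int main(void)
    {
        char buf[4096];
        int size = klogctl(SYSLOG_ACTION_SIZE_BUFFER, NULL, 0);
        int n = klogctl(SYSLOG_ACTION_READ_ALL, buf, sizeof(buf));

        if (n < 0)
            /* With dmesg_restrict=1 and no CAP_SYS_ADMIN this is EPERM. */
            fprintf(stderr, "klogctl: %s\n", strerror(errno));
        else
            printf("log buffer %d bytes, read %d bytes\n", size, n);
        return 0;
    }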
diff --git a/kernel/sched.c b/kernel/sched.c index aa14a56f9d03..dc91a4d09ac3 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -560,18 +560,8 @@ struct rq { | |||
560 | 560 | ||
561 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); | 561 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
562 | 562 | ||
563 | static inline | ||
564 | void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) | ||
565 | { | ||
566 | rq->curr->sched_class->check_preempt_curr(rq, p, flags); | ||
567 | 563 | ||
568 | /* | 564 | static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); |
569 | * A queue event has occurred, and we're going to schedule. In | ||
570 | * this case, we can save a useless back to back clock update. | ||
571 | */ | ||
572 | if (test_tsk_need_resched(p)) | ||
573 | rq->skip_clock_update = 1; | ||
574 | } | ||
575 | 565 | ||
576 | static inline int cpu_of(struct rq *rq) | 566 | static inline int cpu_of(struct rq *rq) |
577 | { | 567 | { |
@@ -2118,6 +2108,31 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p, | |||
2118 | p->sched_class->prio_changed(rq, p, oldprio, running); | 2108 | p->sched_class->prio_changed(rq, p, oldprio, running); |
2119 | } | 2109 | } |
2120 | 2110 | ||
2111 | static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) | ||
2112 | { | ||
2113 | const struct sched_class *class; | ||
2114 | |||
2115 | if (p->sched_class == rq->curr->sched_class) { | ||
2116 | rq->curr->sched_class->check_preempt_curr(rq, p, flags); | ||
2117 | } else { | ||
2118 | for_each_class(class) { | ||
2119 | if (class == rq->curr->sched_class) | ||
2120 | break; | ||
2121 | if (class == p->sched_class) { | ||
2122 | resched_task(rq->curr); | ||
2123 | break; | ||
2124 | } | ||
2125 | } | ||
2126 | } | ||
2127 | |||
2128 | /* | ||
2129 | * A queue event has occurred, and we're going to schedule. In | ||
2130 | * this case, we can save a useless back to back clock update. | ||
2131 | */ | ||
2132 | if (test_tsk_need_resched(rq->curr)) | ||
2133 | rq->skip_clock_update = 1; | ||
2134 | } | ||
2135 | |||
2121 | #ifdef CONFIG_SMP | 2136 | #ifdef CONFIG_SMP |
2122 | /* | 2137 | /* |
2123 | * Is this task likely cache-hot: | 2138 | * Is this task likely cache-hot: |
@@ -6960,6 +6975,8 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
6960 | if (cpu != group_first_cpu(sd->groups)) | 6975 | if (cpu != group_first_cpu(sd->groups)) |
6961 | return; | 6976 | return; |
6962 | 6977 | ||
6978 | sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups)); | ||
6979 | |||
6963 | child = sd->child; | 6980 | child = sd->child; |
6964 | 6981 | ||
6965 | sd->groups->cpu_power = 0; | 6982 | sd->groups->cpu_power = 0; |
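The relocated check_preempt_curr() now copes with wakeups across scheduling classes by walking the class list, which is ordered from highest priority (stop, rt) down to lowest (fair, idle): if the running task's class is found first the wakeup cannot preempt, if the waking task's class is found first the running task is marked for rescheduling, and same-class wakeups are still delegated to the class's own hook. A standalone sketch of that precedence walk (the class names and table are illustrative of the idea, not the kernel's actual sched_class linkage):

    #include <stdio.h>
    #include <string.h>

    /* Highest-priority class first, mirroring how for_each_class() walks
     * the kernel's chain of sched_class structures. */
    static const char *const classes[] = { "stop", "rt", "fair", "idle" };

    /* 1: waking task preempts, 0: it does not, -1: same class (defer to
     * the class's own check_preempt_curr implementation). */
    static int should_preempt(const char *curr, const char *waking)
    {
        size_t i;

        if (strcmp(curr, waking) == 0)
            return -1;

        for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
            if (strcmp(classes[i], curr) == 0)
                return 0;   /* current task's class ranks higher */
            if (strcmp(classes[i], waking) == 0)
                return 1;   /* waking task's class ranks higher: resched */
        }
        return 0;
    }

    int main(void)
    {
        printf("rt wakes while fair runs -> %d\n", should_preempt("fair", "rt"));
        printf("fair wakes while rt runs -> %d\n", should_preempt("rt", "fair"));
        return 0;
    }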
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index f4f6a8326dd0..52ab113d8bb9 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -1654,12 +1654,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |||
1654 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 1654 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); |
1655 | int scale = cfs_rq->nr_running >= sched_nr_latency; | 1655 | int scale = cfs_rq->nr_running >= sched_nr_latency; |
1656 | 1656 | ||
1657 | if (unlikely(rt_prio(p->prio))) | ||
1658 | goto preempt; | ||
1659 | |||
1660 | if (unlikely(p->sched_class != &fair_sched_class)) | ||
1661 | return; | ||
1662 | |||
1663 | if (unlikely(se == pse)) | 1657 | if (unlikely(se == pse)) |
1664 | return; | 1658 | return; |
1665 | 1659 | ||
@@ -2035,13 +2029,16 @@ struct sd_lb_stats { | |||
2035 | unsigned long this_load_per_task; | 2029 | unsigned long this_load_per_task; |
2036 | unsigned long this_nr_running; | 2030 | unsigned long this_nr_running; |
2037 | unsigned long this_has_capacity; | 2031 | unsigned long this_has_capacity; |
2032 | unsigned int this_idle_cpus; | ||
2038 | 2033 | ||
2039 | /* Statistics of the busiest group */ | 2034 | /* Statistics of the busiest group */ |
2035 | unsigned int busiest_idle_cpus; | ||
2040 | unsigned long max_load; | 2036 | unsigned long max_load; |
2041 | unsigned long busiest_load_per_task; | 2037 | unsigned long busiest_load_per_task; |
2042 | unsigned long busiest_nr_running; | 2038 | unsigned long busiest_nr_running; |
2043 | unsigned long busiest_group_capacity; | 2039 | unsigned long busiest_group_capacity; |
2044 | unsigned long busiest_has_capacity; | 2040 | unsigned long busiest_has_capacity; |
2041 | unsigned int busiest_group_weight; | ||
2045 | 2042 | ||
2046 | int group_imb; /* Is there imbalance in this sd */ | 2043 | int group_imb; /* Is there imbalance in this sd */ |
2047 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 2044 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
@@ -2063,6 +2060,8 @@ struct sg_lb_stats { | |||
2063 | unsigned long sum_nr_running; /* Nr tasks running in the group */ | 2060 | unsigned long sum_nr_running; /* Nr tasks running in the group */ |
2064 | unsigned long sum_weighted_load; /* Weighted load of group's tasks */ | 2061 | unsigned long sum_weighted_load; /* Weighted load of group's tasks */ |
2065 | unsigned long group_capacity; | 2062 | unsigned long group_capacity; |
2063 | unsigned long idle_cpus; | ||
2064 | unsigned long group_weight; | ||
2066 | int group_imb; /* Is there an imbalance in the group ? */ | 2065 | int group_imb; /* Is there an imbalance in the group ? */ |
2067 | int group_has_capacity; /* Is there extra capacity in the group? */ | 2066 | int group_has_capacity; /* Is there extra capacity in the group? */ |
2068 | }; | 2067 | }; |
@@ -2431,7 +2430,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, | |||
2431 | sgs->group_load += load; | 2430 | sgs->group_load += load; |
2432 | sgs->sum_nr_running += rq->nr_running; | 2431 | sgs->sum_nr_running += rq->nr_running; |
2433 | sgs->sum_weighted_load += weighted_cpuload(i); | 2432 | sgs->sum_weighted_load += weighted_cpuload(i); |
2434 | 2433 | if (idle_cpu(i)) | |
2434 | sgs->idle_cpus++; | ||
2435 | } | 2435 | } |
2436 | 2436 | ||
2437 | /* | 2437 | /* |
@@ -2469,6 +2469,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, | |||
2469 | sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); | 2469 | sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); |
2470 | if (!sgs->group_capacity) | 2470 | if (!sgs->group_capacity) |
2471 | sgs->group_capacity = fix_small_capacity(sd, group); | 2471 | sgs->group_capacity = fix_small_capacity(sd, group); |
2472 | sgs->group_weight = group->group_weight; | ||
2472 | 2473 | ||
2473 | if (sgs->group_capacity > sgs->sum_nr_running) | 2474 | if (sgs->group_capacity > sgs->sum_nr_running) |
2474 | sgs->group_has_capacity = 1; | 2475 | sgs->group_has_capacity = 1; |
@@ -2576,13 +2577,16 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, | |||
2576 | sds->this_nr_running = sgs.sum_nr_running; | 2577 | sds->this_nr_running = sgs.sum_nr_running; |
2577 | sds->this_load_per_task = sgs.sum_weighted_load; | 2578 | sds->this_load_per_task = sgs.sum_weighted_load; |
2578 | sds->this_has_capacity = sgs.group_has_capacity; | 2579 | sds->this_has_capacity = sgs.group_has_capacity; |
2580 | sds->this_idle_cpus = sgs.idle_cpus; | ||
2579 | } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) { | 2581 | } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) { |
2580 | sds->max_load = sgs.avg_load; | 2582 | sds->max_load = sgs.avg_load; |
2581 | sds->busiest = sg; | 2583 | sds->busiest = sg; |
2582 | sds->busiest_nr_running = sgs.sum_nr_running; | 2584 | sds->busiest_nr_running = sgs.sum_nr_running; |
2585 | sds->busiest_idle_cpus = sgs.idle_cpus; | ||
2583 | sds->busiest_group_capacity = sgs.group_capacity; | 2586 | sds->busiest_group_capacity = sgs.group_capacity; |
2584 | sds->busiest_load_per_task = sgs.sum_weighted_load; | 2587 | sds->busiest_load_per_task = sgs.sum_weighted_load; |
2585 | sds->busiest_has_capacity = sgs.group_has_capacity; | 2588 | sds->busiest_has_capacity = sgs.group_has_capacity; |
2589 | sds->busiest_group_weight = sgs.group_weight; | ||
2586 | sds->group_imb = sgs.group_imb; | 2590 | sds->group_imb = sgs.group_imb; |
2587 | } | 2591 | } |
2588 | 2592 | ||
@@ -2860,8 +2864,26 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
2860 | if (sds.this_load >= sds.avg_load) | 2864 | if (sds.this_load >= sds.avg_load) |
2861 | goto out_balanced; | 2865 | goto out_balanced; |
2862 | 2866 | ||
2863 | if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) | 2867 | /* |
2864 | goto out_balanced; | 2868 | * In the CPU_NEWLY_IDLE, use imbalance_pct to be conservative. |
2869 | * And to check for busy balance use !idle_cpu instead of | ||
2870 | * CPU_NOT_IDLE. This is because HT siblings will use CPU_NOT_IDLE | ||
2871 | * even when they are idle. | ||
2872 | */ | ||
2873 | if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) { | ||
2874 | if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) | ||
2875 | goto out_balanced; | ||
2876 | } else { | ||
2877 | /* | ||
2878 | * This cpu is idle. If the busiest group load doesn't | ||
2879 | * have more tasks than the number of available cpu's and | ||
2880 | * there is no imbalance between this and busiest group | ||
2881 | * wrt to idle cpu's, it is balanced. | ||
2882 | */ | ||
2883 | if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) && | ||
2884 | sds.busiest_nr_running <= sds.busiest_group_weight) | ||
2885 | goto out_balanced; | ||
2886 | } | ||
2865 | 2887 | ||
2866 | force_balance: | 2888 | force_balance: |
2867 | /* Looks like there is an imbalance. Compute it */ | 2889 | /* Looks like there is an imbalance. Compute it */ |
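The new branch in find_busiest_group() makes a genuinely idle CPU less eager to pull work: when the busiest group has no more runnable tasks than CPUs (nr_running <= group_weight) and is not noticeably shorter on idle CPUs than this group, the domain is treated as balanced; newly idle CPUs and busy HT siblings keep the conservative imbalance_pct test. A condensed sketch of the decision (the struct below is a trimmed stand-in for sd_lb_stats):

    #include <stdbool.h>
    #include <stdio.h>

    struct lb_stats {                 /* trimmed-down sd_lb_stats */
        unsigned long max_load, this_load;
        unsigned long busiest_nr_running;
        unsigned int  busiest_group_weight;
        unsigned int  this_idle_cpus, busiest_idle_cpus;
    };

    /* true means "leave things alone", i.e. the goto out_balanced case. */
    static bool balanced(const struct lb_stats *s, bool newly_idle,
                         bool this_cpu_idle, unsigned int imbalance_pct)
    {
        if (newly_idle || !this_cpu_idle)
            return 100 * s->max_load <= imbalance_pct * s->this_load;

        /* truly idle: only pull if the busiest group is overloaded or
         * clearly has fewer idle CPUs than we do */
        return s->this_idle_cpus <= s->busiest_idle_cpus + 1 &&
               s->busiest_nr_running <= s->busiest_group_weight;
    }

    int main(void)
    {
        struct lb_stats s = {
            .max_load = 1024, .this_load = 0,
            .busiest_nr_running = 2, .busiest_group_weight = 2,
            .this_idle_cpus = 1, .busiest_idle_cpus = 0,
        };

        /* Idle CPU, busiest group not overloaded: stay put. */
        printf("balanced=%d\n", balanced(&s, false, true, 117));
        return 0;
    }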
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c index 45bddc0c1048..2bf6b47058c1 100644 --- a/kernel/sched_stoptask.c +++ b/kernel/sched_stoptask.c | |||
@@ -19,14 +19,14 @@ select_task_rq_stop(struct rq *rq, struct task_struct *p, | |||
19 | static void | 19 | static void |
20 | check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) | 20 | check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) |
21 | { | 21 | { |
22 | resched_task(rq->curr); /* we preempt everything */ | 22 | /* we're never preempted */ |
23 | } | 23 | } |
24 | 24 | ||
25 | static struct task_struct *pick_next_task_stop(struct rq *rq) | 25 | static struct task_struct *pick_next_task_stop(struct rq *rq) |
26 | { | 26 | { |
27 | struct task_struct *stop = rq->stop; | 27 | struct task_struct *stop = rq->stop; |
28 | 28 | ||
29 | if (stop && stop->state == TASK_RUNNING) | 29 | if (stop && stop->se.on_rq) |
30 | return stop; | 30 | return stop; |
31 | 31 | ||
32 | return NULL; | 32 | return NULL; |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index b65bf634035e..5abfa1518554 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -702,7 +702,6 @@ static struct ctl_table kern_table[] = { | |||
702 | .extra1 = &zero, | 702 | .extra1 = &zero, |
703 | .extra2 = &ten_thousand, | 703 | .extra2 = &ten_thousand, |
704 | }, | 704 | }, |
705 | #endif | ||
706 | { | 705 | { |
707 | .procname = "dmesg_restrict", | 706 | .procname = "dmesg_restrict", |
708 | .data = &dmesg_restrict, | 707 | .data = &dmesg_restrict, |
@@ -712,6 +711,7 @@ static struct ctl_table kern_table[] = { | |||
712 | .extra1 = &zero, | 711 | .extra1 = &zero, |
713 | .extra2 = &one, | 712 | .extra2 = &one, |
714 | }, | 713 | }, |
714 | #endif | ||
715 | { | 715 | { |
716 | .procname = "ngroups_max", | 716 | .procname = "ngroups_max", |
717 | .data = &ngroups_max, | 717 | .data = &ngroups_max, |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index e04b8bcdef88..ea37e2ff4164 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -126,7 +126,7 @@ if FTRACE | |||
126 | config FUNCTION_TRACER | 126 | config FUNCTION_TRACER |
127 | bool "Kernel Function Tracer" | 127 | bool "Kernel Function Tracer" |
128 | depends on HAVE_FUNCTION_TRACER | 128 | depends on HAVE_FUNCTION_TRACER |
129 | select FRAME_POINTER if (!ARM_UNWIND) | 129 | select FRAME_POINTER if !ARM_UNWIND && !S390 |
130 | select KALLSYMS | 130 | select KALLSYMS |
131 | select GENERIC_TRACER | 131 | select GENERIC_TRACER |
132 | select CONTEXT_SWITCH_TRACER | 132 | select CONTEXT_SWITCH_TRACER |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 82d9b8106cd0..042084157980 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/writeback.h> | 17 | #include <linux/writeback.h> |
18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
19 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
20 | #include <linux/smp_lock.h> | ||
21 | #include <linux/notifier.h> | 20 | #include <linux/notifier.h> |
22 | #include <linux/irqflags.h> | 21 | #include <linux/irqflags.h> |
23 | #include <linux/debugfs.h> | 22 | #include <linux/debugfs.h> |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2efa8ea07ff7..7a22b4129211 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -61,7 +61,14 @@ struct mem_cgroup *root_mem_cgroup __read_mostly; | |||
61 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 61 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
62 | /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ | 62 | /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ |
63 | int do_swap_account __read_mostly; | 63 | int do_swap_account __read_mostly; |
64 | static int really_do_swap_account __initdata = 1; /* for remember boot option*/ | 64 | |
65 | /* for remember boot option*/ | ||
66 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED | ||
67 | static int really_do_swap_account __initdata = 1; | ||
68 | #else | ||
69 | static int really_do_swap_account __initdata = 0; | ||
70 | #endif | ||
71 | |||
65 | #else | 72 | #else |
66 | #define do_swap_account (0) | 73 | #define do_swap_account (0) |
67 | #endif | 74 | #endif |
@@ -278,13 +285,14 @@ enum move_type { | |||
278 | 285 | ||
279 | /* "mc" and its members are protected by cgroup_mutex */ | 286 | /* "mc" and its members are protected by cgroup_mutex */ |
280 | static struct move_charge_struct { | 287 | static struct move_charge_struct { |
281 | spinlock_t lock; /* for from, to, moving_task */ | 288 | spinlock_t lock; /* for from, to */ |
282 | struct mem_cgroup *from; | 289 | struct mem_cgroup *from; |
283 | struct mem_cgroup *to; | 290 | struct mem_cgroup *to; |
284 | unsigned long precharge; | 291 | unsigned long precharge; |
285 | unsigned long moved_charge; | 292 | unsigned long moved_charge; |
286 | unsigned long moved_swap; | 293 | unsigned long moved_swap; |
287 | struct task_struct *moving_task; /* a task moving charges */ | 294 | struct task_struct *moving_task; /* a task moving charges */ |
295 | struct mm_struct *mm; | ||
288 | wait_queue_head_t waitq; /* a waitq for other context */ | 296 | wait_queue_head_t waitq; /* a waitq for other context */ |
289 | } mc = { | 297 | } mc = { |
290 | .lock = __SPIN_LOCK_UNLOCKED(mc.lock), | 298 | .lock = __SPIN_LOCK_UNLOCKED(mc.lock), |
@@ -2152,7 +2160,7 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc, | |||
2152 | { | 2160 | { |
2153 | VM_BUG_ON(from == to); | 2161 | VM_BUG_ON(from == to); |
2154 | VM_BUG_ON(PageLRU(pc->page)); | 2162 | VM_BUG_ON(PageLRU(pc->page)); |
2155 | VM_BUG_ON(!PageCgroupLocked(pc)); | 2163 | VM_BUG_ON(!page_is_cgroup_locked(pc)); |
2156 | VM_BUG_ON(!PageCgroupUsed(pc)); | 2164 | VM_BUG_ON(!PageCgroupUsed(pc)); |
2157 | VM_BUG_ON(pc->mem_cgroup != from); | 2165 | VM_BUG_ON(pc->mem_cgroup != from); |
2158 | 2166 | ||
@@ -4631,7 +4639,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) | |||
4631 | unsigned long precharge; | 4639 | unsigned long precharge; |
4632 | struct vm_area_struct *vma; | 4640 | struct vm_area_struct *vma; |
4633 | 4641 | ||
4634 | down_read(&mm->mmap_sem); | 4642 | /* We've already held the mmap_sem */ |
4635 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 4643 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
4636 | struct mm_walk mem_cgroup_count_precharge_walk = { | 4644 | struct mm_walk mem_cgroup_count_precharge_walk = { |
4637 | .pmd_entry = mem_cgroup_count_precharge_pte_range, | 4645 | .pmd_entry = mem_cgroup_count_precharge_pte_range, |
@@ -4643,7 +4651,6 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) | |||
4643 | walk_page_range(vma->vm_start, vma->vm_end, | 4651 | walk_page_range(vma->vm_start, vma->vm_end, |
4644 | &mem_cgroup_count_precharge_walk); | 4652 | &mem_cgroup_count_precharge_walk); |
4645 | } | 4653 | } |
4646 | up_read(&mm->mmap_sem); | ||
4647 | 4654 | ||
4648 | precharge = mc.precharge; | 4655 | precharge = mc.precharge; |
4649 | mc.precharge = 0; | 4656 | mc.precharge = 0; |
@@ -4694,11 +4701,16 @@ static void mem_cgroup_clear_mc(void) | |||
4694 | 4701 | ||
4695 | mc.moved_swap = 0; | 4702 | mc.moved_swap = 0; |
4696 | } | 4703 | } |
4704 | if (mc.mm) { | ||
4705 | up_read(&mc.mm->mmap_sem); | ||
4706 | mmput(mc.mm); | ||
4707 | } | ||
4697 | spin_lock(&mc.lock); | 4708 | spin_lock(&mc.lock); |
4698 | mc.from = NULL; | 4709 | mc.from = NULL; |
4699 | mc.to = NULL; | 4710 | mc.to = NULL; |
4700 | mc.moving_task = NULL; | ||
4701 | spin_unlock(&mc.lock); | 4711 | spin_unlock(&mc.lock); |
4712 | mc.moving_task = NULL; | ||
4713 | mc.mm = NULL; | ||
4702 | mem_cgroup_end_move(from); | 4714 | mem_cgroup_end_move(from); |
4703 | memcg_oom_recover(from); | 4715 | memcg_oom_recover(from); |
4704 | memcg_oom_recover(to); | 4716 | memcg_oom_recover(to); |
@@ -4724,12 +4736,21 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | |||
4724 | return 0; | 4736 | return 0; |
4725 | /* We move charges only when we move a owner of the mm */ | 4737 | /* We move charges only when we move a owner of the mm */ |
4726 | if (mm->owner == p) { | 4738 | if (mm->owner == p) { |
4739 | /* | ||
4740 | * We do all the move charge works under one mmap_sem to | ||
4741 | * avoid deadlock with down_write(&mmap_sem) | ||
4742 | * -> try_charge() -> if (mc.moving_task) -> sleep. | ||
4743 | */ | ||
4744 | down_read(&mm->mmap_sem); | ||
4745 | |||
4727 | VM_BUG_ON(mc.from); | 4746 | VM_BUG_ON(mc.from); |
4728 | VM_BUG_ON(mc.to); | 4747 | VM_BUG_ON(mc.to); |
4729 | VM_BUG_ON(mc.precharge); | 4748 | VM_BUG_ON(mc.precharge); |
4730 | VM_BUG_ON(mc.moved_charge); | 4749 | VM_BUG_ON(mc.moved_charge); |
4731 | VM_BUG_ON(mc.moved_swap); | 4750 | VM_BUG_ON(mc.moved_swap); |
4732 | VM_BUG_ON(mc.moving_task); | 4751 | VM_BUG_ON(mc.moving_task); |
4752 | VM_BUG_ON(mc.mm); | ||
4753 | |||
4733 | mem_cgroup_start_move(from); | 4754 | mem_cgroup_start_move(from); |
4734 | spin_lock(&mc.lock); | 4755 | spin_lock(&mc.lock); |
4735 | mc.from = from; | 4756 | mc.from = from; |
@@ -4737,14 +4758,16 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | |||
4737 | mc.precharge = 0; | 4758 | mc.precharge = 0; |
4738 | mc.moved_charge = 0; | 4759 | mc.moved_charge = 0; |
4739 | mc.moved_swap = 0; | 4760 | mc.moved_swap = 0; |
4740 | mc.moving_task = current; | ||
4741 | spin_unlock(&mc.lock); | 4761 | spin_unlock(&mc.lock); |
4762 | mc.moving_task = current; | ||
4763 | mc.mm = mm; | ||
4742 | 4764 | ||
4743 | ret = mem_cgroup_precharge_mc(mm); | 4765 | ret = mem_cgroup_precharge_mc(mm); |
4744 | if (ret) | 4766 | if (ret) |
4745 | mem_cgroup_clear_mc(); | 4767 | mem_cgroup_clear_mc(); |
4746 | } | 4768 | /* We call up_read() and mmput() in clear_mc(). */ |
4747 | mmput(mm); | 4769 | } else |
4770 | mmput(mm); | ||
4748 | } | 4771 | } |
4749 | return ret; | 4772 | return ret; |
4750 | } | 4773 | } |
@@ -4832,7 +4855,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm) | |||
4832 | struct vm_area_struct *vma; | 4855 | struct vm_area_struct *vma; |
4833 | 4856 | ||
4834 | lru_add_drain_all(); | 4857 | lru_add_drain_all(); |
4835 | down_read(&mm->mmap_sem); | 4858 | /* We've already held the mmap_sem */ |
4836 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 4859 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
4837 | int ret; | 4860 | int ret; |
4838 | struct mm_walk mem_cgroup_move_charge_walk = { | 4861 | struct mm_walk mem_cgroup_move_charge_walk = { |
@@ -4851,7 +4874,6 @@ static void mem_cgroup_move_charge(struct mm_struct *mm) | |||
4851 | */ | 4874 | */ |
4852 | break; | 4875 | break; |
4853 | } | 4876 | } |
4854 | up_read(&mm->mmap_sem); | ||
4855 | } | 4877 | } |
4856 | 4878 | ||
4857 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, | 4879 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, |
@@ -4860,17 +4882,11 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss, | |||
4860 | struct task_struct *p, | 4882 | struct task_struct *p, |
4861 | bool threadgroup) | 4883 | bool threadgroup) |
4862 | { | 4884 | { |
4863 | struct mm_struct *mm; | 4885 | if (!mc.mm) |
4864 | |||
4865 | if (!mc.to) | ||
4866 | /* no need to move charge */ | 4886 | /* no need to move charge */ |
4867 | return; | 4887 | return; |
4868 | 4888 | ||
4869 | mm = get_task_mm(p); | 4889 | mem_cgroup_move_charge(mc.mm); |
4870 | if (mm) { | ||
4871 | mem_cgroup_move_charge(mm); | ||
4872 | mmput(mm); | ||
4873 | } | ||
4874 | mem_cgroup_clear_mc(); | 4890 | mem_cgroup_clear_mc(); |
4875 | } | 4891 | } |
4876 | #else /* !CONFIG_MMU */ | 4892 | #else /* !CONFIG_MMU */ |
@@ -4911,10 +4927,20 @@ struct cgroup_subsys mem_cgroup_subsys = { | |||
4911 | }; | 4927 | }; |
4912 | 4928 | ||
4913 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 4929 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
4930 | static int __init enable_swap_account(char *s) | ||
4931 | { | ||
4932 | /* consider enabled if no parameter or 1 is given */ | ||
4933 | if (!s || !strcmp(s, "1")) | ||
4934 | really_do_swap_account = 1; | ||
4935 | else if (!strcmp(s, "0")) | ||
4936 | really_do_swap_account = 0; | ||
4937 | return 1; | ||
4938 | } | ||
4939 | __setup("swapaccount", enable_swap_account); | ||
4914 | 4940 | ||
4915 | static int __init disable_swap_account(char *s) | 4941 | static int __init disable_swap_account(char *s) |
4916 | { | 4942 | { |
4917 | really_do_swap_account = 0; | 4943 | enable_swap_account("0"); |
4918 | return 1; | 4944 | return 1; |
4919 | } | 4945 | } |
4920 | __setup("noswapaccount", disable_swap_account); | 4946 | __setup("noswapaccount", disable_swap_account); |
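The memcontrol series replaces the single noswapaccount switch with a swapaccount[=0|1] boot parameter whose default now comes from CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED, keeping noswapaccount as an alias for swapaccount=0. __setup() hands the handler only the text after the '=' (or nothing when the bare parameter is given). A userspace mock of how the four spellings resolve (the kernel wires this up through __setup(), not a parser like this):

    #include <stdio.h>
    #include <string.h>

    static int really_do_swap_account = 1;  /* the ..._SWAP_ENABLED=y default */

    /* Mirrors enable_swap_account(): no argument or "1" enables, "0" disables. */
    static int enable_swap_account(const char *s)
    {
        if (!s || !strcmp(s, "1"))
            really_do_swap_account = 1;
        else if (!strcmp(s, "0"))
            really_do_swap_account = 0;
        return 1;
    }

    int main(void)
    {
        enable_swap_account("0");  printf("swapaccount=0 -> %d\n", really_do_swap_account);
        enable_swap_account("1");  printf("swapaccount=1 -> %d\n", really_do_swap_account);
        enable_swap_account(NULL); printf("swapaccount   -> %d\n", really_do_swap_account);
        enable_swap_account("0");  printf("noswapaccount -> %d\n", really_do_swap_account);
        return 0;
    }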
diff --git a/mm/nommu.c b/mm/nommu.c index 3613517c7592..27a9ac588516 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -1717,6 +1717,7 @@ void exit_mmap(struct mm_struct *mm) | |||
1717 | mm->mmap = vma->vm_next; | 1717 | mm->mmap = vma->vm_next; |
1718 | delete_vma_from_mm(vma); | 1718 | delete_vma_from_mm(vma); |
1719 | delete_vma(mm, vma); | 1719 | delete_vma(mm, vma); |
1720 | cond_resched(); | ||
1720 | } | 1721 | } |
1721 | 1722 | ||
1722 | kleave(""); | 1723 | kleave(""); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 07a654486f75..e4092704c1a9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -3008,14 +3008,6 @@ static __init_refok int __build_all_zonelists(void *data) | |||
3008 | build_zonelist_cache(pgdat); | 3008 | build_zonelist_cache(pgdat); |
3009 | } | 3009 | } |
3010 | 3010 | ||
3011 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
3012 | /* Setup real pagesets for the new zone */ | ||
3013 | if (data) { | ||
3014 | struct zone *zone = data; | ||
3015 | setup_zone_pageset(zone); | ||
3016 | } | ||
3017 | #endif | ||
3018 | |||
3019 | /* | 3011 | /* |
3020 | * Initialize the boot_pagesets that are going to be used | 3012 | * Initialize the boot_pagesets that are going to be used |
3021 | * for bootstrapping processors. The real pagesets for | 3013 | * for bootstrapping processors. The real pagesets for |
@@ -3064,7 +3056,11 @@ void build_all_zonelists(void *data) | |||
3064 | } else { | 3056 | } else { |
3065 | /* we have to stop all cpus to guarantee there is no user | 3057 | /* we have to stop all cpus to guarantee there is no user |
3066 | of zonelist */ | 3058 | of zonelist */ |
3067 | stop_machine(__build_all_zonelists, data, NULL); | 3059 | #ifdef CONFIG_MEMORY_HOTPLUG |
3060 | if (data) | ||
3061 | setup_zone_pageset((struct zone *)data); | ||
3062 | #endif | ||
3063 | stop_machine(__build_all_zonelists, NULL, NULL); | ||
3068 | /* cpuset refresh routine should be here */ | 3064 | /* cpuset refresh routine should be here */ |
3069 | } | 3065 | } |
3070 | vm_total_pages = nr_free_pagecache_pages(); | 3066 | vm_total_pages = nr_free_pagecache_pages(); |
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 8b1a2ce21ee5..38cc58b8b2b0 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c | |||
@@ -139,7 +139,6 @@ int walk_page_range(unsigned long addr, unsigned long end, | |||
139 | pgd_t *pgd; | 139 | pgd_t *pgd; |
140 | unsigned long next; | 140 | unsigned long next; |
141 | int err = 0; | 141 | int err = 0; |
142 | struct vm_area_struct *vma; | ||
143 | 142 | ||
144 | if (addr >= end) | 143 | if (addr >= end) |
145 | return err; | 144 | return err; |
@@ -149,15 +148,17 @@ int walk_page_range(unsigned long addr, unsigned long end, | |||
149 | 148 | ||
150 | pgd = pgd_offset(walk->mm, addr); | 149 | pgd = pgd_offset(walk->mm, addr); |
151 | do { | 150 | do { |
151 | struct vm_area_struct *uninitialized_var(vma); | ||
152 | |||
152 | next = pgd_addr_end(addr, end); | 153 | next = pgd_addr_end(addr, end); |
153 | 154 | ||
155 | #ifdef CONFIG_HUGETLB_PAGE | ||
154 | /* | 156 | /* |
155 | * handle hugetlb vma individually because pagetable walk for | 157 | * handle hugetlb vma individually because pagetable walk for |
156 | * the hugetlb page is dependent on the architecture and | 158 | * the hugetlb page is dependent on the architecture and |
157 | * we can't handled it in the same manner as non-huge pages. | 159 | * we can't handled it in the same manner as non-huge pages. |
158 | */ | 160 | */ |
159 | vma = find_vma(walk->mm, addr); | 161 | vma = find_vma(walk->mm, addr); |
160 | #ifdef CONFIG_HUGETLB_PAGE | ||
161 | if (vma && is_vm_hugetlb_page(vma)) { | 162 | if (vma && is_vm_hugetlb_page(vma)) { |
162 | if (vma->vm_end < next) | 163 | if (vma->vm_end < next) |
163 | next = vma->vm_end; | 164 | next = vma->vm_end; |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -3273,9 +3273,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, | |||
3273 | kfree(n); | 3273 | kfree(n); |
3274 | kfree(s); | 3274 | kfree(s); |
3275 | } | 3275 | } |
3276 | err: | ||
3276 | up_write(&slub_lock); | 3277 | up_write(&slub_lock); |
3277 | 3278 | ||
3278 | err: | ||
3279 | if (flags & SLAB_PANIC) | 3279 | if (flags & SLAB_PANIC) |
3280 | panic("Cannot create slabcache %s\n", name); | 3280 | panic("Cannot create slabcache %s\n", name); |
3281 | else | 3281 | else |
@@ -3862,6 +3862,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s, | |||
3862 | x += sprintf(buf + x, " N%d=%lu", | 3862 | x += sprintf(buf + x, " N%d=%lu", |
3863 | node, nodes[node]); | 3863 | node, nodes[node]); |
3864 | #endif | 3864 | #endif |
3865 | up_read(&slub_lock); | ||
3865 | kfree(nodes); | 3866 | kfree(nodes); |
3866 | return x + sprintf(buf + x, "\n"); | 3867 | return x + sprintf(buf + x, "\n"); |
3867 | } | 3868 | } |
diff --git a/net/ceph/buffer.c b/net/ceph/buffer.c index 53d8abfa25d5..bf3e6a13c215 100644 --- a/net/ceph/buffer.c +++ b/net/ceph/buffer.c | |||
@@ -19,7 +19,7 @@ struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) | |||
19 | if (b->vec.iov_base) { | 19 | if (b->vec.iov_base) { |
20 | b->is_vmalloc = false; | 20 | b->is_vmalloc = false; |
21 | } else { | 21 | } else { |
22 | b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL); | 22 | b->vec.iov_base = __vmalloc(len, gfp | __GFP_HIGHMEM, PAGE_KERNEL); |
23 | if (!b->vec.iov_base) { | 23 | if (!b->vec.iov_base) { |
24 | kfree(b); | 24 | kfree(b); |
25 | return NULL; | 25 | return NULL; |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 0e8157ee5d43..1c7a2ec4f3cc 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
@@ -540,8 +540,7 @@ static void prepare_write_message(struct ceph_connection *con) | |||
540 | /* initialize page iterator */ | 540 | /* initialize page iterator */ |
541 | con->out_msg_pos.page = 0; | 541 | con->out_msg_pos.page = 0; |
542 | if (m->pages) | 542 | if (m->pages) |
543 | con->out_msg_pos.page_pos = | 543 | con->out_msg_pos.page_pos = m->page_alignment; |
544 | le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK; | ||
545 | else | 544 | else |
546 | con->out_msg_pos.page_pos = 0; | 545 | con->out_msg_pos.page_pos = 0; |
547 | con->out_msg_pos.data_pos = 0; | 546 | con->out_msg_pos.data_pos = 0; |
@@ -1491,7 +1490,7 @@ static int read_partial_message(struct ceph_connection *con) | |||
1491 | struct ceph_msg *m = con->in_msg; | 1490 | struct ceph_msg *m = con->in_msg; |
1492 | int ret; | 1491 | int ret; |
1493 | int to, left; | 1492 | int to, left; |
1494 | unsigned front_len, middle_len, data_len, data_off; | 1493 | unsigned front_len, middle_len, data_len; |
1495 | int datacrc = con->msgr->nocrc; | 1494 | int datacrc = con->msgr->nocrc; |
1496 | int skip; | 1495 | int skip; |
1497 | u64 seq; | 1496 | u64 seq; |
@@ -1527,19 +1526,17 @@ static int read_partial_message(struct ceph_connection *con) | |||
1527 | data_len = le32_to_cpu(con->in_hdr.data_len); | 1526 | data_len = le32_to_cpu(con->in_hdr.data_len); |
1528 | if (data_len > CEPH_MSG_MAX_DATA_LEN) | 1527 | if (data_len > CEPH_MSG_MAX_DATA_LEN) |
1529 | return -EIO; | 1528 | return -EIO; |
1530 | data_off = le16_to_cpu(con->in_hdr.data_off); | ||
1531 | 1529 | ||
1532 | /* verify seq# */ | 1530 | /* verify seq# */ |
1533 | seq = le64_to_cpu(con->in_hdr.seq); | 1531 | seq = le64_to_cpu(con->in_hdr.seq); |
1534 | if ((s64)seq - (s64)con->in_seq < 1) { | 1532 | if ((s64)seq - (s64)con->in_seq < 1) { |
1535 | pr_info("skipping %s%lld %s seq %lld, expected %lld\n", | 1533 | pr_info("skipping %s%lld %s seq %lld expected %lld\n", |
1536 | ENTITY_NAME(con->peer_name), | 1534 | ENTITY_NAME(con->peer_name), |
1537 | ceph_pr_addr(&con->peer_addr.in_addr), | 1535 | ceph_pr_addr(&con->peer_addr.in_addr), |
1538 | seq, con->in_seq + 1); | 1536 | seq, con->in_seq + 1); |
1539 | con->in_base_pos = -front_len - middle_len - data_len - | 1537 | con->in_base_pos = -front_len - middle_len - data_len - |
1540 | sizeof(m->footer); | 1538 | sizeof(m->footer); |
1541 | con->in_tag = CEPH_MSGR_TAG_READY; | 1539 | con->in_tag = CEPH_MSGR_TAG_READY; |
1542 | con->in_seq++; | ||
1543 | return 0; | 1540 | return 0; |
1544 | } else if ((s64)seq - (s64)con->in_seq > 1) { | 1541 | } else if ((s64)seq - (s64)con->in_seq > 1) { |
1545 | pr_err("read_partial_message bad seq %lld expected %lld\n", | 1542 | pr_err("read_partial_message bad seq %lld expected %lld\n", |
@@ -1576,7 +1573,7 @@ static int read_partial_message(struct ceph_connection *con) | |||
1576 | 1573 | ||
1577 | con->in_msg_pos.page = 0; | 1574 | con->in_msg_pos.page = 0; |
1578 | if (m->pages) | 1575 | if (m->pages) |
1579 | con->in_msg_pos.page_pos = data_off & ~PAGE_MASK; | 1576 | con->in_msg_pos.page_pos = m->page_alignment; |
1580 | else | 1577 | else |
1581 | con->in_msg_pos.page_pos = 0; | 1578 | con->in_msg_pos.page_pos = 0; |
1582 | con->in_msg_pos.data_pos = 0; | 1579 | con->in_msg_pos.data_pos = 0; |
@@ -2301,6 +2298,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags) | |||
2301 | 2298 | ||
2302 | /* data */ | 2299 | /* data */ |
2303 | m->nr_pages = 0; | 2300 | m->nr_pages = 0; |
2301 | m->page_alignment = 0; | ||
2304 | m->pages = NULL; | 2302 | m->pages = NULL; |
2305 | m->pagelist = NULL; | 2303 | m->pagelist = NULL; |
2306 | m->bio = NULL; | 2304 | m->bio = NULL; |
@@ -2370,6 +2368,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, | |||
2370 | type, front_len); | 2368 | type, front_len); |
2371 | return NULL; | 2369 | return NULL; |
2372 | } | 2370 | } |
2371 | msg->page_alignment = le16_to_cpu(hdr->data_off); | ||
2373 | } | 2372 | } |
2374 | memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); | 2373 | memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); |
2375 | 2374 | ||
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 79391994b3ed..3e20a122ffa2 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
@@ -71,6 +71,7 @@ void ceph_calc_raw_layout(struct ceph_osd_client *osdc, | |||
71 | op->extent.length = objlen; | 71 | op->extent.length = objlen; |
72 | } | 72 | } |
73 | req->r_num_pages = calc_pages_for(off, *plen); | 73 | req->r_num_pages = calc_pages_for(off, *plen); |
74 | req->r_page_alignment = off & ~PAGE_MASK; | ||
74 | if (op->op == CEPH_OSD_OP_WRITE) | 75 | if (op->op == CEPH_OSD_OP_WRITE) |
75 | op->payload_len = *plen; | 76 | op->payload_len = *plen; |
76 | 77 | ||
@@ -390,6 +391,8 @@ void ceph_osdc_build_request(struct ceph_osd_request *req, | |||
390 | req->r_request->hdr.data_len = cpu_to_le32(data_len); | 391 | req->r_request->hdr.data_len = cpu_to_le32(data_len); |
391 | } | 392 | } |
392 | 393 | ||
394 | req->r_request->page_alignment = req->r_page_alignment; | ||
395 | |||
393 | BUG_ON(p > msg->front.iov_base + msg->front.iov_len); | 396 | BUG_ON(p > msg->front.iov_base + msg->front.iov_len); |
394 | msg_size = p - msg->front.iov_base; | 397 | msg_size = p - msg->front.iov_base; |
395 | msg->front.iov_len = msg_size; | 398 | msg->front.iov_len = msg_size; |
@@ -419,7 +422,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, | |||
419 | u32 truncate_seq, | 422 | u32 truncate_seq, |
420 | u64 truncate_size, | 423 | u64 truncate_size, |
421 | struct timespec *mtime, | 424 | struct timespec *mtime, |
422 | bool use_mempool, int num_reply) | 425 | bool use_mempool, int num_reply, |
426 | int page_align) | ||
423 | { | 427 | { |
424 | struct ceph_osd_req_op ops[3]; | 428 | struct ceph_osd_req_op ops[3]; |
425 | struct ceph_osd_request *req; | 429 | struct ceph_osd_request *req; |
@@ -447,6 +451,10 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, | |||
447 | calc_layout(osdc, vino, layout, off, plen, req, ops); | 451 | calc_layout(osdc, vino, layout, off, plen, req, ops); |
448 | req->r_file_layout = *layout; /* keep a copy */ | 452 | req->r_file_layout = *layout; /* keep a copy */ |
449 | 453 | ||
454 | /* in case it differs from natural alignment that calc_layout | ||
455 | filled in for us */ | ||
456 | req->r_page_alignment = page_align; | ||
457 | |||
450 | ceph_osdc_build_request(req, off, plen, ops, | 458 | ceph_osdc_build_request(req, off, plen, ops, |
451 | snapc, | 459 | snapc, |
452 | mtime, | 460 | mtime, |
@@ -1489,7 +1497,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, | |||
1489 | struct ceph_vino vino, struct ceph_file_layout *layout, | 1497 | struct ceph_vino vino, struct ceph_file_layout *layout, |
1490 | u64 off, u64 *plen, | 1498 | u64 off, u64 *plen, |
1491 | u32 truncate_seq, u64 truncate_size, | 1499 | u32 truncate_seq, u64 truncate_size, |
1492 | struct page **pages, int num_pages) | 1500 | struct page **pages, int num_pages, int page_align) |
1493 | { | 1501 | { |
1494 | struct ceph_osd_request *req; | 1502 | struct ceph_osd_request *req; |
1495 | int rc = 0; | 1503 | int rc = 0; |
@@ -1499,15 +1507,15 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, | |||
1499 | req = ceph_osdc_new_request(osdc, layout, vino, off, plen, | 1507 | req = ceph_osdc_new_request(osdc, layout, vino, off, plen, |
1500 | CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, | 1508 | CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, |
1501 | NULL, 0, truncate_seq, truncate_size, NULL, | 1509 | NULL, 0, truncate_seq, truncate_size, NULL, |
1502 | false, 1); | 1510 | false, 1, page_align); |
1503 | if (!req) | 1511 | if (!req) |
1504 | return -ENOMEM; | 1512 | return -ENOMEM; |
1505 | 1513 | ||
1506 | /* it may be a short read due to an object boundary */ | 1514 | /* it may be a short read due to an object boundary */ |
1507 | req->r_pages = pages; | 1515 | req->r_pages = pages; |
1508 | 1516 | ||
1509 | dout("readpages final extent is %llu~%llu (%d pages)\n", | 1517 | dout("readpages final extent is %llu~%llu (%d pages align %d)\n", |
1510 | off, *plen, req->r_num_pages); | 1518 | off, *plen, req->r_num_pages, page_align); |
1511 | 1519 | ||
1512 | rc = ceph_osdc_start_request(osdc, req, false); | 1520 | rc = ceph_osdc_start_request(osdc, req, false); |
1513 | if (!rc) | 1521 | if (!rc) |
@@ -1533,6 +1541,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, | |||
1533 | { | 1541 | { |
1534 | struct ceph_osd_request *req; | 1542 | struct ceph_osd_request *req; |
1535 | int rc = 0; | 1543 | int rc = 0; |
1544 | int page_align = off & ~PAGE_MASK; | ||
1536 | 1545 | ||
1537 | BUG_ON(vino.snap != CEPH_NOSNAP); | 1546 | BUG_ON(vino.snap != CEPH_NOSNAP); |
1538 | req = ceph_osdc_new_request(osdc, layout, vino, off, &len, | 1547 | req = ceph_osdc_new_request(osdc, layout, vino, off, &len, |
@@ -1541,7 +1550,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, | |||
1541 | CEPH_OSD_FLAG_WRITE, | 1550 | CEPH_OSD_FLAG_WRITE, |
1542 | snapc, do_sync, | 1551 | snapc, do_sync, |
1543 | truncate_seq, truncate_size, mtime, | 1552 | truncate_seq, truncate_size, mtime, |
1544 | nofail, 1); | 1553 | nofail, 1, page_align); |
1545 | if (!req) | 1554 | if (!req) |
1546 | return -ENOMEM; | 1555 | return -ENOMEM; |
1547 | 1556 | ||
@@ -1638,8 +1647,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, | |||
1638 | m = ceph_msg_get(req->r_reply); | 1647 | m = ceph_msg_get(req->r_reply); |
1639 | 1648 | ||
1640 | if (data_len > 0) { | 1649 | if (data_len > 0) { |
1641 | unsigned data_off = le16_to_cpu(hdr->data_off); | 1650 | int want = calc_pages_for(req->r_page_alignment, data_len); |
1642 | int want = calc_pages_for(data_off & ~PAGE_MASK, data_len); | ||
1643 | 1651 | ||
1644 | if (unlikely(req->r_num_pages < want)) { | 1652 | if (unlikely(req->r_num_pages < want)) { |
1645 | pr_warning("tid %lld reply %d > expected %d pages\n", | 1653 | pr_warning("tid %lld reply %d > expected %d pages\n", |
@@ -1651,6 +1659,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, | |||
1651 | } | 1659 | } |
1652 | m->pages = req->r_pages; | 1660 | m->pages = req->r_pages; |
1653 | m->nr_pages = req->r_num_pages; | 1661 | m->nr_pages = req->r_num_pages; |
1662 | m->page_alignment = req->r_page_alignment; | ||
1654 | #ifdef CONFIG_BLOCK | 1663 | #ifdef CONFIG_BLOCK |
1655 | m->bio = req->r_bio; | 1664 | m->bio = req->r_bio; |
1656 | #endif | 1665 | #endif |
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c index 54caf0687155..ac34feeb2b3a 100644 --- a/net/ceph/pagevec.c +++ b/net/ceph/pagevec.c | |||
@@ -13,8 +13,7 @@ | |||
13 | * build a vector of user pages | 13 | * build a vector of user pages |
14 | */ | 14 | */ |
15 | struct page **ceph_get_direct_page_vector(const char __user *data, | 15 | struct page **ceph_get_direct_page_vector(const char __user *data, |
16 | int num_pages, | 16 | int num_pages) |
17 | loff_t off, size_t len) | ||
18 | { | 17 | { |
19 | struct page **pages; | 18 | struct page **pages; |
20 | int rc; | 19 | int rc; |
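The ceph changes above carry the intra-page offset of the data payload explicitly as page_alignment on the request and message instead of re-deriving it from hdr.data_off on each side. The arithmetic itself is unchanged: the alignment is off & ~PAGE_MASK, and the page count has to include the partially used first and last pages. A self-contained check of that math (calc_pages_for() below is re-derived for the example, not copied from libceph):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Number of pages spanned by a buffer of @len bytes that starts at
     * byte offset @page_align within its first page. */
    static int calc_pages_for(unsigned long page_align, unsigned long len)
    {
        return ((page_align + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
               (page_align >> PAGE_SHIFT);
    }

    int main(void)
    {
        unsigned long off = 0x12345;                  /* file offset of the I/O */
        unsigned long len = 10000;
        unsigned long page_align = off & ~PAGE_MASK;  /* 0x345 into the page    */

        printf("page_align=%#lx pages=%d\n",
               page_align, calc_pages_for(page_align, len));   /* 3 pages */
        return 0;
    }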
diff --git a/net/core/filter.c b/net/core/filter.c index 23e9b2a6b4c8..c1ee800bc080 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -589,7 +589,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen) | |||
589 | EXPORT_SYMBOL(sk_chk_filter); | 589 | EXPORT_SYMBOL(sk_chk_filter); |
590 | 590 | ||
591 | /** | 591 | /** |
592 | * sk_filter_rcu_release: Release a socket filter by rcu_head | 592 | * sk_filter_rcu_release - Release a socket filter by rcu_head |
593 | * @rcu: rcu_head that contains the sk_filter to free | 593 | * @rcu: rcu_head that contains the sk_filter to free |
594 | */ | 594 | */ |
595 | static void sk_filter_rcu_release(struct rcu_head *rcu) | 595 | static void sk_filter_rcu_release(struct rcu_head *rcu) |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index a5ff5a89f376..7f902cad10f8 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -712,15 +712,21 @@ static void rx_queue_release(struct kobject *kobj) | |||
712 | 712 | ||
713 | 713 | ||
714 | map = rcu_dereference_raw(queue->rps_map); | 714 | map = rcu_dereference_raw(queue->rps_map); |
715 | if (map) | 715 | if (map) { |
716 | RCU_INIT_POINTER(queue->rps_map, NULL); | ||
716 | call_rcu(&map->rcu, rps_map_release); | 717 | call_rcu(&map->rcu, rps_map_release); |
718 | } | ||
717 | 719 | ||
718 | flow_table = rcu_dereference_raw(queue->rps_flow_table); | 720 | flow_table = rcu_dereference_raw(queue->rps_flow_table); |
719 | if (flow_table) | 721 | if (flow_table) { |
722 | RCU_INIT_POINTER(queue->rps_flow_table, NULL); | ||
720 | call_rcu(&flow_table->rcu, rps_dev_flow_table_release); | 723 | call_rcu(&flow_table->rcu, rps_dev_flow_table_release); |
724 | } | ||
721 | 725 | ||
722 | if (atomic_dec_and_test(&first->count)) | 726 | if (atomic_dec_and_test(&first->count)) |
723 | kfree(first); | 727 | kfree(first); |
728 | else | ||
729 | memset(kobj, 0, sizeof(*kobj)); | ||
724 | } | 730 | } |
725 | 731 | ||
726 | static struct kobj_type rx_queue_ktype = { | 732 | static struct kobj_type rx_queue_ktype = { |
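The rx_queue_release() change follows the standard RCU teardown order: unpublish the pointer with RCU_INIT_POINTER(..., NULL) first, then hand the old object to call_rcu() so it is freed only after every reader that may still hold it has finished. A kernel-style sketch of the pattern (the type names are stand-ins, not the net-sysfs structures):

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct map_like {
        struct rcu_head rcu;
        unsigned int len;
    };

    struct queue_like {
        struct map_like __rcu *map;
    };

    static void map_release(struct rcu_head *rcu)
    {
        kfree(container_of(rcu, struct map_like, rcu));
    }

    /* Unpublish first, then defer the free: readers that fetched the old
     * pointer with rcu_dereference() before the NULL store may still be
     * using it, so its memory must outlive the current grace period. */
    static void queue_release(struct queue_like *queue)
    {
        struct map_like *map = rcu_dereference_protected(queue->map, 1);

        if (map) {
            RCU_INIT_POINTER(queue->map, NULL);
            call_rcu(&map->rcu, map_release);
        }
    }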
diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 7552495aff7a..fceeb37d7161 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c | |||
@@ -45,9 +45,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue, | |||
45 | nr_table_entries = roundup_pow_of_two(nr_table_entries + 1); | 45 | nr_table_entries = roundup_pow_of_two(nr_table_entries + 1); |
46 | lopt_size += nr_table_entries * sizeof(struct request_sock *); | 46 | lopt_size += nr_table_entries * sizeof(struct request_sock *); |
47 | if (lopt_size > PAGE_SIZE) | 47 | if (lopt_size > PAGE_SIZE) |
48 | lopt = __vmalloc(lopt_size, | 48 | lopt = vzalloc(lopt_size); |
49 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | ||
50 | PAGE_KERNEL); | ||
51 | else | 49 | else |
52 | lopt = kzalloc(lopt_size, GFP_KERNEL); | 50 | lopt = kzalloc(lopt_size, GFP_KERNEL); |
53 | if (lopt == NULL) | 51 | if (lopt == NULL) |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 200eb538fbb3..0f280348e0fd 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -365,7 +365,7 @@ static struct tnode *tnode_alloc(size_t size) | |||
365 | if (size <= PAGE_SIZE) | 365 | if (size <= PAGE_SIZE) |
366 | return kzalloc(size, GFP_KERNEL); | 366 | return kzalloc(size, GFP_KERNEL); |
367 | else | 367 | else |
368 | return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); | 368 | return vzalloc(size); |
369 | } | 369 | } |
370 | 370 | ||
371 | static void __tnode_vfree(struct work_struct *arg) | 371 | static void __tnode_vfree(struct work_struct *arg) |
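Both the request_sock and fib_trie hunks are straight conversions to the then-new vzalloc() helper, which returns zeroed, highmem-capable vmalloc memory and so stands in for the open-coded __vmalloc(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL) calls being removed. The usual "slab for small, vmalloc for large" pairing looks roughly like this (a generic sketch, not code from either file):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Small tables come from the slab, large ones from vmalloc; both
     * paths hand back zeroed memory. */
    static void *table_alloc(size_t size)
    {
        if (size <= PAGE_SIZE)
            return kzalloc(size, GFP_KERNEL);
        return vzalloc(size);
    }

    static void table_free(void *p)
    {
        if (is_vmalloc_addr(p))
            vfree(p);
        else
            kfree(p);
    }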
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 96bc7f9475a3..e5d1a44bcbdf 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -569,6 +569,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
569 | /* No need to clone since we're just using its address. */ | 569 | /* No need to clone since we're just using its address. */ |
570 | rt2 = rt; | 570 | rt2 = rt; |
571 | 571 | ||
572 | if (!fl.nl_u.ip4_u.saddr) | ||
573 | fl.nl_u.ip4_u.saddr = rt->rt_src; | ||
574 | |||
572 | err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0); | 575 | err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0); |
573 | switch (err) { | 576 | switch (err) { |
574 | case 0: | 577 | case 0: |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index b41ce0f0d514..23cc8e1ce8d4 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -98,7 +98,11 @@ | |||
98 | #endif | 98 | #endif |
99 | 99 | ||
100 | #define INFINITY_LIFE_TIME 0xFFFFFFFF | 100 | #define INFINITY_LIFE_TIME 0xFFFFFFFF |
101 | #define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b))) | 101 | |
102 | static inline u32 cstamp_delta(unsigned long cstamp) | ||
103 | { | ||
104 | return (cstamp - INITIAL_JIFFIES) * 100UL / HZ; | ||
105 | } | ||
102 | 106 | ||
103 | #define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1) | 107 | #define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1) |
104 | #define ADDRCONF_TIMER_FUZZ (HZ / 4) | 108 | #define ADDRCONF_TIMER_FUZZ (HZ / 4) |
@@ -2754,13 +2758,13 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2754 | ifa->state = INET6_IFADDR_STATE_DEAD; | 2758 | ifa->state = INET6_IFADDR_STATE_DEAD; |
2755 | spin_unlock_bh(&ifa->state_lock); | 2759 | spin_unlock_bh(&ifa->state_lock); |
2756 | 2760 | ||
2757 | if (state == INET6_IFADDR_STATE_DEAD) { | 2761 | if (state != INET6_IFADDR_STATE_DEAD) { |
2758 | in6_ifa_put(ifa); | ||
2759 | } else { | ||
2760 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | 2762 | __ipv6_ifa_notify(RTM_DELADDR, ifa); |
2761 | atomic_notifier_call_chain(&inet6addr_chain, | 2763 | atomic_notifier_call_chain(&inet6addr_chain, |
2762 | NETDEV_DOWN, ifa); | 2764 | NETDEV_DOWN, ifa); |
2763 | } | 2765 | } |
2766 | |||
2767 | in6_ifa_put(ifa); | ||
2764 | write_lock_bh(&idev->lock); | 2768 | write_lock_bh(&idev->lock); |
2765 | } | 2769 | } |
2766 | } | 2770 | } |
@@ -3444,10 +3448,8 @@ static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp, | |||
3444 | { | 3448 | { |
3445 | struct ifa_cacheinfo ci; | 3449 | struct ifa_cacheinfo ci; |
3446 | 3450 | ||
3447 | ci.cstamp = (u32)(TIME_DELTA(cstamp, INITIAL_JIFFIES) / HZ * 100 | 3451 | ci.cstamp = cstamp_delta(cstamp); |
3448 | + TIME_DELTA(cstamp, INITIAL_JIFFIES) % HZ * 100 / HZ); | 3452 | ci.tstamp = cstamp_delta(tstamp); |
3449 | ci.tstamp = (u32)(TIME_DELTA(tstamp, INITIAL_JIFFIES) / HZ * 100 | ||
3450 | + TIME_DELTA(tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ); | ||
3451 | ci.ifa_prefered = preferred; | 3453 | ci.ifa_prefered = preferred; |
3452 | ci.ifa_valid = valid; | 3454 | ci.ifa_valid = valid; |
3453 | 3455 | ||
@@ -3798,8 +3800,10 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, | |||
3798 | array[DEVCONF_AUTOCONF] = cnf->autoconf; | 3800 | array[DEVCONF_AUTOCONF] = cnf->autoconf; |
3799 | array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits; | 3801 | array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits; |
3800 | array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits; | 3802 | array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits; |
3801 | array[DEVCONF_RTR_SOLICIT_INTERVAL] = cnf->rtr_solicit_interval; | 3803 | array[DEVCONF_RTR_SOLICIT_INTERVAL] = |
3802 | array[DEVCONF_RTR_SOLICIT_DELAY] = cnf->rtr_solicit_delay; | 3804 | jiffies_to_msecs(cnf->rtr_solicit_interval); |
3805 | array[DEVCONF_RTR_SOLICIT_DELAY] = | ||
3806 | jiffies_to_msecs(cnf->rtr_solicit_delay); | ||
3803 | array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version; | 3807 | array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version; |
3804 | #ifdef CONFIG_IPV6_PRIVACY | 3808 | #ifdef CONFIG_IPV6_PRIVACY |
3805 | array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr; | 3809 | array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr; |
@@ -3813,7 +3817,8 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, | |||
3813 | array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo; | 3817 | array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo; |
3814 | #ifdef CONFIG_IPV6_ROUTER_PREF | 3818 | #ifdef CONFIG_IPV6_ROUTER_PREF |
3815 | array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref; | 3819 | array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref; |
3816 | array[DEVCONF_RTR_PROBE_INTERVAL] = cnf->rtr_probe_interval; | 3820 | array[DEVCONF_RTR_PROBE_INTERVAL] = |
3821 | jiffies_to_msecs(cnf->rtr_probe_interval); | ||
3817 | #ifdef CONFIG_IPV6_ROUTE_INFO | 3822 | #ifdef CONFIG_IPV6_ROUTE_INFO |
3818 | array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen; | 3823 | array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen; |
3819 | #endif | 3824 | #endif |
@@ -3929,10 +3934,9 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, | |||
3929 | NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags); | 3934 | NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags); |
3930 | 3935 | ||
3931 | ci.max_reasm_len = IPV6_MAXPLEN; | 3936 | ci.max_reasm_len = IPV6_MAXPLEN; |
3932 | ci.tstamp = (__u32)(TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) / HZ * 100 | 3937 | ci.tstamp = cstamp_delta(idev->tstamp); |
3933 | + TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ); | 3938 | ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time); |
3934 | ci.reachable_time = idev->nd_parms->reachable_time; | 3939 | ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time); |
3935 | ci.retrans_time = idev->nd_parms->retrans_time; | ||
3936 | NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci); | 3940 | NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci); |
3937 | 3941 | ||
3938 | nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); | 3942 | nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); |
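The addrconf cleanup folds the old two-step TIME_DELTA arithmetic into cstamp_delta(), which turns a jiffies timestamp into hundredths of a second since boot (the unit of the ifa_cacheinfo cstamp/tstamp fields), and switches the interval exports over to jiffies_to_msecs(). A quick numeric check of the conversion (the INITIAL_JIFFIES value below is illustrative only; it cancels out of the subtraction):

    #include <stdio.h>

    /* Illustrative value; the real INITIAL_JIFFIES biases jiffies so that
     * wraparound bugs surface shortly after boot.  It cancels out below. */
    #define INITIAL_JIFFIES 0xfffb6c20UL

    /* Same conversion as the new cstamp_delta(): jiffies since boot,
     * scaled to hundredths of a second. */
    static unsigned int cstamp_delta(unsigned long cstamp, unsigned int hz)
    {
        return (cstamp - INITIAL_JIFFIES) * 100UL / hz;
    }

    int main(void)
    {
        unsigned int hz = 250;                            /* a common CONFIG_HZ */
        unsigned long cstamp = INITIAL_JIFFIES + 5 * hz;  /* stamped 5s in      */

        printf("%u\n", cstamp_delta(cstamp, hz));         /* prints 500 */
        return 0;
    }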
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 7f097989cde2..a6de3059746d 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include <linux/capability.h> | 45 | #include <linux/capability.h> |
46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
47 | #include <linux/types.h> | 47 | #include <linux/types.h> |
48 | #include <linux/smp_lock.h> | ||
49 | #include <linux/socket.h> | 48 | #include <linux/socket.h> |
50 | #include <linux/sockios.h> | 49 | #include <linux/sockios.h> |
51 | #include <linux/slab.h> | 50 | #include <linux/slab.h> |
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 7fa86373de41..7c567b8aa89a 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c | |||
@@ -15,7 +15,6 @@ | |||
15 | 15 | ||
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/smp_lock.h> | ||
19 | #include "irnet_ppp.h" /* Private header */ | 18 | #include "irnet_ppp.h" /* Private header */ |
20 | /* Please put other headers in irnet.h - Thanks */ | 19 | /* Please put other headers in irnet.h - Thanks */ |
21 | 20 | ||
diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 285761e77d90..f6054f9ccbe3 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c | |||
@@ -550,22 +550,30 @@ EXPORT_SYMBOL(irttp_close_tsap); | |||
550 | */ | 550 | */ |
551 | int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb) | 551 | int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb) |
552 | { | 552 | { |
553 | int ret; | ||
554 | |||
553 | IRDA_ASSERT(self != NULL, return -1;); | 555 | IRDA_ASSERT(self != NULL, return -1;); |
554 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); | 556 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); |
555 | IRDA_ASSERT(skb != NULL, return -1;); | 557 | IRDA_ASSERT(skb != NULL, return -1;); |
556 | 558 | ||
557 | IRDA_DEBUG(4, "%s()\n", __func__); | 559 | IRDA_DEBUG(4, "%s()\n", __func__); |
558 | 560 | ||
561 | /* Take shortcut on zero byte packets */ | ||
562 | if (skb->len == 0) { | ||
563 | ret = 0; | ||
564 | goto err; | ||
565 | } | ||
566 | |||
559 | /* Check that nothing bad happens */ | 567 | /* Check that nothing bad happens */ |
560 | if ((skb->len == 0) || (!self->connected)) { | 568 | if (!self->connected) { |
561 | IRDA_DEBUG(1, "%s(), No data, or not connected\n", | 569 | IRDA_WARNING("%s(), Not connected\n", __func__); |
562 | __func__); | 570 | ret = -ENOTCONN; |
563 | goto err; | 571 | goto err; |
564 | } | 572 | } |
565 | 573 | ||
566 | if (skb->len > self->max_seg_size) { | 574 | if (skb->len > self->max_seg_size) { |
567 | IRDA_DEBUG(1, "%s(), UData is too large for IrLAP!\n", | 575 | IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__); |
568 | __func__); | 576 | ret = -EMSGSIZE; |
569 | goto err; | 577 | goto err; |
570 | } | 578 | } |
571 | 579 | ||
@@ -576,7 +584,7 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb) | |||
576 | 584 | ||
577 | err: | 585 | err: |
578 | dev_kfree_skb(skb); | 586 | dev_kfree_skb(skb); |
579 | return -1; | 587 | return ret; |
580 | } | 588 | } |
581 | EXPORT_SYMBOL(irttp_udata_request); | 589 | EXPORT_SYMBOL(irttp_udata_request); |
582 | 590 | ||
@@ -599,9 +607,15 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb) | |||
599 | IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__, | 607 | IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__, |
600 | skb_queue_len(&self->tx_queue)); | 608 | skb_queue_len(&self->tx_queue)); |
601 | 609 | ||
610 | /* Take shortcut on zero byte packets */ | ||
611 | if (skb->len == 0) { | ||
612 | ret = 0; | ||
613 | goto err; | ||
614 | } | ||
615 | |||
602 | /* Check that nothing bad happens */ | 616 | /* Check that nothing bad happens */ |
603 | if ((skb->len == 0) || (!self->connected)) { | 617 | if (!self->connected) { |
604 | IRDA_WARNING("%s: No data, or not connected\n", __func__); | 618 | IRDA_WARNING("%s: Not connected\n", __func__); |
605 | ret = -ENOTCONN; | 619 | ret = -ENOTCONN; |
606 | goto err; | 620 | goto err; |
607 | } | 621 | } |
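With the irttp changes above, callers of irttp_udata_request()/irttp_data_request() now see distinct errno values instead of a blanket -1, and zero-length skbs return 0 early rather than being logged and dropped as errors. A caller-side sketch of the new contract (the surrounding socket glue is paraphrased; only the return values come from the hunk):

	err = irttp_udata_request(self->tsap, skb);
	if (err == -ENOTCONN)		/* TSAP not connected; skb already freed */
		return err;
	if (err == -EMSGSIZE)		/* payload exceeds self->max_seg_size; skb freed */
		return err;
	/* err == 0: queued, or a zero-byte packet that was short-circuited */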
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig index a22dac227055..70bd1d0774c6 100644 --- a/net/netfilter/ipvs/Kconfig +++ b/net/netfilter/ipvs/Kconfig | |||
@@ -4,6 +4,7 @@ | |||
4 | menuconfig IP_VS | 4 | menuconfig IP_VS |
5 | tristate "IP virtual server support" | 5 | tristate "IP virtual server support" |
6 | depends on NET && INET && NETFILTER | 6 | depends on NET && INET && NETFILTER |
7 | depends on (NF_CONNTRACK || NF_CONNTRACK=n) | ||
7 | ---help--- | 8 | ---help--- |
8 | IP Virtual Server support will let you build a high-performance | 9 | IP Virtual Server support will let you build a high-performance |
9 | virtual server based on cluster of two or more real servers. This | 10 | virtual server based on cluster of two or more real servers. This |
diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 8920f2a83327..4e37c1cbe8b2 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c | |||
@@ -567,7 +567,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, | |||
567 | goto out; | 567 | goto out; |
568 | } | 568 | } |
569 | 569 | ||
570 | if (args->nr_local > (u64)UINT_MAX) { | 570 | if (args->nr_local > UIO_MAXIOV) { |
571 | ret = -EMSGSIZE; | 571 | ret = -EMSGSIZE; |
572 | goto out; | 572 | goto out; |
573 | } | 573 | } |
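The replaced bound deserves a note: checking nr_local against UINT_MAX only rejects counts that cannot fit in an unsigned int, while the buffer sized from it grows linearly with the count, so a large-but-representable value could still demand an enormous allocation. UIO_MAXIOV (1024) is the same per-call iovec ceiling used for readv()/writev(). Illustration only; iov_size is a local name introduced here, and the allocation itself sits outside this hunk:

	if (args->nr_local > UIO_MAXIOV)	/* was: > (u64)UINT_MAX */
		return -EMSGSIZE;
	iov_size = args->nr_local * sizeof(struct rds_iovec);	/* now at most 1024 entries */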
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index f71a73107ae9..80df89d957ba 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c | |||
@@ -115,9 +115,7 @@ EXPORT_SYMBOL_GPL(svc_seq_show); | |||
115 | */ | 115 | */ |
116 | struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) | 116 | struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) |
117 | { | 117 | { |
118 | struct rpc_iostats *new; | 118 | return kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL); |
119 | new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL); | ||
120 | return new; | ||
121 | } | 119 | } |
122 | EXPORT_SYMBOL_GPL(rpc_alloc_iostats); | 120 | EXPORT_SYMBOL_GPL(rpc_alloc_iostats); |
123 | 121 | ||
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index c82fe739fbdc..ea2ff78dcf7b 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -5,7 +5,6 @@ | |||
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/smp_lock.h> | ||
9 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
10 | #include <linux/freezer.h> | 9 | #include <linux/freezer.h> |
11 | #include <linux/kthread.h> | 10 | #include <linux/kthread.h> |
diff --git a/net/wireless/chan.c b/net/wireless/chan.c index d0c92dddb26b..17cd0c04d139 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c | |||
@@ -44,6 +44,38 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev, | |||
44 | return chan; | 44 | return chan; |
45 | } | 45 | } |
46 | 46 | ||
47 | static bool can_beacon_sec_chan(struct wiphy *wiphy, | ||
48 | struct ieee80211_channel *chan, | ||
49 | enum nl80211_channel_type channel_type) | ||
50 | { | ||
51 | struct ieee80211_channel *sec_chan; | ||
52 | int diff; | ||
53 | |||
54 | switch (channel_type) { | ||
55 | case NL80211_CHAN_HT40PLUS: | ||
56 | diff = 20; | ||
57 | break; | ||
58 | case NL80211_CHAN_HT40MINUS: | ||
59 | diff = -20; | ||
60 | break; | ||
61 | default: | ||
62 | return false; | ||
63 | } | ||
64 | |||
65 | sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff); | ||
66 | if (!sec_chan) | ||
67 | return false; | ||
68 | |||
69 | /* we'll need a DFS capability later */ | ||
70 | if (sec_chan->flags & (IEEE80211_CHAN_DISABLED | | ||
71 | IEEE80211_CHAN_PASSIVE_SCAN | | ||
72 | IEEE80211_CHAN_NO_IBSS | | ||
73 | IEEE80211_CHAN_RADAR)) | ||
74 | return false; | ||
75 | |||
76 | return true; | ||
77 | } | ||
78 | |||
47 | int cfg80211_set_freq(struct cfg80211_registered_device *rdev, | 79 | int cfg80211_set_freq(struct cfg80211_registered_device *rdev, |
48 | struct wireless_dev *wdev, int freq, | 80 | struct wireless_dev *wdev, int freq, |
49 | enum nl80211_channel_type channel_type) | 81 | enum nl80211_channel_type channel_type) |
@@ -68,6 +100,28 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev, | |||
68 | if (!chan) | 100 | if (!chan) |
69 | return -EINVAL; | 101 | return -EINVAL; |
70 | 102 | ||
103 | /* Both channels should be able to initiate communication */ | ||
104 | if (wdev && (wdev->iftype == NL80211_IFTYPE_ADHOC || | ||
105 | wdev->iftype == NL80211_IFTYPE_AP || | ||
106 | wdev->iftype == NL80211_IFTYPE_AP_VLAN || | ||
107 | wdev->iftype == NL80211_IFTYPE_MESH_POINT || | ||
108 | wdev->iftype == NL80211_IFTYPE_P2P_GO)) { | ||
109 | switch (channel_type) { | ||
110 | case NL80211_CHAN_HT40PLUS: | ||
111 | case NL80211_CHAN_HT40MINUS: | ||
112 | if (!can_beacon_sec_chan(&rdev->wiphy, chan, | ||
113 | channel_type)) { | ||
114 | printk(KERN_DEBUG | ||
115 | "cfg80211: Secondary channel not " | ||
116 | "allowed to initiate communication\n"); | ||
117 | return -EINVAL; | ||
118 | } | ||
119 | break; | ||
120 | default: | ||
121 | break; | ||
122 | } | ||
123 | } | ||
124 | |||
71 | result = rdev->ops->set_channel(&rdev->wiphy, | 125 | result = rdev->ops->set_channel(&rdev->wiphy, |
72 | wdev ? wdev->netdev : NULL, | 126 | wdev ? wdev->netdev : NULL, |
73 | chan, channel_type); | 127 | chan, channel_type); |
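Summarizing the cfg80211 addition: the HT40 secondary channel is the primary's center frequency shifted by ±20 MHz, and beacon-initiating interface types (IBSS, AP/AP_VLAN, mesh point, P2P GO) are refused the frequency if that secondary channel is missing or flagged disabled, passive-scan, no-IBSS, or radar. Condensed to the frequency math, using only names already in the hunk:

	int diff = (channel_type == NL80211_CHAN_HT40PLUS) ? 20 : -20;
	struct ieee80211_channel *sec_chan =
		ieee80211_get_channel(wiphy, chan->center_freq + diff);

	if (!sec_chan || (sec_chan->flags & (IEEE80211_CHAN_DISABLED |
					     IEEE80211_CHAN_PASSIVE_SCAN |
					     IEEE80211_CHAN_NO_IBSS |
					     IEEE80211_CHAN_RADAR)))
		return -EINVAL;	/* secondary channel cannot initiate communication */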
diff --git a/net/xfrm/xfrm_hash.c b/net/xfrm/xfrm_hash.c index a2023ec52329..1e98bc0fe0a5 100644 --- a/net/xfrm/xfrm_hash.c +++ b/net/xfrm/xfrm_hash.c | |||
@@ -19,7 +19,7 @@ struct hlist_head *xfrm_hash_alloc(unsigned int sz) | |||
19 | if (sz <= PAGE_SIZE) | 19 | if (sz <= PAGE_SIZE) |
20 | n = kzalloc(sz, GFP_KERNEL); | 20 | n = kzalloc(sz, GFP_KERNEL); |
21 | else if (hashdist) | 21 | else if (hashdist) |
22 | n = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); | 22 | n = vzalloc(sz); |
23 | else | 23 | else |
24 | n = (struct hlist_head *) | 24 | n = (struct hlist_head *) |
25 | __get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, | 25 | __get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, |
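This xfrm_hash hunk (and the matching conversions in sound/oss further down) leans on vzalloc() being precisely the zeroing form of vmalloc(), so the explicit flag spelling in the left column collapses to a single call:

	/* before */
	n = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	/* after: identical semantics, and no manual memset needed either */
	n = vzalloc(sz);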
diff --git a/scripts/gfp-translate b/scripts/gfp-translate index d81b968d864e..c9230e158a8f 100644 --- a/scripts/gfp-translate +++ b/scripts/gfp-translate | |||
@@ -63,7 +63,12 @@ fi | |||
63 | 63 | ||
64 | # Extract GFP flags from the kernel source | 64 | # Extract GFP flags from the kernel source |
65 | TMPFILE=`mktemp -t gfptranslate-XXXXXX` || exit 1 | 65 | TMPFILE=`mktemp -t gfptranslate-XXXXXX` || exit 1 |
66 | grep "^#define __GFP" $SOURCE/include/linux/gfp.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE | 66 | grep -q ___GFP $SOURCE/include/linux/gfp.h |
67 | if [ $? -eq 0 ]; then | ||
68 | grep "^#define ___GFP" $SOURCE/include/linux/gfp.h | sed -e 's/u$//' | grep -v GFP_BITS > $TMPFILE | ||
69 | else | ||
70 | grep "^#define __GFP" $SOURCE/include/linux/gfp.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE | ||
71 | fi | ||
67 | 72 | ||
68 | # Parse the flags | 73 | # Parse the flags |
69 | IFS=" | 74 | IFS=" |
diff --git a/scripts/kernel-doc b/scripts/kernel-doc index cdb6dc1f6458..39580a5dc5df 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc | |||
@@ -5,7 +5,7 @@ use strict; | |||
5 | ## Copyright (c) 1998 Michael Zucchi, All Rights Reserved ## | 5 | ## Copyright (c) 1998 Michael Zucchi, All Rights Reserved ## |
6 | ## Copyright (C) 2000, 1 Tim Waugh <twaugh@redhat.com> ## | 6 | ## Copyright (C) 2000, 1 Tim Waugh <twaugh@redhat.com> ## |
7 | ## Copyright (C) 2001 Simon Huggins ## | 7 | ## Copyright (C) 2001 Simon Huggins ## |
8 | ## Copyright (C) 2005-2009 Randy Dunlap ## | 8 | ## Copyright (C) 2005-2010 Randy Dunlap ## |
9 | ## ## | 9 | ## ## |
10 | ## #define enhancements by Armin Kuster <akuster@mvista.com> ## | 10 | ## #define enhancements by Armin Kuster <akuster@mvista.com> ## |
11 | ## Copyright (c) 2000 MontaVista Software, Inc. ## | 11 | ## Copyright (c) 2000 MontaVista Software, Inc. ## |
@@ -453,7 +453,7 @@ sub output_highlight { | |||
453 | if ($output_mode eq "html" || $output_mode eq "xml") { | 453 | if ($output_mode eq "html" || $output_mode eq "xml") { |
454 | $contents = local_unescape($contents); | 454 | $contents = local_unescape($contents); |
455 | # convert data read & converted thru xml_escape() into &xyz; format: | 455 | # convert data read & converted thru xml_escape() into &xyz; format: |
456 | $contents =~ s/\\\\\\/&/g; | 456 | $contents =~ s/\\\\\\/\&/g; |
457 | } | 457 | } |
458 | # print STDERR "contents b4:$contents\n"; | 458 | # print STDERR "contents b4:$contents\n"; |
459 | eval $dohighlight; | 459 | eval $dohighlight; |
@@ -770,7 +770,11 @@ sub output_struct_xml(%) { | |||
770 | print $args{'type'} . " " . $args{'struct'} . " {\n"; | 770 | print $args{'type'} . " " . $args{'struct'} . " {\n"; |
771 | foreach $parameter (@{$args{'parameterlist'}}) { | 771 | foreach $parameter (@{$args{'parameterlist'}}) { |
772 | if ($parameter =~ /^#/) { | 772 | if ($parameter =~ /^#/) { |
773 | print "$parameter\n"; | 773 | my $prm = $parameter; |
774 | # convert data read & converted thru xml_escape() into &xyz; format: | ||
775 | # This allows us to have #define macros interspersed in a struct. | ||
776 | $prm =~ s/\\\\\\/\&/g; | ||
777 | print "$prm\n"; | ||
774 | next; | 778 | next; |
775 | } | 779 | } |
776 | 780 | ||
@@ -1701,6 +1705,8 @@ sub push_parameter($$$) { | |||
1701 | } | 1705 | } |
1702 | } | 1706 | } |
1703 | 1707 | ||
1708 | $param = xml_escape($param); | ||
1709 | |||
1704 | # strip spaces from $param so that it is one continous string | 1710 | # strip spaces from $param so that it is one continous string |
1705 | # on @parameterlist; | 1711 | # on @parameterlist; |
1706 | # this fixes a problem where check_sections() cannot find | 1712 | # this fixes a problem where check_sections() cannot find |
diff --git a/security/capability.c b/security/capability.c index 30ae00fbecd5..c773635ca3a0 100644 --- a/security/capability.c +++ b/security/capability.c | |||
@@ -17,6 +17,11 @@ static int cap_sysctl(ctl_table *table, int op) | |||
17 | return 0; | 17 | return 0; |
18 | } | 18 | } |
19 | 19 | ||
20 | static int cap_syslog(int type) | ||
21 | { | ||
22 | return 0; | ||
23 | } | ||
24 | |||
20 | static int cap_quotactl(int cmds, int type, int id, struct super_block *sb) | 25 | static int cap_quotactl(int cmds, int type, int id, struct super_block *sb) |
21 | { | 26 | { |
22 | return 0; | 27 | return 0; |
diff --git a/security/commoncap.c b/security/commoncap.c index 04b80f9912bf..64c2ed9c9015 100644 --- a/security/commoncap.c +++ b/security/commoncap.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/prctl.h> | 28 | #include <linux/prctl.h> |
29 | #include <linux/securebits.h> | 29 | #include <linux/securebits.h> |
30 | #include <linux/syslog.h> | ||
31 | 30 | ||
32 | /* | 31 | /* |
33 | * If a non-root user executes a setuid-root binary in | 32 | * If a non-root user executes a setuid-root binary in |
@@ -884,26 +883,6 @@ error: | |||
884 | } | 883 | } |
885 | 884 | ||
886 | /** | 885 | /** |
887 | * cap_syslog - Determine whether syslog function is permitted | ||
888 | * @type: Function requested | ||
889 | * @from_file: Whether this request came from an open file (i.e. /proc) | ||
890 | * | ||
891 | * Determine whether the current process is permitted to use a particular | ||
892 | * syslog function, returning 0 if permission is granted, -ve if not. | ||
893 | */ | ||
894 | int cap_syslog(int type, bool from_file) | ||
895 | { | ||
896 | if (type != SYSLOG_ACTION_OPEN && from_file) | ||
897 | return 0; | ||
898 | if (dmesg_restrict && !capable(CAP_SYS_ADMIN)) | ||
899 | return -EPERM; | ||
900 | if ((type != SYSLOG_ACTION_READ_ALL && | ||
901 | type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN)) | ||
902 | return -EPERM; | ||
903 | return 0; | ||
904 | } | ||
905 | |||
906 | /** | ||
907 | * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted | 886 | * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted |
908 | * @mm: The VM space in which the new mapping is to be made | 887 | * @mm: The VM space in which the new mapping is to be made |
909 | * @pages: The size of the mapping | 888 | * @pages: The size of the mapping |
diff --git a/security/security.c b/security/security.c index 3ef5e2a7a741..1b798d3df710 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -197,9 +197,9 @@ int security_quota_on(struct dentry *dentry) | |||
197 | return security_ops->quota_on(dentry); | 197 | return security_ops->quota_on(dentry); |
198 | } | 198 | } |
199 | 199 | ||
200 | int security_syslog(int type, bool from_file) | 200 | int security_syslog(int type) |
201 | { | 201 | { |
202 | return security_ops->syslog(type, from_file); | 202 | return security_ops->syslog(type); |
203 | } | 203 | } |
204 | 204 | ||
205 | int security_settime(struct timespec *ts, struct timezone *tz) | 205 | int security_settime(struct timespec *ts, struct timezone *tz) |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index d9154cf90ae1..65fa8bf596f5 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -1973,14 +1973,10 @@ static int selinux_quota_on(struct dentry *dentry) | |||
1973 | return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON); | 1973 | return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON); |
1974 | } | 1974 | } |
1975 | 1975 | ||
1976 | static int selinux_syslog(int type, bool from_file) | 1976 | static int selinux_syslog(int type) |
1977 | { | 1977 | { |
1978 | int rc; | 1978 | int rc; |
1979 | 1979 | ||
1980 | rc = cap_syslog(type, from_file); | ||
1981 | if (rc) | ||
1982 | return rc; | ||
1983 | |||
1984 | switch (type) { | 1980 | switch (type) { |
1985 | case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */ | 1981 | case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */ |
1986 | case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */ | 1982 | case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */ |
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index bc39f4067af6..489a85afa477 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c | |||
@@ -157,15 +157,11 @@ static int smack_ptrace_traceme(struct task_struct *ptp) | |||
157 | * | 157 | * |
158 | * Returns 0 on success, error code otherwise. | 158 | * Returns 0 on success, error code otherwise. |
159 | */ | 159 | */ |
160 | static int smack_syslog(int type, bool from_file) | 160 | static int smack_syslog(int typefrom_file) |
161 | { | 161 | { |
162 | int rc; | 162 | int rc = 0; |
163 | char *sp = current_security(); | 163 | char *sp = current_security(); |
164 | 164 | ||
165 | rc = cap_syslog(type, from_file); | ||
166 | if (rc != 0) | ||
167 | return rc; | ||
168 | |||
169 | if (capable(CAP_MAC_OVERRIDE)) | 165 | if (capable(CAP_MAC_OVERRIDE)) |
170 | return 0; | 166 | return 0; |
171 | 167 | ||
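Taken together, the capability.c, commoncap.c, security.c, SELinux and Smack hunks drop the from_file parameter and remove the cap_syslog() preamble from each LSM hook, so security_syslog(type) now carries only module policy; the generic dmesg_restrict/CAP_SYS_ADMIN check presumably moves to the syslog(2) entry point itself, which is outside this section. A sketch of what a hook looks like after the change; example_syslog and example_policy_check are hypothetical names, not code from the patch:

static int example_syslog(int type)
{
	/* no cap_syslog(type, from_file) call any more */
	if (type == SYSLOG_ACTION_READ_ALL ||
	    type == SYSLOG_ACTION_SIZE_BUFFER)
		return 0;
	return example_policy_check(type);	/* module-specific decision */
}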
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c index f2f41c854221..6e2409181895 100644 --- a/sound/atmel/abdac.c +++ b/sound/atmel/abdac.c | |||
@@ -420,9 +420,9 @@ static int __devinit atmel_abdac_probe(struct platform_device *pdev) | |||
420 | return PTR_ERR(pclk); | 420 | return PTR_ERR(pclk); |
421 | } | 421 | } |
422 | sample_clk = clk_get(&pdev->dev, "sample_clk"); | 422 | sample_clk = clk_get(&pdev->dev, "sample_clk"); |
423 | if (IS_ERR(pclk)) { | 423 | if (IS_ERR(sample_clk)) { |
424 | dev_dbg(&pdev->dev, "no sample clock\n"); | 424 | dev_dbg(&pdev->dev, "no sample clock\n"); |
425 | retval = PTR_ERR(pclk); | 425 | retval = PTR_ERR(sample_clk); |
426 | goto out_put_pclk; | 426 | goto out_put_pclk; |
427 | } | 427 | } |
428 | clk_enable(pclk); | 428 | clk_enable(pclk); |
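The abdac fix is a copy-and-paste bug: after the second clk_get() the code still tested and propagated the first clock's handle, so a missing "sample_clk" went unnoticed. The corrected probe sequence, as in the right column:

	sample_clk = clk_get(&pdev->dev, "sample_clk");
	if (IS_ERR(sample_clk)) {		/* test the handle just requested */
		dev_dbg(&pdev->dev, "no sample clock\n");
		retval = PTR_ERR(sample_clk);	/* and report its error, not pclk's */
		goto out_put_pclk;		/* release the earlier pclk reference */
	}
	clk_enable(pclk);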
diff --git a/sound/core/info.c b/sound/core/info.c index b70564ed8b37..7077f601da5a 100644 --- a/sound/core/info.c +++ b/sound/core/info.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/time.h> | 23 | #include <linux/time.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/smp_lock.h> | ||
27 | #include <linux/string.h> | 26 | #include <linux/string.h> |
28 | #include <sound/core.h> | 27 | #include <sound/core.h> |
29 | #include <sound/minors.h> | 28 | #include <sound/minors.h> |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index a1707cca9c66..b75db8e9cc0f 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -223,7 +223,7 @@ static void xrun_log(struct snd_pcm_substream *substream, | |||
223 | entry->jiffies = jiffies; | 223 | entry->jiffies = jiffies; |
224 | entry->pos = pos; | 224 | entry->pos = pos; |
225 | entry->period_size = runtime->period_size; | 225 | entry->period_size = runtime->period_size; |
226 | entry->buffer_size = runtime->buffer_size;; | 226 | entry->buffer_size = runtime->buffer_size; |
227 | entry->old_hw_ptr = runtime->status->hw_ptr; | 227 | entry->old_hw_ptr = runtime->status->hw_ptr; |
228 | entry->hw_ptr_base = runtime->hw_ptr_base; | 228 | entry->hw_ptr_base = runtime->hw_ptr_base; |
229 | log->idx = (log->idx + 1) % XRUN_LOG_CNT; | 229 | log->idx = (log->idx + 1) % XRUN_LOG_CNT; |
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 8bc7cb3db330..e82c1f97d99e 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
23 | #include <linux/file.h> | 23 | #include <linux/file.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/smp_lock.h> | ||
26 | #include <linux/time.h> | 25 | #include <linux/time.h> |
27 | #include <linux/pm_qos_params.h> | 26 | #include <linux/pm_qos_params.h> |
28 | #include <linux/uio.h> | 27 | #include <linux/uio.h> |
diff --git a/sound/core/sound.c b/sound/core/sound.c index 62a093efb453..66691fe437e6 100644 --- a/sound/core/sound.c +++ b/sound/core/sound.c | |||
@@ -21,7 +21,6 @@ | |||
21 | 21 | ||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/smp_lock.h> | ||
25 | #include <linux/time.h> | 24 | #include <linux/time.h> |
26 | #include <linux/device.h> | 25 | #include <linux/device.h> |
27 | #include <linux/moduleparam.h> | 26 | #include <linux/moduleparam.h> |
diff --git a/sound/oss/dev_table.c b/sound/oss/dev_table.c index 727bdb9ba2dc..d8cf3e58dc76 100644 --- a/sound/oss/dev_table.c +++ b/sound/oss/dev_table.c | |||
@@ -71,7 +71,7 @@ int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver, | |||
71 | if (sound_nblocks >= MAX_MEM_BLOCKS) | 71 | if (sound_nblocks >= MAX_MEM_BLOCKS) |
72 | sound_nblocks = MAX_MEM_BLOCKS - 1; | 72 | sound_nblocks = MAX_MEM_BLOCKS - 1; |
73 | 73 | ||
74 | op = (struct audio_operations *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct audio_operations))); | 74 | op = (struct audio_operations *) (sound_mem_blocks[sound_nblocks] = vzalloc(sizeof(struct audio_operations))); |
75 | sound_nblocks++; | 75 | sound_nblocks++; |
76 | if (sound_nblocks >= MAX_MEM_BLOCKS) | 76 | if (sound_nblocks >= MAX_MEM_BLOCKS) |
77 | sound_nblocks = MAX_MEM_BLOCKS - 1; | 77 | sound_nblocks = MAX_MEM_BLOCKS - 1; |
@@ -81,7 +81,6 @@ int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver, | |||
81 | sound_unload_audiodev(num); | 81 | sound_unload_audiodev(num); |
82 | return -(ENOMEM); | 82 | return -(ENOMEM); |
83 | } | 83 | } |
84 | memset((char *) op, 0, sizeof(struct audio_operations)); | ||
85 | init_waitqueue_head(&op->in_sleeper); | 84 | init_waitqueue_head(&op->in_sleeper); |
86 | init_waitqueue_head(&op->out_sleeper); | 85 | init_waitqueue_head(&op->out_sleeper); |
87 | init_waitqueue_head(&op->poll_sleeper); | 86 | init_waitqueue_head(&op->poll_sleeper); |
@@ -128,7 +127,7 @@ int sound_install_mixer(int vers, char *name, struct mixer_operations *driver, | |||
128 | /* FIXME: This leaks a mixer_operations struct every time its called | 127 | /* FIXME: This leaks a mixer_operations struct every time its called |
129 | until you unload sound! */ | 128 | until you unload sound! */ |
130 | 129 | ||
131 | op = (struct mixer_operations *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct mixer_operations))); | 130 | op = (struct mixer_operations *) (sound_mem_blocks[sound_nblocks] = vzalloc(sizeof(struct mixer_operations))); |
132 | sound_nblocks++; | 131 | sound_nblocks++; |
133 | if (sound_nblocks >= MAX_MEM_BLOCKS) | 132 | if (sound_nblocks >= MAX_MEM_BLOCKS) |
134 | sound_nblocks = MAX_MEM_BLOCKS - 1; | 133 | sound_nblocks = MAX_MEM_BLOCKS - 1; |
@@ -137,7 +136,6 @@ int sound_install_mixer(int vers, char *name, struct mixer_operations *driver, | |||
137 | printk(KERN_ERR "Sound: Can't allocate mixer driver for (%s)\n", name); | 136 | printk(KERN_ERR "Sound: Can't allocate mixer driver for (%s)\n", name); |
138 | return -ENOMEM; | 137 | return -ENOMEM; |
139 | } | 138 | } |
140 | memset((char *) op, 0, sizeof(struct mixer_operations)); | ||
141 | memcpy((char *) op, (char *) driver, driver_size); | 139 | memcpy((char *) op, (char *) driver, driver_size); |
142 | 140 | ||
143 | strlcpy(op->name, name, sizeof(op->name)); | 141 | strlcpy(op->name, name, sizeof(op->name)); |
diff --git a/sound/oss/midibuf.c b/sound/oss/midibuf.c index 782b3b84dac6..ceedb1eff203 100644 --- a/sound/oss/midibuf.c +++ b/sound/oss/midibuf.c | |||
@@ -178,7 +178,7 @@ int MIDIbuf_open(int dev, struct file *file) | |||
178 | return err; | 178 | return err; |
179 | 179 | ||
180 | parms[dev].prech_timeout = MAX_SCHEDULE_TIMEOUT; | 180 | parms[dev].prech_timeout = MAX_SCHEDULE_TIMEOUT; |
181 | midi_in_buf[dev] = (struct midi_buf *) vmalloc(sizeof(struct midi_buf)); | 181 | midi_in_buf[dev] = vmalloc(sizeof(struct midi_buf)); |
182 | 182 | ||
183 | if (midi_in_buf[dev] == NULL) | 183 | if (midi_in_buf[dev] == NULL) |
184 | { | 184 | { |
@@ -188,7 +188,7 @@ int MIDIbuf_open(int dev, struct file *file) | |||
188 | } | 188 | } |
189 | midi_in_buf[dev]->len = midi_in_buf[dev]->head = midi_in_buf[dev]->tail = 0; | 189 | midi_in_buf[dev]->len = midi_in_buf[dev]->head = midi_in_buf[dev]->tail = 0; |
190 | 190 | ||
191 | midi_out_buf[dev] = (struct midi_buf *) vmalloc(sizeof(struct midi_buf)); | 191 | midi_out_buf[dev] = vmalloc(sizeof(struct midi_buf)); |
192 | 192 | ||
193 | if (midi_out_buf[dev] == NULL) | 193 | if (midi_out_buf[dev] == NULL) |
194 | { | 194 | { |
diff --git a/sound/oss/pss.c b/sound/oss/pss.c index e19dd5dcc2de..9b800ce5100e 100644 --- a/sound/oss/pss.c +++ b/sound/oss/pss.c | |||
@@ -859,7 +859,7 @@ static int pss_coproc_ioctl(void *dev_info, unsigned int cmd, void __user *arg, | |||
859 | return 0; | 859 | return 0; |
860 | 860 | ||
861 | case SNDCTL_COPR_LOAD: | 861 | case SNDCTL_COPR_LOAD: |
862 | buf = (copr_buffer *) vmalloc(sizeof(copr_buffer)); | 862 | buf = vmalloc(sizeof(copr_buffer)); |
863 | if (buf == NULL) | 863 | if (buf == NULL) |
864 | return -ENOSPC; | 864 | return -ENOSPC; |
865 | if (copy_from_user(buf, arg, sizeof(copr_buffer))) { | 865 | if (copy_from_user(buf, arg, sizeof(copr_buffer))) { |
@@ -871,7 +871,7 @@ static int pss_coproc_ioctl(void *dev_info, unsigned int cmd, void __user *arg, | |||
871 | return err; | 871 | return err; |
872 | 872 | ||
873 | case SNDCTL_COPR_SENDMSG: | 873 | case SNDCTL_COPR_SENDMSG: |
874 | mbuf = (copr_msg *)vmalloc(sizeof(copr_msg)); | 874 | mbuf = vmalloc(sizeof(copr_msg)); |
875 | if (mbuf == NULL) | 875 | if (mbuf == NULL) |
876 | return -ENOSPC; | 876 | return -ENOSPC; |
877 | if (copy_from_user(mbuf, arg, sizeof(copr_msg))) { | 877 | if (copy_from_user(mbuf, arg, sizeof(copr_msg))) { |
@@ -895,7 +895,7 @@ static int pss_coproc_ioctl(void *dev_info, unsigned int cmd, void __user *arg, | |||
895 | 895 | ||
896 | case SNDCTL_COPR_RCVMSG: | 896 | case SNDCTL_COPR_RCVMSG: |
897 | err = 0; | 897 | err = 0; |
898 | mbuf = (copr_msg *)vmalloc(sizeof(copr_msg)); | 898 | mbuf = vmalloc(sizeof(copr_msg)); |
899 | if (mbuf == NULL) | 899 | if (mbuf == NULL) |
900 | return -ENOSPC; | 900 | return -ENOSPC; |
901 | data = (unsigned short *)mbuf->data; | 901 | data = (unsigned short *)mbuf->data; |
diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c index e85789e53816..5ea1098ac427 100644 --- a/sound/oss/sequencer.c +++ b/sound/oss/sequencer.c | |||
@@ -1646,13 +1646,13 @@ void sequencer_init(void) | |||
1646 | { | 1646 | { |
1647 | if (sequencer_ok) | 1647 | if (sequencer_ok) |
1648 | return; | 1648 | return; |
1649 | queue = (unsigned char *)vmalloc(SEQ_MAX_QUEUE * EV_SZ); | 1649 | queue = vmalloc(SEQ_MAX_QUEUE * EV_SZ); |
1650 | if (queue == NULL) | 1650 | if (queue == NULL) |
1651 | { | 1651 | { |
1652 | printk(KERN_ERR "sequencer: Can't allocate memory for sequencer output queue\n"); | 1652 | printk(KERN_ERR "sequencer: Can't allocate memory for sequencer output queue\n"); |
1653 | return; | 1653 | return; |
1654 | } | 1654 | } |
1655 | iqueue = (unsigned char *)vmalloc(SEQ_MAX_QUEUE * IEV_SZ); | 1655 | iqueue = vmalloc(SEQ_MAX_QUEUE * IEV_SZ); |
1656 | if (iqueue == NULL) | 1656 | if (iqueue == NULL) |
1657 | { | 1657 | { |
1658 | printk(KERN_ERR "sequencer: Can't allocate memory for sequencer input queue\n"); | 1658 | printk(KERN_ERR "sequencer: Can't allocate memory for sequencer input queue\n"); |
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c index 62895a719fcb..22dbd91811a4 100644 --- a/sound/pci/asihpi/hpioctl.c +++ b/sound/pci/asihpi/hpioctl.c | |||
@@ -435,7 +435,7 @@ void __devexit asihpi_adapter_remove(struct pci_dev *pci_dev) | |||
435 | struct hpi_message hm; | 435 | struct hpi_message hm; |
436 | struct hpi_response hr; | 436 | struct hpi_response hr; |
437 | struct hpi_adapter *pa; | 437 | struct hpi_adapter *pa; |
438 | pa = (struct hpi_adapter *)pci_get_drvdata(pci_dev); | 438 | pa = pci_get_drvdata(pci_dev); |
439 | 439 | ||
440 | hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM, | 440 | hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM, |
441 | HPI_SUBSYS_DELETE_ADAPTER); | 441 | HPI_SUBSYS_DELETE_ADAPTER); |
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c index 4679ed83a43b..2f3cacbd5528 100644 --- a/sound/pci/azt3328.c +++ b/sound/pci/azt3328.c | |||
@@ -1129,10 +1129,11 @@ snd_azf3328_codec_setdmaa(struct snd_azf3328 *chip, | |||
1129 | 1129 | ||
1130 | count_areas = size/2; | 1130 | count_areas = size/2; |
1131 | addr_area2 = addr+count_areas; | 1131 | addr_area2 = addr+count_areas; |
1132 | count_areas--; /* max. index */ | ||
1133 | snd_azf3328_dbgcodec("setdma: buffers %08lx[%u] / %08lx[%u]\n", | 1132 | snd_azf3328_dbgcodec("setdma: buffers %08lx[%u] / %08lx[%u]\n", |
1134 | addr, count_areas, addr_area2, count_areas); | 1133 | addr, count_areas, addr_area2, count_areas); |
1135 | 1134 | ||
1135 | count_areas--; /* max. index */ | ||
1136 | |||
1136 | /* build combined I/O buffer length word */ | 1137 | /* build combined I/O buffer length word */ |
1137 | lengths = (count_areas << 16) | (count_areas); | 1138 | lengths = (count_areas << 16) | (count_areas); |
1138 | spin_lock_irqsave(&chip->reg_lock, flags); | 1139 | spin_lock_irqsave(&chip->reg_lock, flags); |
@@ -1740,11 +1741,15 @@ static const struct snd_pcm_hardware snd_azf3328_hardware = | |||
1740 | .rate_max = AZF_FREQ_66200, | 1741 | .rate_max = AZF_FREQ_66200, |
1741 | .channels_min = 1, | 1742 | .channels_min = 1, |
1742 | .channels_max = 2, | 1743 | .channels_max = 2, |
1743 | .buffer_bytes_max = 65536, | 1744 | .buffer_bytes_max = (64*1024), |
1744 | .period_bytes_min = 64, | 1745 | .period_bytes_min = 1024, |
1745 | .period_bytes_max = 65536, | 1746 | .period_bytes_max = (32*1024), |
1746 | .periods_min = 1, | 1747 | /* We simply have two DMA areas (instead of a list of descriptors |
1747 | .periods_max = 1024, | 1748 | such as other cards); I believe that this is a fixed hardware |
1749 | attribute and there isn't much driver magic to be done to expand it. | ||
1750 | Thus indicate that we have at least and at most 2 periods. */ | ||
1751 | .periods_min = 2, | ||
1752 | .periods_max = 2, | ||
1748 | /* FIXME: maybe that card actually has a FIFO? | 1753 | /* FIXME: maybe that card actually has a FIFO? |
1749 | * Hmm, it seems newer revisions do have one, but we still don't know | 1754 | * Hmm, it seems newer revisions do have one, but we still don't know |
1750 | * its size... */ | 1755 | * its size... */ |
@@ -1980,8 +1985,13 @@ snd_azf3328_timer_stop(struct snd_timer *timer) | |||
1980 | chip = snd_timer_chip(timer); | 1985 | chip = snd_timer_chip(timer); |
1981 | spin_lock_irqsave(&chip->reg_lock, flags); | 1986 | spin_lock_irqsave(&chip->reg_lock, flags); |
1982 | /* disable timer countdown and interrupt */ | 1987 | /* disable timer countdown and interrupt */ |
1983 | /* FIXME: should we write TIMER_IRQ_ACK here? */ | 1988 | /* Hmm, should we write TIMER_IRQ_ACK here? |
1984 | snd_azf3328_ctrl_outb(chip, IDX_IO_TIMER_VALUE + 3, 0); | 1989 | YES indeed, otherwise a rogue timer operation - which prompts |
1990 | ALSA(?) to call repeated stop() in vain, but NOT start() - | ||
1991 | will never end (value 0x03 is kept shown in control byte). | ||
1992 | Simply manually poking 0x04 _once_ immediately successfully stops | ||
1993 | the hardware/ALSA interrupt activity. */ | ||
1994 | snd_azf3328_ctrl_outb(chip, IDX_IO_TIMER_VALUE + 3, 0x04); | ||
1985 | spin_unlock_irqrestore(&chip->reg_lock, flags); | 1995 | spin_unlock_irqrestore(&chip->reg_lock, flags); |
1986 | snd_azf3328_dbgcallleave(); | 1996 | snd_azf3328_dbgcallleave(); |
1987 | return 0; | 1997 | return 0; |
diff --git a/sound/pci/ctxfi/ctpcm.c b/sound/pci/ctxfi/ctpcm.c index 85ab43e89212..457d21189b0d 100644 --- a/sound/pci/ctxfi/ctpcm.c +++ b/sound/pci/ctxfi/ctpcm.c | |||
@@ -129,8 +129,6 @@ static int ct_pcm_playback_open(struct snd_pcm_substream *substream) | |||
129 | 129 | ||
130 | apcm->substream = substream; | 130 | apcm->substream = substream; |
131 | apcm->interrupt = ct_atc_pcm_interrupt; | 131 | apcm->interrupt = ct_atc_pcm_interrupt; |
132 | runtime->private_data = apcm; | ||
133 | runtime->private_free = ct_atc_pcm_free_substream; | ||
134 | if (IEC958 == substream->pcm->device) { | 132 | if (IEC958 == substream->pcm->device) { |
135 | runtime->hw = ct_spdif_passthru_playback_hw; | 133 | runtime->hw = ct_spdif_passthru_playback_hw; |
136 | atc->spdif_out_passthru(atc, 1); | 134 | atc->spdif_out_passthru(atc, 1); |
@@ -155,8 +153,12 @@ static int ct_pcm_playback_open(struct snd_pcm_substream *substream) | |||
155 | } | 153 | } |
156 | 154 | ||
157 | apcm->timer = ct_timer_instance_new(atc->timer, apcm); | 155 | apcm->timer = ct_timer_instance_new(atc->timer, apcm); |
158 | if (!apcm->timer) | 156 | if (!apcm->timer) { |
157 | kfree(apcm); | ||
159 | return -ENOMEM; | 158 | return -ENOMEM; |
159 | } | ||
160 | runtime->private_data = apcm; | ||
161 | runtime->private_free = ct_atc_pcm_free_substream; | ||
160 | 162 | ||
161 | return 0; | 163 | return 0; |
162 | } | 164 | } |
@@ -278,8 +280,6 @@ static int ct_pcm_capture_open(struct snd_pcm_substream *substream) | |||
278 | apcm->started = 0; | 280 | apcm->started = 0; |
279 | apcm->substream = substream; | 281 | apcm->substream = substream; |
280 | apcm->interrupt = ct_atc_pcm_interrupt; | 282 | apcm->interrupt = ct_atc_pcm_interrupt; |
281 | runtime->private_data = apcm; | ||
282 | runtime->private_free = ct_atc_pcm_free_substream; | ||
283 | runtime->hw = ct_pcm_capture_hw; | 283 | runtime->hw = ct_pcm_capture_hw; |
284 | runtime->hw.rate_max = atc->rsr * atc->msr; | 284 | runtime->hw.rate_max = atc->rsr * atc->msr; |
285 | 285 | ||
@@ -298,8 +298,12 @@ static int ct_pcm_capture_open(struct snd_pcm_substream *substream) | |||
298 | } | 298 | } |
299 | 299 | ||
300 | apcm->timer = ct_timer_instance_new(atc->timer, apcm); | 300 | apcm->timer = ct_timer_instance_new(atc->timer, apcm); |
301 | if (!apcm->timer) | 301 | if (!apcm->timer) { |
302 | kfree(apcm); | ||
302 | return -ENOMEM; | 303 | return -ENOMEM; |
304 | } | ||
305 | runtime->private_data = apcm; | ||
306 | runtime->private_free = ct_atc_pcm_free_substream; | ||
303 | 307 | ||
304 | return 0; | 308 | return 0; |
305 | } | 309 | } |
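Both ctpcm hunks apply the same rule: do not publish apcm through runtime->private_data/private_free until every allocation it depends on has succeeded; otherwise the early-return path either leaks apcm or lets the PCM core's private_free run on a half-built object. The resulting open-path shape, condensed from the hunks:

	apcm->timer = ct_timer_instance_new(atc->timer, apcm);
	if (!apcm->timer) {
		kfree(apcm);			/* nothing else owns it yet */
		return -ENOMEM;
	}
	/* only now hand ownership to the PCM core */
	runtime->private_data = apcm;
	runtime->private_free = ct_atc_pcm_free_substream;
	return 0;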
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 6361f752b5f3..846d1ead47fd 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -3100,6 +3100,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
3100 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), | 3100 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), |
3101 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), | 3101 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), |
3102 | SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), | 3102 | SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), |
3103 | SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_HP_LAPTOP), | ||
3103 | SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD), | 3104 | SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD), |
3104 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), | 3105 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), |
3105 | SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), | 3106 | SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), |
@@ -3110,6 +3111,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
3110 | SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), | 3111 | SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), |
3111 | SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), | 3112 | SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), |
3112 | SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), | 3113 | SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), |
3114 | SND_PCI_QUIRK(0x17aa, 0x21c8, "Thinkpad Edge 11", CXT5066_IDEAPAD), | ||
3113 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), | 3115 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), |
3114 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD), | 3116 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD), |
3115 | SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD), | 3117 | SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD), |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 5f00589cb791..0ac6aed0c889 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -14623,7 +14623,10 @@ static int alc275_setup_dual_adc(struct hda_codec *codec) | |||
14623 | /* different alc269-variants */ | 14623 | /* different alc269-variants */ |
14624 | enum { | 14624 | enum { |
14625 | ALC269_TYPE_NORMAL, | 14625 | ALC269_TYPE_NORMAL, |
14626 | ALC269_TYPE_ALC258, | ||
14626 | ALC269_TYPE_ALC259, | 14627 | ALC269_TYPE_ALC259, |
14628 | ALC269_TYPE_ALC269VB, | ||
14629 | ALC269_TYPE_ALC270, | ||
14627 | ALC269_TYPE_ALC271X, | 14630 | ALC269_TYPE_ALC271X, |
14628 | }; | 14631 | }; |
14629 | 14632 | ||
@@ -15023,7 +15026,7 @@ static int alc269_fill_coef(struct hda_codec *codec) | |||
15023 | static int patch_alc269(struct hda_codec *codec) | 15026 | static int patch_alc269(struct hda_codec *codec) |
15024 | { | 15027 | { |
15025 | struct alc_spec *spec; | 15028 | struct alc_spec *spec; |
15026 | int board_config; | 15029 | int board_config, coef; |
15027 | int err; | 15030 | int err; |
15028 | 15031 | ||
15029 | spec = kzalloc(sizeof(*spec), GFP_KERNEL); | 15032 | spec = kzalloc(sizeof(*spec), GFP_KERNEL); |
@@ -15034,14 +15037,23 @@ static int patch_alc269(struct hda_codec *codec) | |||
15034 | 15037 | ||
15035 | alc_auto_parse_customize_define(codec); | 15038 | alc_auto_parse_customize_define(codec); |
15036 | 15039 | ||
15037 | if ((alc_read_coef_idx(codec, 0) & 0x00f0) == 0x0010){ | 15040 | coef = alc_read_coef_idx(codec, 0); |
15041 | if ((coef & 0x00f0) == 0x0010) { | ||
15038 | if (codec->bus->pci->subsystem_vendor == 0x1025 && | 15042 | if (codec->bus->pci->subsystem_vendor == 0x1025 && |
15039 | spec->cdefine.platform_type == 1) { | 15043 | spec->cdefine.platform_type == 1) { |
15040 | alc_codec_rename(codec, "ALC271X"); | 15044 | alc_codec_rename(codec, "ALC271X"); |
15041 | spec->codec_variant = ALC269_TYPE_ALC271X; | 15045 | spec->codec_variant = ALC269_TYPE_ALC271X; |
15042 | } else { | 15046 | } else if ((coef & 0xf000) == 0x1000) { |
15047 | spec->codec_variant = ALC269_TYPE_ALC270; | ||
15048 | } else if ((coef & 0xf000) == 0x2000) { | ||
15043 | alc_codec_rename(codec, "ALC259"); | 15049 | alc_codec_rename(codec, "ALC259"); |
15044 | spec->codec_variant = ALC269_TYPE_ALC259; | 15050 | spec->codec_variant = ALC269_TYPE_ALC259; |
15051 | } else if ((coef & 0xf000) == 0x3000) { | ||
15052 | alc_codec_rename(codec, "ALC258"); | ||
15053 | spec->codec_variant = ALC269_TYPE_ALC258; | ||
15054 | } else { | ||
15055 | alc_codec_rename(codec, "ALC269VB"); | ||
15056 | spec->codec_variant = ALC269_TYPE_ALC269VB; | ||
15045 | } | 15057 | } |
15046 | } else | 15058 | } else |
15047 | alc_fix_pll_init(codec, 0x20, 0x04, 15); | 15059 | alc_fix_pll_init(codec, 0x20, 0x04, 15); |
@@ -15104,7 +15116,7 @@ static int patch_alc269(struct hda_codec *codec) | |||
15104 | spec->stream_digital_capture = &alc269_pcm_digital_capture; | 15116 | spec->stream_digital_capture = &alc269_pcm_digital_capture; |
15105 | 15117 | ||
15106 | if (!spec->adc_nids) { /* wasn't filled automatically? use default */ | 15118 | if (!spec->adc_nids) { /* wasn't filled automatically? use default */ |
15107 | if (spec->codec_variant != ALC269_TYPE_NORMAL) { | 15119 | if (spec->codec_variant == ALC269_TYPE_NORMAL) { |
15108 | spec->adc_nids = alc269_adc_nids; | 15120 | spec->adc_nids = alc269_adc_nids; |
15109 | spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids); | 15121 | spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids); |
15110 | spec->capsrc_nids = alc269_capsrc_nids; | 15122 | spec->capsrc_nids = alc269_capsrc_nids; |
@@ -19298,6 +19310,7 @@ static const struct alc_fixup alc662_fixups[] = { | |||
19298 | 19310 | ||
19299 | static struct snd_pci_quirk alc662_fixup_tbl[] = { | 19311 | static struct snd_pci_quirk alc662_fixup_tbl[] = { |
19300 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), | 19312 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), |
19313 | SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), | ||
19301 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), | 19314 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), |
19302 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), | 19315 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), |
19303 | {} | 19316 | {} |
@@ -19419,7 +19432,10 @@ static int patch_alc888(struct hda_codec *codec) | |||
19419 | { | 19432 | { |
19420 | if ((alc_read_coef_idx(codec, 0) & 0x00f0)==0x0030){ | 19433 | if ((alc_read_coef_idx(codec, 0) & 0x00f0)==0x0030){ |
19421 | kfree(codec->chip_name); | 19434 | kfree(codec->chip_name); |
19422 | codec->chip_name = kstrdup("ALC888-VD", GFP_KERNEL); | 19435 | if (codec->vendor_id == 0x10ec0887) |
19436 | codec->chip_name = kstrdup("ALC887-VD", GFP_KERNEL); | ||
19437 | else | ||
19438 | codec->chip_name = kstrdup("ALC888-VD", GFP_KERNEL); | ||
19423 | if (!codec->chip_name) { | 19439 | if (!codec->chip_name) { |
19424 | alc_free(codec); | 19440 | alc_free(codec); |
19425 | return -ENOMEM; | 19441 | return -ENOMEM; |
@@ -19909,7 +19925,7 @@ static struct hda_codec_preset snd_hda_preset_realtek[] = { | |||
19909 | { .id = 0x10ec0885, .rev = 0x100103, .name = "ALC889A", | 19925 | { .id = 0x10ec0885, .rev = 0x100103, .name = "ALC889A", |
19910 | .patch = patch_alc882 }, | 19926 | .patch = patch_alc882 }, |
19911 | { .id = 0x10ec0885, .name = "ALC885", .patch = patch_alc882 }, | 19927 | { .id = 0x10ec0885, .name = "ALC885", .patch = patch_alc882 }, |
19912 | { .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc882 }, | 19928 | { .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc888 }, |
19913 | { .id = 0x10ec0888, .rev = 0x100101, .name = "ALC1200", | 19929 | { .id = 0x10ec0888, .rev = 0x100101, .name = "ALC1200", |
19914 | .patch = patch_alc882 }, | 19930 | .patch = patch_alc882 }, |
19915 | { .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc888 }, | 19931 | { .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc888 }, |
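For readers tracking the ALC269 family split, the new detection reads COEF index 0 once and decodes it as below; this comment block simply tabulates the if/else chain in the hunk (platform_type refers to spec->cdefine.platform_type):

	coef = alc_read_coef_idx(codec, 0);
	/* (coef & 0x00f0) == 0x0010 selects the newer family, then:
	 *   subsystem vendor 0x1025 && platform_type == 1  -> ALC271X
	 *   (coef & 0xf000) == 0x1000                       -> ALC270
	 *   (coef & 0xf000) == 0x2000                       -> ALC259
	 *   (coef & 0xf000) == 0x3000                       -> ALC258
	 *   anything else                                   -> ALC269VB
	 * otherwise the original ALC269 path (PLL fixup) is taken, and the
	 * default ADC tables now apply only to ALC269_TYPE_NORMAL. */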
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 93fa59cc60ef..5c710807dfe5 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -389,6 +389,11 @@ static hda_nid_t stac92hd83xxx_dmic_nids[STAC92HD83XXX_NUM_DMICS + 1] = { | |||
389 | 0x11, 0x20, 0 | 389 | 0x11, 0x20, 0 |
390 | }; | 390 | }; |
391 | 391 | ||
392 | #define STAC92HD87B_NUM_DMICS 1 | ||
393 | static hda_nid_t stac92hd87b_dmic_nids[STAC92HD87B_NUM_DMICS + 1] = { | ||
394 | 0x11, 0 | ||
395 | }; | ||
396 | |||
392 | #define STAC92HD83XXX_NUM_CAPS 2 | 397 | #define STAC92HD83XXX_NUM_CAPS 2 |
393 | static unsigned long stac92hd83xxx_capvols[] = { | 398 | static unsigned long stac92hd83xxx_capvols[] = { |
394 | HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_OUTPUT), | 399 | HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_OUTPUT), |
@@ -3486,10 +3491,8 @@ static int stac92xx_auto_create_dmic_input_ctls(struct hda_codec *codec, | |||
3486 | return err; | 3491 | return err; |
3487 | } | 3492 | } |
3488 | 3493 | ||
3489 | if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1) { | 3494 | if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1) |
3490 | snd_hda_add_imux_item(imux, label, index, NULL); | 3495 | snd_hda_add_imux_item(imux, label, index, NULL); |
3491 | spec->num_analog_muxes++; | ||
3492 | } | ||
3493 | } | 3496 | } |
3494 | 3497 | ||
3495 | return 0; | 3498 | return 0; |
@@ -5452,12 +5455,17 @@ again: | |||
5452 | stac92hd83xxx_brd_tbl[spec->board_config]); | 5455 | stac92hd83xxx_brd_tbl[spec->board_config]); |
5453 | 5456 | ||
5454 | switch (codec->vendor_id) { | 5457 | switch (codec->vendor_id) { |
5458 | case 0x111d76d1: | ||
5459 | case 0x111d76d9: | ||
5460 | spec->dmic_nids = stac92hd87b_dmic_nids; | ||
5461 | spec->num_dmics = stac92xx_connected_ports(codec, | ||
5462 | stac92hd87b_dmic_nids, | ||
5463 | STAC92HD87B_NUM_DMICS); | ||
5464 | /* Fall through */ | ||
5455 | case 0x111d7666: | 5465 | case 0x111d7666: |
5456 | case 0x111d7667: | 5466 | case 0x111d7667: |
5457 | case 0x111d7668: | 5467 | case 0x111d7668: |
5458 | case 0x111d7669: | 5468 | case 0x111d7669: |
5459 | case 0x111d76d1: | ||
5460 | case 0x111d76d9: | ||
5461 | spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids); | 5469 | spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids); |
5462 | spec->pin_nids = stac92hd88xxx_pin_nids; | 5470 | spec->pin_nids = stac92hd88xxx_pin_nids; |
5463 | spec->mono_nid = 0; | 5471 | spec->mono_nid = 0; |
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c index 400f9ebd243e..629a5494347a 100644 --- a/sound/pci/intel8x0.c +++ b/sound/pci/intel8x0.c | |||
@@ -1866,6 +1866,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = { | |||
1866 | }, | 1866 | }, |
1867 | { | 1867 | { |
1868 | .subvendor = 0x1028, | 1868 | .subvendor = 0x1028, |
1869 | .subdevice = 0x0182, | ||
1870 | .name = "Dell Latitude D610", /* STAC9750/51 */ | ||
1871 | .type = AC97_TUNE_HP_ONLY | ||
1872 | }, | ||
1873 | { | ||
1874 | .subvendor = 0x1028, | ||
1869 | .subdevice = 0x0186, | 1875 | .subdevice = 0x0186, |
1870 | .name = "Dell Latitude D810", /* cf. Malone #41015 */ | 1876 | .name = "Dell Latitude D810", /* cf. Malone #41015 */ |
1871 | .type = AC97_TUNE_HP_MUTE_LED | 1877 | .type = AC97_TUNE_HP_MUTE_LED |
diff --git a/sound/pci/mixart/mixart_hwdep.h b/sound/pci/mixart/mixart_hwdep.h index a46f5083db99..812e288ef2e7 100644 --- a/sound/pci/mixart/mixart_hwdep.h +++ b/sound/pci/mixart/mixart_hwdep.h | |||
@@ -25,11 +25,21 @@ | |||
25 | 25 | ||
26 | #include <sound/hwdep.h> | 26 | #include <sound/hwdep.h> |
27 | 27 | ||
28 | #ifndef readl_be | ||
28 | #define readl_be(x) be32_to_cpu(__raw_readl(x)) | 29 | #define readl_be(x) be32_to_cpu(__raw_readl(x)) |
30 | #endif | ||
31 | |||
32 | #ifndef writel_be | ||
29 | #define writel_be(data,addr) __raw_writel(cpu_to_be32(data),addr) | 33 | #define writel_be(data,addr) __raw_writel(cpu_to_be32(data),addr) |
34 | #endif | ||
30 | 35 | ||
36 | #ifndef readl_le | ||
31 | #define readl_le(x) le32_to_cpu(__raw_readl(x)) | 37 | #define readl_le(x) le32_to_cpu(__raw_readl(x)) |
38 | #endif | ||
39 | |||
40 | #ifndef writel_le | ||
32 | #define writel_le(data,addr) __raw_writel(cpu_to_le32(data),addr) | 41 | #define writel_le(data,addr) __raw_writel(cpu_to_le32(data),addr) |
42 | #endif | ||
33 | 43 | ||
34 | #define MIXART_MEM(mgr,x) ((mgr)->mem[0].virt + (x)) | 44 | #define MIXART_MEM(mgr,x) ((mgr)->mem[0].virt + (x)) |
35 | #define MIXART_REG(mgr,x) ((mgr)->mem[1].virt + (x)) | 45 | #define MIXART_REG(mgr,x) ((mgr)->mem[1].virt + (x)) |
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c index 85081172403f..b47cfd45b3b9 100644 --- a/sound/ppc/pmac.c +++ b/sound/ppc/pmac.c | |||
@@ -1228,10 +1228,8 @@ int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) | |||
1228 | chip->rsrc[i].start + 1, | 1228 | chip->rsrc[i].start + 1, |
1229 | rnames[i]) == NULL) { | 1229 | rnames[i]) == NULL) { |
1230 | printk(KERN_ERR "snd: can't request rsrc " | 1230 | printk(KERN_ERR "snd: can't request rsrc " |
1231 | " %d (%s: 0x%016llx:%016llx)\n", | 1231 | " %d (%s: %pR)\n", |
1232 | i, rnames[i], | 1232 | i, rnames[i], &chip->rsrc[i]); |
1233 | (unsigned long long)chip->rsrc[i].start, | ||
1234 | (unsigned long long)chip->rsrc[i].end); | ||
1235 | err = -ENODEV; | 1233 | err = -ENODEV; |
1236 | goto __error; | 1234 | goto __error; |
1237 | } | 1235 | } |
@@ -1256,10 +1254,8 @@ int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) | |||
1256 | chip->rsrc[i].start + 1, | 1254 | chip->rsrc[i].start + 1, |
1257 | rnames[i]) == NULL) { | 1255 | rnames[i]) == NULL) { |
1258 | printk(KERN_ERR "snd: can't request rsrc " | 1256 | printk(KERN_ERR "snd: can't request rsrc " |
1259 | " %d (%s: 0x%016llx:%016llx)\n", | 1257 | " %d (%s: %pR)\n", |
1260 | i, rnames[i], | 1258 | i, rnames[i], &chip->rsrc[i]); |
1261 | (unsigned long long)chip->rsrc[i].start, | ||
1262 | (unsigned long long)chip->rsrc[i].end); | ||
1263 | err = -ENODEV; | 1259 | err = -ENODEV; |
1264 | goto __error; | 1260 | goto __error; |
1265 | } | 1261 | } |
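The %pR printk extension formats a struct resource (range and flags) from a pointer, which is what replaces the hand-rolled 0x%016llx:%016llx pair in both request paths here:

	printk(KERN_ERR "snd: can't request rsrc %d (%s: %pR)\n",
	       i, rnames[i], &chip->rsrc[i]);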
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig index e720d5e6f04c..bee3c94f58b0 100644 --- a/sound/soc/atmel/Kconfig +++ b/sound/soc/atmel/Kconfig | |||
@@ -16,7 +16,8 @@ config SND_ATMEL_SOC_SSC | |||
16 | 16 | ||
17 | config SND_AT91_SOC_SAM9G20_WM8731 | 17 | config SND_AT91_SOC_SAM9G20_WM8731 |
18 | tristate "SoC Audio support for WM8731-based At91sam9g20 evaluation board" | 18 | tristate "SoC Audio support for WM8731-based At91sam9g20 evaluation board" |
19 | depends on ATMEL_SSC && ARCH_AT91SAM9G20 && SND_ATMEL_SOC | 19 | depends on ATMEL_SSC && ARCH_AT91SAM9G20 && SND_ATMEL_SOC && \ |
20 | AT91_PROGRAMMABLE_CLOCKS | ||
20 | select SND_ATMEL_SOC_SSC | 21 | select SND_ATMEL_SOC_SSC |
21 | select SND_SOC_WM8731 | 22 | select SND_SOC_WM8731 |
22 | help | 23 | help |
@@ -25,7 +26,7 @@ config SND_AT91_SOC_SAM9G20_WM8731 | |||
25 | 26 | ||
26 | config SND_AT32_SOC_PLAYPAQ | 27 | config SND_AT32_SOC_PLAYPAQ |
27 | tristate "SoC Audio support for PlayPaq with WM8510" | 28 | tristate "SoC Audio support for PlayPaq with WM8510" |
28 | depends on SND_ATMEL_SOC && BOARD_PLAYPAQ | 29 | depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS |
29 | select SND_ATMEL_SOC_SSC | 30 | select SND_ATMEL_SOC_SSC |
30 | select SND_SOC_WM8510 | 31 | select SND_SOC_WM8510 |
31 | help | 32 | help |
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c index bc22ee93a75d..470cb93b1d1f 100644 --- a/sound/soc/codecs/max98088.c +++ b/sound/soc/codecs/max98088.c | |||
@@ -28,6 +28,11 @@ | |||
28 | #include <sound/max98088.h> | 28 | #include <sound/max98088.h> |
29 | #include "max98088.h" | 29 | #include "max98088.h" |
30 | 30 | ||
31 | enum max98088_type { | ||
32 | MAX98088, | ||
33 | MAX98089, | ||
34 | }; | ||
35 | |||
31 | struct max98088_cdata { | 36 | struct max98088_cdata { |
32 | unsigned int rate; | 37 | unsigned int rate; |
33 | unsigned int fmt; | 38 | unsigned int fmt; |
@@ -36,6 +41,7 @@ struct max98088_cdata { | |||
36 | 41 | ||
37 | struct max98088_priv { | 42 | struct max98088_priv { |
38 | u8 reg_cache[M98088_REG_CNT]; | 43 | u8 reg_cache[M98088_REG_CNT]; |
44 | enum max98088_type devtype; | ||
39 | void *control_data; | 45 | void *control_data; |
40 | struct max98088_pdata *pdata; | 46 | struct max98088_pdata *pdata; |
41 | unsigned int sysclk; | 47 | unsigned int sysclk; |
@@ -2040,6 +2046,8 @@ static int max98088_i2c_probe(struct i2c_client *i2c, | |||
2040 | if (max98088 == NULL) | 2046 | if (max98088 == NULL) |
2041 | return -ENOMEM; | 2047 | return -ENOMEM; |
2042 | 2048 | ||
2049 | max98088->devtype = id->driver_data; | ||
2050 | |||
2043 | i2c_set_clientdata(i2c, max98088); | 2051 | i2c_set_clientdata(i2c, max98088); |
2044 | max98088->control_data = i2c; | 2052 | max98088->control_data = i2c; |
2045 | max98088->pdata = i2c->dev.platform_data; | 2053 | max98088->pdata = i2c->dev.platform_data; |
@@ -2059,7 +2067,8 @@ static int __devexit max98088_i2c_remove(struct i2c_client *client) | |||
2059 | } | 2067 | } |
2060 | 2068 | ||
2061 | static const struct i2c_device_id max98088_i2c_id[] = { | 2069 | static const struct i2c_device_id max98088_i2c_id[] = { |
2062 | { "max98088", 0 }, | 2070 | { "max98088", MAX98088 }, |
2071 | { "max98089", MAX98089 }, | ||
2063 | { } | 2072 | { } |
2064 | }; | 2073 | }; |
2065 | MODULE_DEVICE_TABLE(i2c, max98088_i2c_id); | 2074 | MODULE_DEVICE_TABLE(i2c, max98088_i2c_id); |
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c index 7540a509a6f5..464f0cfa4c7a 100644 --- a/sound/soc/codecs/uda134x.c +++ b/sound/soc/codecs/uda134x.c | |||
@@ -597,6 +597,7 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = { | |||
597 | .resume = uda134x_soc_resume, | 597 | .resume = uda134x_soc_resume, |
598 | .reg_cache_size = sizeof(uda134x_reg), | 598 | .reg_cache_size = sizeof(uda134x_reg), |
599 | .reg_word_size = sizeof(u8), | 599 | .reg_word_size = sizeof(u8), |
600 | .reg_cache_default = uda134x_reg, | ||
600 | .reg_cache_step = 1, | 601 | .reg_cache_step = 1, |
601 | .read = uda134x_read_reg_cache, | 602 | .read = uda134x_read_reg_cache, |
602 | .write = uda134x_write, | 603 | .write = uda134x_write, |
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c index f4f1fba38eb9..7611add7f8c3 100644 --- a/sound/soc/codecs/wm8350.c +++ b/sound/soc/codecs/wm8350.c | |||
@@ -831,7 +831,7 @@ static int wm8350_set_dai_sysclk(struct snd_soc_dai *codec_dai, | |||
831 | } | 831 | } |
832 | 832 | ||
833 | /* MCLK direction */ | 833 | /* MCLK direction */ |
834 | if (dir == WM8350_MCLK_DIR_OUT) | 834 | if (dir == SND_SOC_CLOCK_OUT) |
835 | wm8350_set_bits(wm8350, WM8350_CLOCK_CONTROL_2, | 835 | wm8350_set_bits(wm8350, WM8350_CLOCK_CONTROL_2, |
836 | WM8350_MCLK_DIR); | 836 | WM8350_MCLK_DIR); |
837 | else | 837 | else |
@@ -1586,6 +1586,13 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec) | |||
1586 | wm8350_set_bits(wm8350, WM8350_ROUT2_VOLUME, | 1586 | wm8350_set_bits(wm8350, WM8350_ROUT2_VOLUME, |
1587 | WM8350_OUT2_VU | WM8350_OUT2R_MUTE); | 1587 | WM8350_OUT2_VU | WM8350_OUT2R_MUTE); |
1588 | 1588 | ||
1589 | /* Make sure AIF tristating is disabled by default */ | ||
1590 | wm8350_clear_bits(wm8350, WM8350_AI_FORMATING, WM8350_AIF_TRI); | ||
1591 | |||
1592 | /* Make sure we've got a sane companding setup too */ | ||
1593 | wm8350_clear_bits(wm8350, WM8350_ADC_DAC_COMP, | ||
1594 | WM8350_DAC_COMP | WM8350_LOOPBACK); | ||
1595 | |||
1589 | /* Make sure jack detect is disabled to start off with */ | 1596 | /* Make sure jack detect is disabled to start off with */ |
1590 | wm8350_clear_bits(wm8350, WM8350_JACK_DETECT, | 1597 | wm8350_clear_bits(wm8350, WM8350_JACK_DETECT, |
1591 | WM8350_JDL_ENA | WM8350_JDR_ENA); | 1598 | WM8350_JDL_ENA | WM8350_JDR_ENA); |
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c index 04182c464e35..0132a27140ae 100644 --- a/sound/soc/codecs/wm8776.c +++ b/sound/soc/codecs/wm8776.c | |||
@@ -34,7 +34,6 @@ | |||
34 | /* codec private data */ | 34 | /* codec private data */ |
35 | struct wm8776_priv { | 35 | struct wm8776_priv { |
36 | enum snd_soc_control_type control_type; | 36 | enum snd_soc_control_type control_type; |
37 | u16 reg_cache[WM8776_CACHEREGNUM]; | ||
38 | int sysclk[2]; | 37 | int sysclk[2]; |
39 | }; | 38 | }; |
40 | 39 | ||
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 894d0cd3aa9b..e8092745a207 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c | |||
@@ -3500,8 +3500,11 @@ static ssize_t wm8962_beep_set(struct device *dev, | |||
3500 | { | 3500 | { |
3501 | struct wm8962_priv *wm8962 = dev_get_drvdata(dev); | 3501 | struct wm8962_priv *wm8962 = dev_get_drvdata(dev); |
3502 | long int time; | 3502 | long int time; |
3503 | int ret; | ||
3503 | 3504 | ||
3504 | strict_strtol(buf, 10, &time); | 3505 | ret = strict_strtol(buf, 10, &time); |
3506 | if (ret != 0) | ||
3507 | return ret; | ||
3505 | 3508 | ||
3506 | input_event(wm8962->beep, EV_SND, SND_TONE, time); | 3509 | input_event(wm8962->beep, EV_SND, SND_TONE, time); |
3507 | 3510 | ||
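The wm8962 sysfs fix is the usual check-the-parser pattern: strict_strtol() (the predecessor of kstrtol()) returns 0 on success and a negative errno on malformed input, and ignoring that meant passing an uninitialized 'time' to input_event(). With the check in place:

	ret = strict_strtol(buf, 10, &time);
	if (ret != 0)
		return ret;		/* reject non-numeric writes */

	input_event(wm8962->beep, EV_SND, SND_TONE, time);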
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 0db59c3aa5d4..830dfdd66c5f 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c | |||
@@ -3903,6 +3903,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) | |||
3903 | return -ENOMEM; | 3903 | return -ENOMEM; |
3904 | snd_soc_codec_set_drvdata(codec, wm8994); | 3904 | snd_soc_codec_set_drvdata(codec, wm8994); |
3905 | 3905 | ||
3906 | codec->reg_cache = &wm8994->reg_cache; | ||
3907 | |||
3906 | wm8994->pdata = dev_get_platdata(codec->dev->parent); | 3908 | wm8994->pdata = dev_get_platdata(codec->dev->parent); |
3907 | wm8994->codec = codec; | 3909 | wm8994->codec = codec; |
3908 | 3910 | ||
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c index 2b07b17a6b2d..bc9e6b0b3f6f 100644 --- a/sound/soc/davinci/davinci-evm.c +++ b/sound/soc/davinci/davinci-evm.c | |||
@@ -157,12 +157,23 @@ static int evm_aic3x_init(struct snd_soc_pcm_runtime *rtd) | |||
157 | } | 157 | } |
158 | 158 | ||
159 | /* davinci-evm digital audio interface glue - connects codec <--> CPU */ | 159 | /* davinci-evm digital audio interface glue - connects codec <--> CPU */ |
160 | static struct snd_soc_dai_link evm_dai = { | 160 | static struct snd_soc_dai_link dm6446_evm_dai = { |
161 | .name = "TLV320AIC3X", | 161 | .name = "TLV320AIC3X", |
162 | .stream_name = "AIC3X", | 162 | .stream_name = "AIC3X", |
163 | .cpu_dai_name = "davinci-mcasp.0", | 163 | .cpu_dai_name = "davinci-mcbsp", |
164 | .codec_dai_name = "tlv320aic3x-hifi", | 164 | .codec_dai_name = "tlv320aic3x-hifi", |
165 | .codec_name = "tlv320aic3x-codec.0-001a", | 165 | .codec_name = "tlv320aic3x-codec.1-001b", |
166 | .platform_name = "davinci-pcm-audio", | ||
167 | .init = evm_aic3x_init, | ||
168 | .ops = &evm_ops, | ||
169 | }; | ||
170 | |||
171 | static struct snd_soc_dai_link dm355_evm_dai = { | ||
172 | .name = "TLV320AIC3X", | ||
173 | .stream_name = "AIC3X", | ||
174 | .cpu_dai_name = "davinci-mcbsp.1", | ||
175 | .codec_dai_name = "tlv320aic3x-hifi", | ||
176 | .codec_name = "tlv320aic3x-codec.1-001b", | ||
166 | .platform_name = "davinci-pcm-audio", | 177 | .platform_name = "davinci-pcm-audio", |
167 | .init = evm_aic3x_init, | 178 | .init = evm_aic3x_init, |
168 | .ops = &evm_ops, | 179 | .ops = &evm_ops, |
@@ -172,10 +183,10 @@ static struct snd_soc_dai_link dm365_evm_dai = { | |||
172 | #ifdef CONFIG_SND_DM365_AIC3X_CODEC | 183 | #ifdef CONFIG_SND_DM365_AIC3X_CODEC |
173 | .name = "TLV320AIC3X", | 184 | .name = "TLV320AIC3X", |
174 | .stream_name = "AIC3X", | 185 | .stream_name = "AIC3X", |
175 | .cpu_dai_name = "davinci-i2s", | 186 | .cpu_dai_name = "davinci-mcbsp", |
176 | .codec_dai_name = "tlv320aic3x-hifi", | 187 | .codec_dai_name = "tlv320aic3x-hifi", |
177 | .init = evm_aic3x_init, | 188 | .init = evm_aic3x_init, |
178 | .codec_name = "tlv320aic3x-codec.0-001a", | 189 | .codec_name = "tlv320aic3x-codec.1-0018", |
179 | .ops = &evm_ops, | 190 | .ops = &evm_ops, |
180 | #elif defined(CONFIG_SND_DM365_VOICE_CODEC) | 191 | #elif defined(CONFIG_SND_DM365_VOICE_CODEC) |
181 | .name = "Voice Codec - CQ93VC", | 192 | .name = "Voice Codec - CQ93VC", |
@@ -219,10 +230,17 @@ static struct snd_soc_dai_link da8xx_evm_dai = { | |||
219 | .ops = &evm_ops, | 230 | .ops = &evm_ops, |
220 | }; | 231 | }; |
221 | 232 | ||
222 | /* davinci dm6446, dm355 evm audio machine driver */ | 233 | /* davinci dm6446 evm audio machine driver */ |
223 | static struct snd_soc_card snd_soc_card_evm = { | 234 | static struct snd_soc_card dm6446_snd_soc_card_evm = { |
224 | .name = "DaVinci EVM", | 235 | .name = "DaVinci DM6446 EVM", |
225 | .dai_link = &evm_dai, | 236 | .dai_link = &dm6446_evm_dai, |
237 | .num_links = 1, | ||
238 | }; | ||
239 | |||
240 | /* davinci dm355 evm audio machine driver */ | ||
241 | static struct snd_soc_card dm355_snd_soc_card_evm = { | ||
242 | .name = "DaVinci DM355 EVM", | ||
243 | .dai_link = &dm355_evm_dai, | ||
226 | .num_links = 1, | 244 | .num_links = 1, |
227 | }; | 245 | }; |
228 | 246 | ||
@@ -261,10 +279,10 @@ static int __init evm_init(void) | |||
261 | int ret; | 279 | int ret; |
262 | 280 | ||
263 | if (machine_is_davinci_evm()) { | 281 | if (machine_is_davinci_evm()) { |
264 | evm_snd_dev_data = &snd_soc_card_evm; | 282 | evm_snd_dev_data = &dm6446_snd_soc_card_evm; |
265 | index = 0; | 283 | index = 0; |
266 | } else if (machine_is_davinci_dm355_evm()) { | 284 | } else if (machine_is_davinci_dm355_evm()) { |
267 | evm_snd_dev_data = &snd_soc_card_evm; | 285 | evm_snd_dev_data = &dm355_snd_soc_card_evm; |
268 | index = 1; | 286 | index = 1; |
269 | } else if (machine_is_davinci_dm365_evm()) { | 287 | } else if (machine_is_davinci_dm365_evm()) { |
270 | evm_snd_dev_data = &dm365_snd_soc_card_evm; | 288 | evm_snd_dev_data = &dm365_snd_soc_card_evm; |
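The new codec_name strings in this hunk follow the ASoC naming convention of this kernel, "<codec driver>.<i2c bus>-<i2c address>", so "tlv320aic3x-codec.1-001b" points at the AIC3x at address 0x1b on I2C bus 1 and "tlv320aic3x-codec.1-0018" at address 0x18 on the same bus. Purely as an illustration of how such a string is composed (the kernel derives it from dev_name() internally; this standalone snippet is not driver code):

    #include <stdio.h>

    int main(void)
    {
            char name[32];
            int bus = 1, addr = 0x1b;

            /* "<driver>.<bus>-<4-digit hex address>" */
            snprintf(name, sizeof(name), "tlv320aic3x-codec.%d-%04x", bus, addr);
            printf("%s\n", name);   /* tlv320aic3x-codec.1-001b */
            return 0;
    }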
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c index d46b545d41f4..9e0e565e6ed9 100644 --- a/sound/soc/davinci/davinci-i2s.c +++ b/sound/soc/davinci/davinci-i2s.c | |||
@@ -426,9 +426,6 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream, | |||
426 | snd_pcm_format_t fmt; | 426 | snd_pcm_format_t fmt; |
427 | unsigned element_cnt = 1; | 427 | unsigned element_cnt = 1; |
428 | 428 | ||
429 | dai->capture_dma_data = dev->dma_params; | ||
430 | dai->playback_dma_data = dev->dma_params; | ||
431 | |||
432 | /* general line settings */ | 429 | /* general line settings */ |
433 | spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); | 430 | spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); |
434 | if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { | 431 | if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { |
@@ -601,6 +598,15 @@ static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd, | |||
601 | return ret; | 598 | return ret; |
602 | } | 599 | } |
603 | 600 | ||
601 | static int davinci_i2s_startup(struct snd_pcm_substream *substream, | ||
602 | struct snd_soc_dai *dai) | ||
603 | { | ||
604 | struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); | ||
605 | |||
606 | snd_soc_dai_set_dma_data(dai, substream, dev->dma_params); | ||
607 | return 0; | ||
608 | } | ||
609 | |||
604 | static void davinci_i2s_shutdown(struct snd_pcm_substream *substream, | 610 | static void davinci_i2s_shutdown(struct snd_pcm_substream *substream, |
605 | struct snd_soc_dai *dai) | 611 | struct snd_soc_dai *dai) |
606 | { | 612 | { |
@@ -612,6 +618,7 @@ static void davinci_i2s_shutdown(struct snd_pcm_substream *substream, | |||
612 | #define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000 | 618 | #define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000 |
613 | 619 | ||
614 | static struct snd_soc_dai_ops davinci_i2s_dai_ops = { | 620 | static struct snd_soc_dai_ops davinci_i2s_dai_ops = { |
621 | .startup = davinci_i2s_startup, | ||
615 | .shutdown = davinci_i2s_shutdown, | 622 | .shutdown = davinci_i2s_shutdown, |
616 | .prepare = davinci_i2s_prepare, | 623 | .prepare = davinci_i2s_prepare, |
617 | .trigger = davinci_i2s_trigger, | 624 | .trigger = davinci_i2s_trigger, |
@@ -749,7 +756,7 @@ static struct platform_driver davinci_mcbsp_driver = { | |||
749 | .probe = davinci_i2s_probe, | 756 | .probe = davinci_i2s_probe, |
750 | .remove = davinci_i2s_remove, | 757 | .remove = davinci_i2s_remove, |
751 | .driver = { | 758 | .driver = { |
752 | .name = "davinci-i2s", | 759 | .name = "davinci-mcbsp", |
753 | .owner = THIS_MODULE, | 760 | .owner = THIS_MODULE, |
754 | }, | 761 | }, |
755 | }; | 762 | }; |
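The davinci-i2s change above, like the matching McASP and voice-codec hunks below, stops poking dai->playback_dma_data/capture_dma_data directly in hw_params and instead publishes the DMA parameters from a startup() callback via snd_soc_dai_set_dma_data(), which stores the pointer per stream direction. The platform (PCM) side retrieves it with the matching getter; a minimal sketch of that consumer, assuming the DaVinci PCM layer's davinci_pcm_dma_params structure:

    static int example_pcm_hw_params(struct snd_pcm_substream *substream,
                                     struct snd_pcm_hw_params *params)
    {
            struct snd_soc_pcm_runtime *rtd = substream->private_data;
            struct davinci_pcm_dma_params *dma_params;

            /* Whatever the DAI's startup() stored for this stream direction. */
            dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
            if (!dma_params)
                    return -ENODEV;

            /* ... program the EDMA channel from dma_params ... */
            return 0;
    }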
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index 86918ee12419..fb55d2c5d704 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c | |||
@@ -715,9 +715,6 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream, | |||
715 | int word_length; | 715 | int word_length; |
716 | u8 fifo_level; | 716 | u8 fifo_level; |
717 | 717 | ||
718 | cpu_dai->capture_dma_data = dev->dma_params; | ||
719 | cpu_dai->playback_dma_data = dev->dma_params; | ||
720 | |||
721 | davinci_hw_common_param(dev, substream->stream); | 718 | davinci_hw_common_param(dev, substream->stream); |
722 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 719 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
723 | fifo_level = dev->txnumevt; | 720 | fifo_level = dev->txnumevt; |
@@ -799,7 +796,17 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream, | |||
799 | return ret; | 796 | return ret; |
800 | } | 797 | } |
801 | 798 | ||
799 | static int davinci_mcasp_startup(struct snd_pcm_substream *substream, | ||
800 | struct snd_soc_dai *dai) | ||
801 | { | ||
802 | struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(dai); | ||
803 | |||
804 | snd_soc_dai_set_dma_data(dai, substream, dev->dma_params); | ||
805 | return 0; | ||
806 | } | ||
807 | |||
802 | static struct snd_soc_dai_ops davinci_mcasp_dai_ops = { | 808 | static struct snd_soc_dai_ops davinci_mcasp_dai_ops = { |
809 | .startup = davinci_mcasp_startup, | ||
803 | .trigger = davinci_mcasp_trigger, | 810 | .trigger = davinci_mcasp_trigger, |
804 | .hw_params = davinci_mcasp_hw_params, | 811 | .hw_params = davinci_mcasp_hw_params, |
805 | .set_fmt = davinci_mcasp_set_dai_fmt, | 812 | .set_fmt = davinci_mcasp_set_dai_fmt, |
diff --git a/sound/soc/davinci/davinci-sffsdr.c b/sound/soc/davinci/davinci-sffsdr.c index 009b6521a1bf..6c6666a1f942 100644 --- a/sound/soc/davinci/davinci-sffsdr.c +++ b/sound/soc/davinci/davinci-sffsdr.c | |||
@@ -84,7 +84,7 @@ static struct snd_soc_ops sffsdr_ops = { | |||
84 | static struct snd_soc_dai_link sffsdr_dai = { | 84 | static struct snd_soc_dai_link sffsdr_dai = { |
85 | .name = "PCM3008", /* Codec name */ | 85 | .name = "PCM3008", /* Codec name */ |
86 | .stream_name = "PCM3008 HiFi", | 86 | .stream_name = "PCM3008 HiFi", |
87 | .cpu_dai_name = "davinci-asp.0", | 87 | .cpu_dai_name = "davinci-mcbsp", |
88 | .codec_dai_name = "pcm3008-hifi", | 88 | .codec_dai_name = "pcm3008-hifi", |
89 | .codec_name = "pcm3008-codec", | 89 | .codec_name = "pcm3008-codec", |
90 | .platform_name = "davinci-pcm-audio", | 90 | .platform_name = "davinci-pcm-audio", |
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c index ea232f6a2c21..fb4cc1edf339 100644 --- a/sound/soc/davinci/davinci-vcif.c +++ b/sound/soc/davinci/davinci-vcif.c | |||
@@ -97,9 +97,6 @@ static int davinci_vcif_hw_params(struct snd_pcm_substream *substream, | |||
97 | &davinci_vcif_dev->dma_params[substream->stream]; | 97 | &davinci_vcif_dev->dma_params[substream->stream]; |
98 | u32 w; | 98 | u32 w; |
99 | 99 | ||
100 | dai->capture_dma_data = davinci_vcif_dev->dma_params; | ||
101 | dai->playback_dma_data = davinci_vcif_dev->dma_params; | ||
102 | |||
103 | /* Restart the codec before setup */ | 100 | /* Restart the codec before setup */ |
104 | davinci_vcif_stop(substream); | 101 | davinci_vcif_stop(substream); |
105 | davinci_vcif_start(substream); | 102 | davinci_vcif_start(substream); |
@@ -174,9 +171,19 @@ static int davinci_vcif_trigger(struct snd_pcm_substream *substream, int cmd, | |||
174 | return ret; | 171 | return ret; |
175 | } | 172 | } |
176 | 173 | ||
174 | static int davinci_vcif_startup(struct snd_pcm_substream *substream, | ||
175 | struct snd_soc_dai *dai) | ||
176 | { | ||
177 | struct davinci_vcif_dev *dev = snd_soc_dai_get_drvdata(dai); | ||
178 | |||
179 | snd_soc_dai_set_dma_data(dai, substream, dev->dma_params); | ||
180 | return 0; | ||
181 | } | ||
182 | |||
177 | #define DAVINCI_VCIF_RATES SNDRV_PCM_RATE_8000_48000 | 183 | #define DAVINCI_VCIF_RATES SNDRV_PCM_RATE_8000_48000 |
178 | 184 | ||
179 | static struct snd_soc_dai_ops davinci_vcif_dai_ops = { | 185 | static struct snd_soc_dai_ops davinci_vcif_dai_ops = { |
186 | .startup = davinci_vcif_startup, | ||
180 | .trigger = davinci_vcif_trigger, | 187 | .trigger = davinci_vcif_trigger, |
181 | .hw_params = davinci_vcif_hw_params, | 188 | .hw_params = davinci_vcif_hw_params, |
182 | }; | 189 | }; |
diff --git a/sound/soc/fsl/mpc5200_psc_i2s.c b/sound/soc/fsl/mpc5200_psc_i2s.c index 74ffed41340f..9018fa5bf0db 100644 --- a/sound/soc/fsl/mpc5200_psc_i2s.c +++ b/sound/soc/fsl/mpc5200_psc_i2s.c | |||
@@ -160,7 +160,7 @@ static int __devinit psc_i2s_of_probe(struct platform_device *op, | |||
160 | rc = snd_soc_register_dais(&op->dev, psc_i2s_dai, ARRAY_SIZE(psc_i2s_dai)); | 160 | rc = snd_soc_register_dais(&op->dev, psc_i2s_dai, ARRAY_SIZE(psc_i2s_dai)); |
161 | if (rc != 0) { | 161 | if (rc != 0) { |
162 | pr_err("Failed to register DAI\n"); | 162 | pr_err("Failed to register DAI\n"); |
163 | return 0; | 163 | return rc; |
164 | } | 164 | } |
165 | 165 | ||
166 | psc_dma = dev_get_drvdata(&op->dev); | 166 | psc_dma = dev_get_drvdata(&op->dev); |
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c index b59675257ce5..dd4fffdbd177 100644 --- a/sound/soc/imx/eukrea-tlv320.c +++ b/sound/soc/imx/eukrea-tlv320.c | |||
@@ -34,8 +34,8 @@ static int eukrea_tlv320_hw_params(struct snd_pcm_substream *substream, | |||
34 | struct snd_pcm_hw_params *params) | 34 | struct snd_pcm_hw_params *params) |
35 | { | 35 | { |
36 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 36 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
37 | struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; | 37 | struct snd_soc_dai *codec_dai = rtd->codec_dai; |
38 | struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; | 38 | struct snd_soc_dai *cpu_dai = rtd->cpu_dai; |
39 | int ret; | 39 | int ret; |
40 | 40 | ||
41 | ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | | 41 | ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | |
@@ -79,10 +79,10 @@ static struct snd_soc_ops eukrea_tlv320_snd_ops = { | |||
79 | static struct snd_soc_dai_link eukrea_tlv320_dai = { | 79 | static struct snd_soc_dai_link eukrea_tlv320_dai = { |
80 | .name = "tlv320aic23", | 80 | .name = "tlv320aic23", |
81 | .stream_name = "TLV320AIC23", | 81 | .stream_name = "TLV320AIC23", |
82 | .codec_dai = "tlv320aic23-hifi", | 82 | .codec_dai_name = "tlv320aic23-hifi", |
83 | .platform_name = "imx-pcm-audio.0", | 83 | .platform_name = "imx-pcm-audio.0", |
84 | .codec_name = "tlv320aic23-codec.0-001a", | 84 | .codec_name = "tlv320aic23-codec.0-001a", |
85 | .cpu_dai = "imx-ssi.0", | 85 | .cpu_dai_name = "imx-ssi.0", |
86 | .ops = &eukrea_tlv320_snd_ops, | 86 | .ops = &eukrea_tlv320_snd_ops, |
87 | }; | 87 | }; |
88 | 88 | ||
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c index fd493ee1428e..671ef8dd524c 100644 --- a/sound/soc/imx/imx-pcm-dma-mx2.c +++ b/sound/soc/imx/imx-pcm-dma-mx2.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/dmaengine.h> | ||
23 | 24 | ||
24 | #include <sound/core.h> | 25 | #include <sound/core.h> |
25 | #include <sound/initval.h> | 26 | #include <sound/initval.h> |
@@ -27,165 +28,146 @@ | |||
27 | #include <sound/pcm_params.h> | 28 | #include <sound/pcm_params.h> |
28 | #include <sound/soc.h> | 29 | #include <sound/soc.h> |
29 | 30 | ||
30 | #include <mach/dma-mx1-mx2.h> | 31 | #include <mach/dma.h> |
31 | 32 | ||
32 | #include "imx-ssi.h" | 33 | #include "imx-ssi.h" |
33 | 34 | ||
34 | struct imx_pcm_runtime_data { | 35 | struct imx_pcm_runtime_data { |
35 | int sg_count; | 36 | int period_bytes; |
36 | struct scatterlist *sg_list; | ||
37 | int period; | ||
38 | int periods; | 37 | int periods; |
39 | unsigned long dma_addr; | ||
40 | int dma; | 38 | int dma; |
41 | struct snd_pcm_substream *substream; | ||
42 | unsigned long offset; | 39 | unsigned long offset; |
43 | unsigned long size; | 40 | unsigned long size; |
44 | unsigned long period_cnt; | ||
45 | void *buf; | 41 | void *buf; |
46 | int period_time; | 42 | int period_time; |
43 | struct dma_async_tx_descriptor *desc; | ||
44 | struct dma_chan *dma_chan; | ||
45 | struct imx_dma_data dma_data; | ||
47 | }; | 46 | }; |
48 | 47 | ||
49 | /* Called by the DMA framework when a period has elapsed */ | 48 | static void audio_dma_irq(void *data) |
50 | static void imx_ssi_dma_progression(int channel, void *data, | ||
51 | struct scatterlist *sg) | ||
52 | { | 49 | { |
53 | struct snd_pcm_substream *substream = data; | 50 | struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data; |
54 | struct snd_pcm_runtime *runtime = substream->runtime; | 51 | struct snd_pcm_runtime *runtime = substream->runtime; |
55 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 52 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
56 | 53 | ||
57 | if (!sg) | 54 | iprtd->offset += iprtd->period_bytes; |
58 | return; | 55 | iprtd->offset %= iprtd->period_bytes * iprtd->periods; |
59 | |||
60 | runtime = iprtd->substream->runtime; | ||
61 | 56 | ||
62 | iprtd->offset = sg->dma_address - runtime->dma_addr; | 57 | snd_pcm_period_elapsed(substream); |
63 | |||
64 | snd_pcm_period_elapsed(iprtd->substream); | ||
65 | } | 58 | } |
66 | 59 | ||
67 | static void imx_ssi_dma_callback(int channel, void *data) | 60 | static bool filter(struct dma_chan *chan, void *param) |
68 | { | 61 | { |
69 | pr_err("%s shouldn't be called\n", __func__); | 62 | struct imx_pcm_runtime_data *iprtd = param; |
70 | } | ||
71 | 63 | ||
72 | static void snd_imx_dma_err_callback(int channel, void *data, int err) | 64 | if (!imx_dma_is_general_purpose(chan)) |
73 | { | 65 | return false; |
74 | struct snd_pcm_substream *substream = data; | ||
75 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | ||
76 | struct imx_pcm_dma_params *dma_params = | ||
77 | snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); | ||
78 | struct snd_pcm_runtime *runtime = substream->runtime; | ||
79 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | ||
80 | int ret; | ||
81 | 66 | ||
82 | pr_err("DMA timeout on channel %d -%s%s%s%s\n", | 67 | chan->private = &iprtd->dma_data; |
83 | channel, | ||
84 | err & IMX_DMA_ERR_BURST ? " burst" : "", | ||
85 | err & IMX_DMA_ERR_REQUEST ? " request" : "", | ||
86 | err & IMX_DMA_ERR_TRANSFER ? " transfer" : "", | ||
87 | err & IMX_DMA_ERR_BUFFER ? " buffer" : ""); | ||
88 | 68 | ||
89 | imx_dma_disable(iprtd->dma); | 69 | return true; |
90 | ret = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count, | ||
91 | IMX_DMA_LENGTH_LOOP, dma_params->dma_addr, | ||
92 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | ||
93 | DMA_MODE_WRITE : DMA_MODE_READ); | ||
94 | if (!ret) | ||
95 | imx_dma_enable(iprtd->dma); | ||
96 | } | 70 | } |
97 | 71 | ||
98 | static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream) | 72 | static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream, |
73 | struct snd_pcm_hw_params *params) | ||
99 | { | 74 | { |
100 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 75 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
101 | struct imx_pcm_dma_params *dma_params; | 76 | struct imx_pcm_dma_params *dma_params; |
102 | struct snd_pcm_runtime *runtime = substream->runtime; | 77 | struct snd_pcm_runtime *runtime = substream->runtime; |
103 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 78 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
79 | struct dma_slave_config slave_config; | ||
80 | dma_cap_mask_t mask; | ||
81 | enum dma_slave_buswidth buswidth; | ||
104 | int ret; | 82 | int ret; |
105 | 83 | ||
106 | dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); | 84 | dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); |
107 | 85 | ||
108 | iprtd->dma = imx_dma_request_by_prio(DRV_NAME, DMA_PRIO_HIGH); | 86 | iprtd->dma_data.peripheral_type = IMX_DMATYPE_SSI; |
109 | if (iprtd->dma < 0) { | 87 | iprtd->dma_data.priority = DMA_PRIO_HIGH; |
110 | pr_err("Failed to claim the audio DMA\n"); | 88 | iprtd->dma_data.dma_request = dma_params->dma; |
111 | return -ENODEV; | ||
112 | } | ||
113 | 89 | ||
114 | ret = imx_dma_setup_handlers(iprtd->dma, | 90 | /* Try to grab a DMA channel */ |
115 | imx_ssi_dma_callback, | 91 | dma_cap_zero(mask); |
116 | snd_imx_dma_err_callback, substream); | 92 | dma_cap_set(DMA_SLAVE, mask); |
117 | if (ret) | 93 | iprtd->dma_chan = dma_request_channel(mask, filter, iprtd); |
118 | goto out; | 94 | if (!iprtd->dma_chan) |
95 | return -EINVAL; | ||
119 | 96 | ||
120 | ret = imx_dma_setup_progression_handler(iprtd->dma, | 97 | switch (params_format(params)) { |
121 | imx_ssi_dma_progression); | 98 | case SNDRV_PCM_FORMAT_S16_LE: |
122 | if (ret) { | 99 | buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; |
123 | pr_err("Failed to setup the DMA handler\n"); | 100 | break; |
124 | goto out; | 101 | case SNDRV_PCM_FORMAT_S20_3LE: |
102 | case SNDRV_PCM_FORMAT_S24_LE: | ||
103 | buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
104 | break; | ||
105 | default: | ||
106 | return 0; | ||
125 | } | 107 | } |
126 | 108 | ||
127 | ret = imx_dma_config_channel(iprtd->dma, | 109 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
128 | IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO, | 110 | slave_config.direction = DMA_TO_DEVICE; |
129 | IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, | 111 | slave_config.dst_addr = dma_params->dma_addr; |
130 | dma_params->dma, 1); | 112 | slave_config.dst_addr_width = buswidth; |
131 | if (ret < 0) { | 113 | slave_config.dst_maxburst = dma_params->burstsize; |
132 | pr_err("Cannot configure DMA channel: %d\n", ret); | 114 | } else { |
133 | goto out; | 115 | slave_config.direction = DMA_FROM_DEVICE; |
116 | slave_config.src_addr = dma_params->dma_addr; | ||
117 | slave_config.src_addr_width = buswidth; | ||
118 | slave_config.src_maxburst = dma_params->burstsize; | ||
134 | } | 119 | } |
135 | 120 | ||
136 | imx_dma_config_burstlen(iprtd->dma, dma_params->burstsize * 2); | 121 | ret = dmaengine_slave_config(iprtd->dma_chan, &slave_config); |
122 | if (ret) | ||
123 | return ret; | ||
137 | 124 | ||
138 | return 0; | 125 | return 0; |
139 | out: | ||
140 | imx_dma_free(iprtd->dma); | ||
141 | return ret; | ||
142 | } | 126 | } |
143 | 127 | ||
144 | static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream, | 128 | static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream, |
145 | struct snd_pcm_hw_params *params) | 129 | struct snd_pcm_hw_params *params) |
146 | { | 130 | { |
131 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | ||
147 | struct snd_pcm_runtime *runtime = substream->runtime; | 132 | struct snd_pcm_runtime *runtime = substream->runtime; |
148 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 133 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
149 | int i; | ||
150 | unsigned long dma_addr; | 134 | unsigned long dma_addr; |
135 | struct dma_chan *chan; | ||
136 | struct imx_pcm_dma_params *dma_params; | ||
137 | int ret; | ||
151 | 138 | ||
152 | imx_ssi_dma_alloc(substream); | 139 | dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); |
140 | ret = imx_ssi_dma_alloc(substream, params); | ||
141 | if (ret) | ||
142 | return ret; | ||
143 | chan = iprtd->dma_chan; | ||
153 | 144 | ||
154 | iprtd->size = params_buffer_bytes(params); | 145 | iprtd->size = params_buffer_bytes(params); |
155 | iprtd->periods = params_periods(params); | 146 | iprtd->periods = params_periods(params); |
156 | iprtd->period = params_period_bytes(params); | 147 | iprtd->period_bytes = params_period_bytes(params); |
157 | iprtd->offset = 0; | 148 | iprtd->offset = 0; |
158 | iprtd->period_time = HZ / (params_rate(params) / | 149 | iprtd->period_time = HZ / (params_rate(params) / |
159 | params_period_size(params)); | 150 | params_period_size(params)); |
160 | 151 | ||
161 | snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); | 152 | snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); |
162 | 153 | ||
163 | if (iprtd->sg_count != iprtd->periods) { | ||
164 | kfree(iprtd->sg_list); | ||
165 | |||
166 | iprtd->sg_list = kcalloc(iprtd->periods + 1, | ||
167 | sizeof(struct scatterlist), GFP_KERNEL); | ||
168 | if (!iprtd->sg_list) | ||
169 | return -ENOMEM; | ||
170 | iprtd->sg_count = iprtd->periods + 1; | ||
171 | } | ||
172 | |||
173 | sg_init_table(iprtd->sg_list, iprtd->sg_count); | ||
174 | dma_addr = runtime->dma_addr; | 154 | dma_addr = runtime->dma_addr; |
175 | 155 | ||
176 | for (i = 0; i < iprtd->periods; i++) { | 156 | iprtd->buf = (unsigned int *)substream->dma_buffer.area; |
177 | iprtd->sg_list[i].page_link = 0; | 157 | |
178 | iprtd->sg_list[i].offset = 0; | 158 | iprtd->desc = chan->device->device_prep_dma_cyclic(chan, dma_addr, |
179 | iprtd->sg_list[i].dma_address = dma_addr; | 159 | iprtd->period_bytes * iprtd->periods, |
180 | iprtd->sg_list[i].length = iprtd->period; | 160 | iprtd->period_bytes, |
181 | dma_addr += iprtd->period; | 161 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? |
162 | DMA_TO_DEVICE : DMA_FROM_DEVICE); | ||
163 | if (!iprtd->desc) { | ||
164 | dev_err(&chan->dev->device, "cannot prepare slave dma\n"); | ||
165 | return -EINVAL; | ||
182 | } | 166 | } |
183 | 167 | ||
184 | /* close the loop */ | 168 | iprtd->desc->callback = audio_dma_irq; |
185 | iprtd->sg_list[iprtd->sg_count - 1].offset = 0; | 169 | iprtd->desc->callback_param = substream; |
186 | iprtd->sg_list[iprtd->sg_count - 1].length = 0; | 170 | |
187 | iprtd->sg_list[iprtd->sg_count - 1].page_link = | ||
188 | ((unsigned long) iprtd->sg_list | 0x01) & ~0x02; | ||
189 | return 0; | 171 | return 0; |
190 | } | 172 | } |
191 | 173 | ||
@@ -194,41 +176,21 @@ static int snd_imx_pcm_hw_free(struct snd_pcm_substream *substream) | |||
194 | struct snd_pcm_runtime *runtime = substream->runtime; | 176 | struct snd_pcm_runtime *runtime = substream->runtime; |
195 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 177 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
196 | 178 | ||
197 | if (iprtd->dma >= 0) { | 179 | if (iprtd->dma_chan) { |
198 | imx_dma_free(iprtd->dma); | 180 | dma_release_channel(iprtd->dma_chan); |
199 | iprtd->dma = -EINVAL; | 181 | iprtd->dma_chan = NULL; |
200 | } | 182 | } |
201 | 183 | ||
202 | kfree(iprtd->sg_list); | ||
203 | iprtd->sg_list = NULL; | ||
204 | |||
205 | return 0; | 184 | return 0; |
206 | } | 185 | } |
207 | 186 | ||
208 | static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream) | 187 | static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream) |
209 | { | 188 | { |
210 | struct snd_pcm_runtime *runtime = substream->runtime; | ||
211 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 189 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
212 | struct imx_pcm_dma_params *dma_params; | 190 | struct imx_pcm_dma_params *dma_params; |
213 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | ||
214 | int err; | ||
215 | 191 | ||
216 | dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); | 192 | dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); |
217 | 193 | ||
218 | iprtd->substream = substream; | ||
219 | iprtd->buf = (unsigned int *)substream->dma_buffer.area; | ||
220 | iprtd->period_cnt = 0; | ||
221 | |||
222 | pr_debug("%s: buf: %p period: %d periods: %d\n", | ||
223 | __func__, iprtd->buf, iprtd->period, iprtd->periods); | ||
224 | |||
225 | err = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count, | ||
226 | IMX_DMA_LENGTH_LOOP, dma_params->dma_addr, | ||
227 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | ||
228 | DMA_MODE_WRITE : DMA_MODE_READ); | ||
229 | if (err) | ||
230 | return err; | ||
231 | |||
232 | return 0; | 194 | return 0; |
233 | } | 195 | } |
234 | 196 | ||
@@ -241,14 +203,14 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | |||
241 | case SNDRV_PCM_TRIGGER_START: | 203 | case SNDRV_PCM_TRIGGER_START: |
242 | case SNDRV_PCM_TRIGGER_RESUME: | 204 | case SNDRV_PCM_TRIGGER_RESUME: |
243 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | 205 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
244 | imx_dma_enable(iprtd->dma); | 206 | dmaengine_submit(iprtd->desc); |
245 | 207 | ||
246 | break; | 208 | break; |
247 | 209 | ||
248 | case SNDRV_PCM_TRIGGER_STOP: | 210 | case SNDRV_PCM_TRIGGER_STOP: |
249 | case SNDRV_PCM_TRIGGER_SUSPEND: | 211 | case SNDRV_PCM_TRIGGER_SUSPEND: |
250 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: | 212 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
251 | imx_dma_disable(iprtd->dma); | 213 | dmaengine_terminate_all(iprtd->dma_chan); |
252 | 214 | ||
253 | break; | 215 | break; |
254 | default: | 216 | default: |
@@ -263,6 +225,9 @@ static snd_pcm_uframes_t snd_imx_pcm_pointer(struct snd_pcm_substream *substream | |||
263 | struct snd_pcm_runtime *runtime = substream->runtime; | 225 | struct snd_pcm_runtime *runtime = substream->runtime; |
264 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 226 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
265 | 227 | ||
228 | pr_debug("%s: %ld %ld\n", __func__, iprtd->offset, | ||
229 | bytes_to_frames(substream->runtime, iprtd->offset)); | ||
230 | |||
266 | return bytes_to_frames(substream->runtime, iprtd->offset); | 231 | return bytes_to_frames(substream->runtime, iprtd->offset); |
267 | } | 232 | } |
268 | 233 | ||
@@ -279,7 +244,7 @@ static struct snd_pcm_hardware snd_imx_hardware = { | |||
279 | .channels_max = 2, | 244 | .channels_max = 2, |
280 | .buffer_bytes_max = IMX_SSI_DMABUF_SIZE, | 245 | .buffer_bytes_max = IMX_SSI_DMABUF_SIZE, |
281 | .period_bytes_min = 128, | 246 | .period_bytes_min = 128, |
282 | .period_bytes_max = 16 * 1024, | 247 | .period_bytes_max = 65535, /* Limited by SDMA engine */ |
283 | .periods_min = 2, | 248 | .periods_min = 2, |
284 | .periods_max = 255, | 249 | .periods_max = 255, |
285 | .fifo_size = 0, | 250 | .fifo_size = 0, |
@@ -304,11 +269,23 @@ static int snd_imx_open(struct snd_pcm_substream *substream) | |||
304 | } | 269 | } |
305 | 270 | ||
306 | snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware); | 271 | snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware); |
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static int snd_imx_close(struct snd_pcm_substream *substream) | ||
277 | { | ||
278 | struct snd_pcm_runtime *runtime = substream->runtime; | ||
279 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | ||
280 | |||
281 | kfree(iprtd); | ||
282 | |||
307 | return 0; | 283 | return 0; |
308 | } | 284 | } |
309 | 285 | ||
310 | static struct snd_pcm_ops imx_pcm_ops = { | 286 | static struct snd_pcm_ops imx_pcm_ops = { |
311 | .open = snd_imx_open, | 287 | .open = snd_imx_open, |
288 | .close = snd_imx_close, | ||
312 | .ioctl = snd_pcm_lib_ioctl, | 289 | .ioctl = snd_pcm_lib_ioctl, |
313 | .hw_params = snd_imx_pcm_hw_params, | 290 | .hw_params = snd_imx_pcm_hw_params, |
314 | .hw_free = snd_imx_pcm_hw_free, | 291 | .hw_free = snd_imx_pcm_hw_free, |
@@ -340,7 +317,6 @@ static struct platform_driver imx_pcm_driver = { | |||
340 | .name = "imx-pcm-audio", | 317 | .name = "imx-pcm-audio", |
341 | .owner = THIS_MODULE, | 318 | .owner = THIS_MODULE, |
342 | }, | 319 | }, |
343 | |||
344 | .probe = imx_soc_platform_probe, | 320 | .probe = imx_soc_platform_probe, |
345 | .remove = __devexit_p(imx_soc_platform_remove), | 321 | .remove = __devexit_p(imx_soc_platform_remove), |
346 | }; | 322 | }; |
@@ -356,4 +332,3 @@ static void __exit snd_imx_pcm_exit(void) | |||
356 | platform_driver_unregister(&imx_pcm_driver); | 332 | platform_driver_unregister(&imx_pcm_driver); |
357 | } | 333 | } |
358 | module_exit(snd_imx_pcm_exit); | 334 | module_exit(snd_imx_pcm_exit); |
359 | |||
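The imx-pcm-dma-mx2.c rewrite above drops the legacy i.MX scatter-gather DMA API in favour of the generic dmaengine framework: a slave channel is requested through dma_request_channel() with a filter that attaches the i.MX-specific imx_dma_data, the peripheral side is described with dmaengine_slave_config(), and a single cyclic descriptor covers the whole ring buffer with a callback per period. The core lifecycle, condensed into one hedged sketch that mirrors the calls used in the patch (playback direction only, channel request and ALSA glue omitted, parameter names illustrative):

    static int example_start_cyclic_dma(struct dma_chan *chan,
                                        dma_addr_t buf_phys, size_t buf_bytes,
                                        size_t period_bytes,
                                        dma_addr_t fifo_phys, u32 burstsize,
                                        void (*period_cb)(void *), void *cb_arg)
    {
            struct dma_slave_config cfg = { 0 };
            struct dma_async_tx_descriptor *desc;
            int ret;

            /* Peripheral side: FIFO address, register width, burst length. */
            cfg.direction      = DMA_TO_DEVICE;
            cfg.dst_addr       = fifo_phys;
            cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
            cfg.dst_maxburst   = burstsize;
            ret = dmaengine_slave_config(chan, &cfg);
            if (ret)
                    return ret;

            /* One cyclic descriptor for the whole buffer, split into periods;
             * the callback fires once per elapsed period. */
            desc = chan->device->device_prep_dma_cyclic(chan, buf_phys, buf_bytes,
                                                        period_bytes, DMA_TO_DEVICE);
            if (!desc)
                    return -EINVAL;

            desc->callback       = period_cb;
            desc->callback_param = cb_arg;

            dmaengine_submit(desc);   /* STOP later uses dmaengine_terminate_all() */
            return 0;
    }

Releasing the channel with dma_release_channel() in hw_free, as the patch does, returns it to the pool for other users.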
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c index d4bd345b0a8d..d2d98c75ee8a 100644 --- a/sound/soc/imx/imx-ssi.c +++ b/sound/soc/imx/imx-ssi.c | |||
@@ -439,7 +439,22 @@ void imx_pcm_free(struct snd_pcm *pcm) | |||
439 | } | 439 | } |
440 | EXPORT_SYMBOL_GPL(imx_pcm_free); | 440 | EXPORT_SYMBOL_GPL(imx_pcm_free); |
441 | 441 | ||
442 | static int imx_ssi_dai_probe(struct snd_soc_dai *dai) | ||
443 | { | ||
444 | struct imx_ssi *ssi = dev_get_drvdata(dai->dev); | ||
445 | uint32_t val; | ||
446 | |||
447 | snd_soc_dai_set_drvdata(dai, ssi); | ||
448 | |||
449 | val = SSI_SFCSR_TFWM0(ssi->dma_params_tx.burstsize) | | ||
450 | SSI_SFCSR_RFWM0(ssi->dma_params_rx.burstsize); | ||
451 | writel(val, ssi->base + SSI_SFCSR); | ||
452 | |||
453 | return 0; | ||
454 | } | ||
455 | |||
442 | static struct snd_soc_dai_driver imx_ssi_dai = { | 456 | static struct snd_soc_dai_driver imx_ssi_dai = { |
457 | .probe = imx_ssi_dai_probe, | ||
443 | .playback = { | 458 | .playback = { |
444 | .channels_min = 2, | 459 | .channels_min = 2, |
445 | .channels_max = 2, | 460 | .channels_max = 2, |
@@ -455,20 +470,6 @@ static struct snd_soc_dai_driver imx_ssi_dai = { | |||
455 | .ops = &imx_ssi_pcm_dai_ops, | 470 | .ops = &imx_ssi_pcm_dai_ops, |
456 | }; | 471 | }; |
457 | 472 | ||
458 | static int imx_ssi_dai_probe(struct snd_soc_dai *dai) | ||
459 | { | ||
460 | struct imx_ssi *ssi = dev_get_drvdata(dai->dev); | ||
461 | uint32_t val; | ||
462 | |||
463 | snd_soc_dai_set_drvdata(dai, ssi); | ||
464 | |||
465 | val = SSI_SFCSR_TFWM0(ssi->dma_params_tx.burstsize) | | ||
466 | SSI_SFCSR_RFWM0(ssi->dma_params_rx.burstsize); | ||
467 | writel(val, ssi->base + SSI_SFCSR); | ||
468 | |||
469 | return 0; | ||
470 | } | ||
471 | |||
472 | static struct snd_soc_dai_driver imx_ac97_dai = { | 473 | static struct snd_soc_dai_driver imx_ac97_dai = { |
473 | .probe = imx_ssi_dai_probe, | 474 | .probe = imx_ssi_dai_probe, |
474 | .ac97_control = 1, | 475 | .ac97_control = 1, |
@@ -677,7 +678,17 @@ static int imx_ssi_probe(struct platform_device *pdev) | |||
677 | goto failed_register; | 678 | goto failed_register; |
678 | } | 679 | } |
679 | 680 | ||
680 | ssi->soc_platform_pdev = platform_device_alloc("imx-fiq-pcm-audio", pdev->id); | 681 | ssi->soc_platform_pdev_fiq = platform_device_alloc("imx-fiq-pcm-audio", pdev->id); |
682 | if (!ssi->soc_platform_pdev_fiq) | ||
683 | goto failed_pdev_fiq_alloc; | ||
684 | platform_set_drvdata(ssi->soc_platform_pdev_fiq, ssi); | ||
685 | ret = platform_device_add(ssi->soc_platform_pdev_fiq); | ||
686 | if (ret) { | ||
687 | dev_err(&pdev->dev, "failed to add platform device\n"); | ||
688 | goto failed_pdev_fiq_add; | ||
689 | } | ||
690 | |||
691 | ssi->soc_platform_pdev = platform_device_alloc("imx-pcm-audio", pdev->id); | ||
681 | if (!ssi->soc_platform_pdev) | 692 | if (!ssi->soc_platform_pdev) |
682 | goto failed_pdev_alloc; | 693 | goto failed_pdev_alloc; |
683 | platform_set_drvdata(ssi->soc_platform_pdev, ssi); | 694 | platform_set_drvdata(ssi->soc_platform_pdev, ssi); |
@@ -692,6 +703,9 @@ static int imx_ssi_probe(struct platform_device *pdev) | |||
692 | failed_pdev_add: | 703 | failed_pdev_add: |
693 | platform_device_put(ssi->soc_platform_pdev); | 704 | platform_device_put(ssi->soc_platform_pdev); |
694 | failed_pdev_alloc: | 705 | failed_pdev_alloc: |
706 | failed_pdev_fiq_add: | ||
707 | platform_device_put(ssi->soc_platform_pdev_fiq); | ||
708 | failed_pdev_fiq_alloc: | ||
695 | snd_soc_unregister_dai(&pdev->dev); | 709 | snd_soc_unregister_dai(&pdev->dev); |
696 | failed_register: | 710 | failed_register: |
697 | failed_ac97: | 711 | failed_ac97: |
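imx_ssi_probe() now registers two child platform devices, "imx-fiq-pcm-audio" and "imx-pcm-audio", so both the FIQ-based and the new DMA-based PCM platform drivers can bind to the SSI; note that the error path uses platform_device_put() for a device that was allocated but never successfully added. A minimal sketch of that alloc/add/put pattern, with hypothetical names:

    struct platform_device *pdev;
    int ret;

    pdev = platform_device_alloc("example-pcm-audio", id);
    if (!pdev)
            return -ENOMEM;

    platform_set_drvdata(pdev, priv);

    ret = platform_device_add(pdev);
    if (ret) {
            platform_device_put(pdev);      /* never added: drop the reference only */
            return ret;
    }

    /* Once added, later teardown uses platform_device_unregister(pdev). */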
diff --git a/sound/soc/imx/imx-ssi.h b/sound/soc/imx/imx-ssi.h index 53b780d9b2b0..a4406a134892 100644 --- a/sound/soc/imx/imx-ssi.h +++ b/sound/soc/imx/imx-ssi.h | |||
@@ -185,6 +185,9 @@ | |||
185 | 185 | ||
186 | #define DRV_NAME "imx-ssi" | 186 | #define DRV_NAME "imx-ssi" |
187 | 187 | ||
188 | #include <linux/dmaengine.h> | ||
189 | #include <mach/dma.h> | ||
190 | |||
188 | struct imx_pcm_dma_params { | 191 | struct imx_pcm_dma_params { |
189 | int dma; | 192 | int dma; |
190 | unsigned long dma_addr; | 193 | unsigned long dma_addr; |
@@ -212,6 +215,7 @@ struct imx_ssi { | |||
212 | int enabled; | 215 | int enabled; |
213 | 216 | ||
214 | struct platform_device *soc_platform_pdev; | 217 | struct platform_device *soc_platform_pdev; |
218 | struct platform_device *soc_platform_pdev_fiq; | ||
215 | }; | 219 | }; |
216 | 220 | ||
217 | struct snd_soc_platform *imx_ssi_fiq_init(struct platform_device *pdev, | 221 | struct snd_soc_platform *imx_ssi_fiq_init(struct platform_device *pdev, |
diff --git a/sound/soc/imx/phycore-ac97.c b/sound/soc/imx/phycore-ac97.c index 6a65dd705519..39f23734781a 100644 --- a/sound/soc/imx/phycore-ac97.c +++ b/sound/soc/imx/phycore-ac97.c | |||
@@ -20,9 +20,6 @@ | |||
20 | #include <sound/soc-dapm.h> | 20 | #include <sound/soc-dapm.h> |
21 | #include <asm/mach-types.h> | 21 | #include <asm/mach-types.h> |
22 | 22 | ||
23 | #include "../codecs/wm9712.h" | ||
24 | #include "imx-ssi.h" | ||
25 | |||
26 | static struct snd_soc_card imx_phycore; | 23 | static struct snd_soc_card imx_phycore; |
27 | 24 | ||
28 | static struct snd_soc_ops imx_phycore_hifi_ops = { | 25 | static struct snd_soc_ops imx_phycore_hifi_ops = { |
@@ -41,7 +38,7 @@ static struct snd_soc_dai_link imx_phycore_dai_ac97[] = { | |||
41 | }; | 38 | }; |
42 | 39 | ||
43 | static struct snd_soc_card imx_phycore = { | 40 | static struct snd_soc_card imx_phycore = { |
44 | .name = "PhyCORE-audio", | 41 | .name = "PhyCORE-ac97-audio", |
45 | .dai_link = imx_phycore_dai_ac97, | 42 | .dai_link = imx_phycore_dai_ac97, |
46 | .num_links = ARRAY_SIZE(imx_phycore_dai_ac97), | 43 | .num_links = ARRAY_SIZE(imx_phycore_dai_ac97), |
47 | }; | 44 | }; |
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index d211c9fa5a91..7e84f24b9a88 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c | |||
@@ -644,15 +644,23 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai, | |||
644 | 644 | ||
645 | 645 | ||
646 | case OMAP_MCBSP_CLKR_SRC_CLKR: | 646 | case OMAP_MCBSP_CLKR_SRC_CLKR: |
647 | if (cpu_class_is_omap1()) | ||
648 | break; | ||
647 | omap2_mcbsp1_mux_clkr_src(CLKR_SRC_CLKR); | 649 | omap2_mcbsp1_mux_clkr_src(CLKR_SRC_CLKR); |
648 | break; | 650 | break; |
649 | case OMAP_MCBSP_CLKR_SRC_CLKX: | 651 | case OMAP_MCBSP_CLKR_SRC_CLKX: |
652 | if (cpu_class_is_omap1()) | ||
653 | break; | ||
650 | omap2_mcbsp1_mux_clkr_src(CLKR_SRC_CLKX); | 654 | omap2_mcbsp1_mux_clkr_src(CLKR_SRC_CLKX); |
651 | break; | 655 | break; |
652 | case OMAP_MCBSP_FSR_SRC_FSR: | 656 | case OMAP_MCBSP_FSR_SRC_FSR: |
657 | if (cpu_class_is_omap1()) | ||
658 | break; | ||
653 | omap2_mcbsp1_mux_fsr_src(FSR_SRC_FSR); | 659 | omap2_mcbsp1_mux_fsr_src(FSR_SRC_FSR); |
654 | break; | 660 | break; |
655 | case OMAP_MCBSP_FSR_SRC_FSX: | 661 | case OMAP_MCBSP_FSR_SRC_FSX: |
662 | if (cpu_class_is_omap1()) | ||
663 | break; | ||
656 | omap2_mcbsp1_mux_fsr_src(FSR_SRC_FSX); | 664 | omap2_mcbsp1_mux_fsr_src(FSR_SRC_FSX); |
657 | break; | 665 | break; |
658 | default: | 666 | default: |
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c index 97e9423615c9..f451acd4935b 100644 --- a/sound/soc/pxa/corgi.c +++ b/sound/soc/pxa/corgi.c | |||
@@ -100,8 +100,13 @@ static int corgi_startup(struct snd_pcm_substream *substream) | |||
100 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 100 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
101 | struct snd_soc_codec *codec = rtd->codec; | 101 | struct snd_soc_codec *codec = rtd->codec; |
102 | 102 | ||
103 | mutex_lock(&codec->mutex); | ||
104 | |||
103 | /* check the jack status at stream startup */ | 105 | /* check the jack status at stream startup */ |
104 | corgi_ext_control(codec); | 106 | corgi_ext_control(codec); |
107 | |||
108 | mutex_unlock(&codec->mutex); | ||
109 | |||
105 | return 0; | 110 | return 0; |
106 | } | 111 | } |
107 | 112 | ||
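corgi_ext_control() reconfigures DAPM routing for the current jack state, and the startup path now takes codec->mutex around it explicitly; the same locking is added to the magician, poodle, spitz and tosa machine drivers in the hunks that follow. As a hedged sketch of the kind of helper this protects, assuming the codec-based DAPM pin calls of this kernel and illustrative pin names:

    /* Caller is expected to hold codec->mutex around this. */
    static void example_ext_control(struct snd_soc_codec *codec, int jack_in)
    {
            if (jack_in) {
                    snd_soc_dapm_enable_pin(codec, "Headphone Jack");
                    snd_soc_dapm_disable_pin(codec, "Ext Spk");
            } else {
                    snd_soc_dapm_disable_pin(codec, "Headphone Jack");
                    snd_soc_dapm_enable_pin(codec, "Ext Spk");
            }

            snd_soc_dapm_sync(codec);       /* flush the new routing to the hardware */
    }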
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c index b8207ced4072..5ef0526924b9 100644 --- a/sound/soc/pxa/magician.c +++ b/sound/soc/pxa/magician.c | |||
@@ -72,9 +72,13 @@ static int magician_startup(struct snd_pcm_substream *substream) | |||
72 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 72 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
73 | struct snd_soc_codec *codec = rtd->codec; | 73 | struct snd_soc_codec *codec = rtd->codec; |
74 | 74 | ||
75 | mutex_lock(&codec->mutex); | ||
76 | |||
75 | /* check the jack status at stream startup */ | 77 | /* check the jack status at stream startup */ |
76 | magician_ext_control(codec); | 78 | magician_ext_control(codec); |
77 | 79 | ||
80 | mutex_unlock(&codec->mutex); | ||
81 | |||
78 | return 0; | 82 | return 0; |
79 | } | 83 | } |
80 | 84 | ||
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c index af84ee9c5e11..84edd0385a21 100644 --- a/sound/soc/pxa/poodle.c +++ b/sound/soc/pxa/poodle.c | |||
@@ -77,8 +77,13 @@ static int poodle_startup(struct snd_pcm_substream *substream) | |||
77 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 77 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
78 | struct snd_soc_codec *codec = rtd->codec; | 78 | struct snd_soc_codec *codec = rtd->codec; |
79 | 79 | ||
80 | mutex_lock(&codec->mutex); | ||
81 | |||
80 | /* check the jack status at stream startup */ | 82 | /* check the jack status at stream startup */ |
81 | poodle_ext_control(codec); | 83 | poodle_ext_control(codec); |
84 | |||
85 | mutex_unlock(&codec->mutex); | ||
86 | |||
82 | return 0; | 87 | return 0; |
83 | } | 88 | } |
84 | 89 | ||
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c index f470f360f4dd..0b30d7de24ec 100644 --- a/sound/soc/pxa/spitz.c +++ b/sound/soc/pxa/spitz.c | |||
@@ -108,8 +108,13 @@ static int spitz_startup(struct snd_pcm_substream *substream) | |||
108 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 108 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
109 | struct snd_soc_codec *codec = rtd->codec; | 109 | struct snd_soc_codec *codec = rtd->codec; |
110 | 110 | ||
111 | mutex_lock(&codec->mutex); | ||
112 | |||
111 | /* check the jack status at stream startup */ | 113 | /* check the jack status at stream startup */ |
112 | spitz_ext_control(codec); | 114 | spitz_ext_control(codec); |
115 | |||
116 | mutex_unlock(&codec->mutex); | ||
117 | |||
113 | return 0; | 118 | return 0; |
114 | } | 119 | } |
115 | 120 | ||
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c index 73d0edd8ded9..7b983f935454 100644 --- a/sound/soc/pxa/tosa.c +++ b/sound/soc/pxa/tosa.c | |||
@@ -81,8 +81,13 @@ static int tosa_startup(struct snd_pcm_substream *substream) | |||
81 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 81 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
82 | struct snd_soc_codec *codec = rtd->codec; | 82 | struct snd_soc_codec *codec = rtd->codec; |
83 | 83 | ||
84 | mutex_lock(&codec->mutex); | ||
85 | |||
84 | /* check the jack status at stream startup */ | 86 | /* check the jack status at stream startup */ |
85 | tosa_ext_control(codec); | 87 | tosa_ext_control(codec); |
88 | |||
89 | mutex_unlock(&codec->mutex); | ||
90 | |||
86 | return 0; | 91 | return 0; |
87 | } | 92 | } |
88 | 93 | ||
diff --git a/sound/soc/s3c24xx/Kconfig b/sound/soc/s3c24xx/Kconfig index 8a6b53ccd203..d85bf8a0abb2 100644 --- a/sound/soc/s3c24xx/Kconfig +++ b/sound/soc/s3c24xx/Kconfig | |||
@@ -2,6 +2,7 @@ config SND_S3C24XX_SOC | |||
2 | tristate "SoC Audio for the Samsung S3CXXXX chips" | 2 | tristate "SoC Audio for the Samsung S3CXXXX chips" |
3 | depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 | 3 | depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 |
4 | select S3C64XX_DMA if ARCH_S3C64XX | 4 | select S3C64XX_DMA if ARCH_S3C64XX |
5 | select S3C2410_DMA if ARCH_S3C2410 | ||
5 | help | 6 | help |
6 | Say Y or M if you want to add support for codecs attached to | 7 | Say Y or M if you want to add support for codecs attached to |
7 | the S3C24XX AC97 or I2S interfaces. You will also need to | 8 | the S3C24XX AC97 or I2S interfaces. You will also need to |
diff --git a/sound/soc/s3c24xx/rx1950_uda1380.c b/sound/soc/s3c24xx/rx1950_uda1380.c index ffd5cf2fb0a9..468cc11fdf47 100644 --- a/sound/soc/s3c24xx/rx1950_uda1380.c +++ b/sound/soc/s3c24xx/rx1950_uda1380.c | |||
@@ -50,7 +50,6 @@ static unsigned int rates[] = { | |||
50 | 16000, | 50 | 16000, |
51 | 44100, | 51 | 44100, |
52 | 48000, | 52 | 48000, |
53 | 88200, | ||
54 | }; | 53 | }; |
55 | 54 | ||
56 | static struct snd_pcm_hw_constraint_list hw_rates = { | 55 | static struct snd_pcm_hw_constraint_list hw_rates = { |
@@ -130,7 +129,6 @@ static const struct snd_soc_dapm_route audio_map[] = { | |||
130 | }; | 129 | }; |
131 | 130 | ||
132 | static struct platform_device *s3c24xx_snd_device; | 131 | static struct platform_device *s3c24xx_snd_device; |
133 | static struct clk *xtal; | ||
134 | 132 | ||
135 | static int rx1950_startup(struct snd_pcm_substream *substream) | 133 | static int rx1950_startup(struct snd_pcm_substream *substream) |
136 | { | 134 | { |
@@ -179,10 +177,8 @@ static int rx1950_hw_params(struct snd_pcm_substream *substream, | |||
179 | case 44100: | 177 | case 44100: |
180 | case 88200: | 178 | case 88200: |
181 | clk_source = S3C24XX_CLKSRC_MPLL; | 179 | clk_source = S3C24XX_CLKSRC_MPLL; |
182 | fs_mode = S3C2410_IISMOD_256FS; | 180 | fs_mode = S3C2410_IISMOD_384FS; |
183 | div = clk_get_rate(xtal) / (256 * rate); | 181 | div = 1; |
184 | if (clk_get_rate(xtal) % (256 * rate) > (128 * rate)) | ||
185 | div++; | ||
186 | break; | 182 | break; |
187 | default: | 183 | default: |
188 | printk(KERN_ERR "%s: rate %d is not supported\n", | 184 | printk(KERN_ERR "%s: rate %d is not supported\n", |
@@ -210,7 +206,7 @@ static int rx1950_hw_params(struct snd_pcm_substream *substream, | |||
210 | 206 | ||
211 | /* set MCLK division for sample rate */ | 207 | /* set MCLK division for sample rate */ |
212 | ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK, | 208 | ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK, |
213 | S3C2410_IISMOD_384FS); | 209 | fs_mode); |
214 | if (ret < 0) | 210 | if (ret < 0) |
215 | return ret; | 211 | return ret; |
216 | 212 | ||
@@ -295,17 +291,8 @@ static int __init rx1950_init(void) | |||
295 | goto err_plat_add; | 291 | goto err_plat_add; |
296 | } | 292 | } |
297 | 293 | ||
298 | xtal = clk_get(&s3c24xx_snd_device->dev, "xtal"); | ||
299 | |||
300 | if (IS_ERR(xtal)) { | ||
301 | ret = PTR_ERR(xtal); | ||
302 | platform_device_unregister(s3c24xx_snd_device); | ||
303 | goto err_clk; | ||
304 | } | ||
305 | |||
306 | return 0; | 294 | return 0; |
307 | 295 | ||
308 | err_clk: | ||
309 | err_plat_add: | 296 | err_plat_add: |
310 | err_plat_alloc: | 297 | err_plat_alloc: |
311 | err_gpio_conf: | 298 | err_gpio_conf: |
@@ -320,7 +307,6 @@ static void __exit rx1950_exit(void) | |||
320 | platform_device_unregister(s3c24xx_snd_device); | 307 | platform_device_unregister(s3c24xx_snd_device); |
321 | snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios), | 308 | snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios), |
322 | hp_jack_gpios); | 309 | hp_jack_gpios); |
323 | clk_put(xtal); | ||
324 | gpio_free(S3C2410_GPA(1)); | 310 | gpio_free(S3C2410_GPA(1)); |
325 | } | 311 | } |
326 | 312 | ||
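For the rx1950 clock change above, a quick arithmetic sanity check (plain multiplication, not taken from the patch description): with the interface running at 384fs, the 44.1 kHz family needs MCLK = 384 x 44100 Hz = 16,934,400 Hz (16.9344 MHz) and 384 x 88200 Hz = 33,868,800 Hz (33.8688 MHz), whereas the old 256fs setting would have required 11,289,600 Hz and 22,579,200 Hz respectively. Keeping fs_mode and the value passed to S3C24XX_DIV_MCLK consistent, as the patch does, is what makes either figure meaningful.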
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 614a8b30d87b..441285ade024 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -3043,8 +3043,10 @@ int snd_soc_register_dais(struct device *dev, | |||
3043 | for (i = 0; i < count; i++) { | 3043 | for (i = 0; i < count; i++) { |
3044 | 3044 | ||
3045 | dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL); | 3045 | dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL); |
3046 | if (dai == NULL) | 3046 | if (dai == NULL) { |
3047 | return -ENOMEM; | 3047 | ret = -ENOMEM; |
3048 | goto err; | ||
3049 | } | ||
3048 | 3050 | ||
3049 | /* create DAI component name */ | 3051 | /* create DAI component name */ |
3050 | dai->name = fmt_multiple_name(dev, &dai_drv[i]); | 3052 | dai->name = fmt_multiple_name(dev, &dai_drv[i]); |
@@ -3263,9 +3265,6 @@ int snd_soc_register_codec(struct device *dev, | |||
3263 | return 0; | 3265 | return 0; |
3264 | 3266 | ||
3265 | error: | 3267 | error: |
3266 | for (i--; i >= 0; i--) | ||
3267 | snd_soc_unregister_dai(dev); | ||
3268 | |||
3269 | if (codec->reg_cache) | 3268 | if (codec->reg_cache) |
3270 | kfree(codec->reg_cache); | 3269 | kfree(codec->reg_cache); |
3271 | kfree(codec->name); | 3270 | kfree(codec->name); |
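The soc-core change above converts the allocation failure in snd_soc_register_dais() from a bare return into a jump to the function's error label, so work done in earlier loop iterations can be unwound, while the snd_soc_register_codec() error path drops a DAI-unregistration loop that no longer belongs there. A minimal sketch of the register-many/unwind idiom, with hypothetical helpers:

    static int example_register_many(struct item *items, int count)
    {
            int i, ret;

            for (i = 0; i < count; i++) {
                    ret = example_register_one(&items[i]);
                    if (ret)
                            goto err;       /* undo whatever already succeeded */
            }
            return 0;

    err:
            while (--i >= 0)
                    example_unregister_one(&items[i]);      /* reverse order */
            return ret;
    }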
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 7d85c6496afa..75ed6491222d 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -683,12 +683,12 @@ static int dapm_seq_compare(struct snd_soc_dapm_widget *a, | |||
683 | struct snd_soc_dapm_widget *b, | 683 | struct snd_soc_dapm_widget *b, |
684 | int sort[]) | 684 | int sort[]) |
685 | { | 685 | { |
686 | if (a->codec != b->codec) | ||
687 | return (unsigned long)a - (unsigned long)b; | ||
688 | if (sort[a->id] != sort[b->id]) | 686 | if (sort[a->id] != sort[b->id]) |
689 | return sort[a->id] - sort[b->id]; | 687 | return sort[a->id] - sort[b->id]; |
690 | if (a->reg != b->reg) | 688 | if (a->reg != b->reg) |
691 | return a->reg - b->reg; | 689 | return a->reg - b->reg; |
690 | if (a->codec != b->codec) | ||
691 | return (unsigned long)a->codec - (unsigned long)b->codec; | ||
692 | 692 | ||
693 | return 0; | 693 | return 0; |
694 | } | 694 | } |
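The dapm_seq_compare() reordering above makes the power-sequence sort key read: widget sort order first, then register, then codec, and it compares the codec pointers rather than raw widget pointers for the final tiebreak. Keeping same-register widgets adjacent is what lets the sequencer coalesce their register writes; for reference, this is essentially how the comparator is consumed when a widget is inserted into a power list (a sketch modelled on the kernel's dapm_seq_insert(), field names assumed):

    static void example_seq_insert(struct snd_soc_dapm_widget *new_widget,
                                   struct list_head *list, int sort[])
    {
            struct snd_soc_dapm_widget *w;

            list_for_each_entry(w, list, power_list)
                    if (dapm_seq_compare(new_widget, w, sort) < 0) {
                            list_add_tail(&new_widget->power_list, &w->power_list);
                            return;
                    }

            list_add_tail(&new_widget->power_list, list);   /* largest key goes last */
    }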
diff --git a/sound/sound_core.c b/sound/sound_core.c index c03bbaefdbc3..5580aced8730 100644 --- a/sound/sound_core.c +++ b/sound/sound_core.c | |||
@@ -104,7 +104,6 @@ module_exit(cleanup_soundcore); | |||
104 | 104 | ||
105 | #include <linux/init.h> | 105 | #include <linux/init.h> |
106 | #include <linux/slab.h> | 106 | #include <linux/slab.h> |
107 | #include <linux/smp_lock.h> | ||
108 | #include <linux/types.h> | 107 | #include <linux/types.h> |
109 | #include <linux/kernel.h> | 108 | #include <linux/kernel.h> |
110 | #include <linux/sound.h> | 109 | #include <linux/sound.h> |
diff --git a/sound/spi/at73c213.c b/sound/spi/at73c213.c index 1bc56b2b94e2..337a00241a1f 100644 --- a/sound/spi/at73c213.c +++ b/sound/spi/at73c213.c | |||
@@ -155,7 +155,7 @@ static int snd_at73c213_set_bitrate(struct snd_at73c213 *chip) | |||
155 | if (max_tries < 1) | 155 | if (max_tries < 1) |
156 | max_tries = 1; | 156 | max_tries = 1; |
157 | 157 | ||
158 | /* ssc_div must be a power of 2. */ | 158 | /* ssc_div must be even. */ |
159 | ssc_div = (ssc_div + 1) & ~1UL; | 159 | ssc_div = (ssc_div + 1) & ~1UL; |
160 | 160 | ||
161 | if ((ssc_rate / (ssc_div * 2 * 16)) < BITRATE_MIN) { | 161 | if ((ssc_rate / (ssc_div * 2 * 16)) < BITRATE_MIN) { |
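The comment fix above now matches the code: (ssc_div + 1) & ~1UL rounds ssc_div up to the next even number, not to a power of two. Worked values: ssc_div = 5 gives (5 + 1) & ~1 = 6, ssc_div = 6 gives (6 + 1) & ~1 = 6, and ssc_div = 7 gives 8, so even inputs pass through unchanged and odd ones are bumped up by one.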