263 files changed, 2865 insertions, 2492 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 274b32d12532..492e81df2968 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
| @@ -387,26 +387,6 @@ Who: Tejun Heo <tj@kernel.org> | |||
| 387 | 387 | ||
| 388 | ---------------------------- | 388 | ---------------------------- |
| 389 | 389 | ||
| 390 | What: Support for lcd_switch and display_get in asus-laptop driver | ||
| 391 | When: March 2010 | ||
| 392 | Why: These two features use non-standard interfaces. There are the | ||
| 393 | only features that really need multiple path to guess what's | ||
| 394 | the right method name on a specific laptop. | ||
| 395 | |||
| 396 | Removing them will allow to remove a lot of code an significantly | ||
| 397 | clean the drivers. | ||
| 398 | |||
| 399 | This will affect the backlight code which won't be able to know | ||
| 400 | if the backlight is on or off. The platform display file will also be | ||
| 401 | write only (like the one in eeepc-laptop). | ||
| 402 | |||
| 403 | This should'nt affect a lot of user because they usually know | ||
| 404 | when their display is on or off. | ||
| 405 | |||
| 406 | Who: Corentin Chary <corentin.chary@gmail.com> | ||
| 407 | |||
| 408 | ---------------------------- | ||
| 409 | |||
| 410 | What: sysfs-class-rfkill state file | 390 | What: sysfs-class-rfkill state file |
| 411 | When: Feb 2014 | 391 | When: Feb 2014 |
| 412 | Files: net/rfkill/core.c | 392 | Files: net/rfkill/core.c |
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt new file mode 100644 index 000000000000..23fcb05175be --- /dev/null +++ b/Documentation/input/event-codes.txt | |||
| @@ -0,0 +1,262 @@ | |||
| 1 | The input protocol uses a map of types and codes to express input device values | ||
| 2 | to userspace. This document describes the types and codes and how and when they | ||
| 3 | may be used. | ||
| 4 | |||
| 5 | A single hardware event generates multiple input events. Each input event | ||
| 6 | contains the new value of a single data item. A special event type, EV_SYN, is | ||
| 7 | used to separate input events into packets of input data changes occurring at | ||
| 8 | the same moment in time. In the following, the term "event" refers to a single | ||
| 9 | input event encompassing a type, code, and value. | ||
| 10 | |||
| 11 | The input protocol is a stateful protocol. Events are emitted only when values | ||
| 12 | of event codes have changed. However, the state is maintained within the Linux | ||
| 13 | input subsystem; drivers do not need to maintain the state and may attempt to | ||
| 14 | emit unchanged values without harm. Userspace may obtain the current state of | ||
| 15 | event code values using the EVIOCG* ioctls defined in linux/input.h. The event | ||
| 16 | reports supported by a device are also provided by sysfs in | ||
| 17 | class/input/event*/device/capabilities/, and the properties of a device are | ||
| 18 | provided in class/input/event*/device/properties. | ||
| 19 | |||
| 20 | Types: | ||
| 21 | ========== | ||
| 22 | Types are groupings of codes under a logical input construct. Each type has a | ||
| 23 | set of applicable codes to be used in generating events. See the Codes section | ||
| 24 | for details on valid codes for each type. | ||
| 25 | |||
| 26 | * EV_SYN: | ||
| 27 | - Used as markers to separate events. Events may be separated in time or in | ||
| 28 | space, such as with the multitouch protocol. | ||
| 29 | |||
| 30 | * EV_KEY: | ||
| 31 | - Used to describe state changes of keyboards, buttons, or other key-like | ||
| 32 | devices. | ||
| 33 | |||
| 34 | * EV_REL: | ||
| 35 | - Used to describe relative axis value changes, e.g. moving the mouse 5 units | ||
| 36 | to the left. | ||
| 37 | |||
| 38 | * EV_ABS: | ||
| 39 | - Used to describe absolute axis value changes, e.g. describing the | ||
| 40 | coordinates of a touch on a touchscreen. | ||
| 41 | |||
| 42 | * EV_MSC: | ||
| 43 | - Used to describe miscellaneous input data that do not fit into other types. | ||
| 44 | |||
| 45 | * EV_SW: | ||
| 46 | - Used to describe binary state input switches. | ||
| 47 | |||
| 48 | * EV_LED: | ||
| 49 | - Used to turn LEDs on devices on and off. | ||
| 50 | |||
| 51 | * EV_SND: | ||
| 52 | - Used to output sound to devices. | ||
| 53 | |||
| 54 | * EV_REP: | ||
| 55 | - Used for autorepeating devices. | ||
| 56 | |||
| 57 | * EV_FF: | ||
| 58 | - Used to send force feedback commands to an input device. | ||
| 59 | |||
| 60 | * EV_PWR: | ||
| 61 | - A special type for power button and switch input. | ||
| 62 | |||
| 63 | * EV_FF_STATUS: | ||
| 64 | - Used to receive force feedback device status. | ||
| 65 | |||
| 66 | Codes: | ||
| 67 | ========== | ||
| 68 | Codes define the precise type of event. | ||
| 69 | |||
| 70 | EV_SYN: | ||
| 71 | ---------- | ||
| 72 | EV_SYN event values are undefined. Their usage is defined only by when they are | ||
| 73 | sent in the evdev event stream. | ||
| 74 | |||
| 75 | * SYN_REPORT: | ||
| 76 | - Used to synchronize and separate events into packets of input data changes | ||
| 77 | occurring at the same moment in time. For example, motion of a mouse may set | ||
| 78 | the REL_X and REL_Y values for one motion, then emit a SYN_REPORT. The next | ||
| 79 | motion will emit more REL_X and REL_Y values and send another SYN_REPORT. | ||
| 80 | |||
| 81 | * SYN_CONFIG: | ||
| 82 | - TBD | ||
| 83 | |||
| 84 | * SYN_MT_REPORT: | ||
| 85 | - Used to synchronize and separate touch events. See the | ||
| 86 | multi-touch-protocol.txt document for more information. | ||
| 87 | |||
| 88 | * SYN_DROPPED: | ||
| 89 | - Used to indicate buffer overrun in the evdev client's event queue. | ||
| 90 | Client should ignore all events up to and including next SYN_REPORT | ||
| 91 | event and query the device (using EVIOCG* ioctls) to obtain its | ||
| 92 | current state. | ||
| 93 | |||
| 94 | EV_KEY: | ||
| 95 | ---------- | ||
| 96 | EV_KEY events take the form KEY_<name> or BTN_<name>. For example, KEY_A is used | ||
| 97 | to represent the 'A' key on a keyboard. When a key is depressed, an event with | ||
| 98 | the key's code is emitted with value 1. When the key is released, an event is | ||
| 99 | emitted with value 0. Some hardware sends events when a key is repeated. These | ||
| 100 | events have a value of 2. In general, KEY_<name> is used for keyboard keys, and | ||
| 101 | BTN_<name> is used for other types of momentary switch events. | ||
| 102 | |||
| 103 | A few EV_KEY codes have special meanings: | ||
| 104 | |||
| 105 | * BTN_TOOL_<name>: | ||
| 106 | - These codes are used in conjunction with input trackpads, tablets, and | ||
| 107 | touchscreens. These devices may be used with fingers, pens, or other tools. | ||
| 108 | When an event occurs and a tool is used, the corresponding BTN_TOOL_<name> | ||
| 109 | code should be set to a value of 1. When the tool is no longer interacting | ||
| 110 | with the input device, the BTN_TOOL_<name> code should be reset to 0. All | ||
| 111 | trackpads, tablets, and touchscreens should use at least one BTN_TOOL_<name> | ||
| 112 | code when events are generated. | ||
| 113 | |||
| 114 | * BTN_TOUCH: | ||
| 115 | BTN_TOUCH is used for touch contact. While an input tool is determined to be | ||
| 116 | within meaningful physical contact, the value of this property must be set | ||
| 117 | to 1. Meaningful physical contact may mean any contact, or it may mean | ||
| 118 | contact conditioned by an implementation-defined property. For example, a | ||
| 119 | touchpad may set the value to 1 only when the touch pressure rises above a | ||
| 120 | certain value. BTN_TOUCH may be combined with BTN_TOOL_<name> codes. For | ||
| 121 | example, a pen tablet may set BTN_TOOL_PEN to 1 and BTN_TOUCH to 0 while the | ||
| 122 | pen is hovering over but not touching the tablet surface. | ||
| 123 | |||
| 124 | Note: For appropriate function of the legacy mousedev emulation driver, | ||
| 125 | BTN_TOUCH must be the first evdev code emitted in a synchronization frame. | ||
| 126 | |||
| 127 | Note: Historically a touch device with BTN_TOOL_FINGER and BTN_TOUCH was | ||
| 128 | interpreted as a touchpad by userspace, while a similar device without | ||
| 129 | BTN_TOOL_FINGER was interpreted as a touchscreen. For backwards compatibility | ||
| 130 | with current userspace it is recommended to follow this distinction. In the | ||
| 131 | future, this distinction will be deprecated and the device properties ioctl | ||
| 132 | EVIOCGPROP, defined in linux/input.h, will be used to convey the device type. | ||
| 133 | |||
| 134 | * BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP, BTN_TOOL_TRIPLETAP, BTN_TOOL_QUADTAP: | ||
| 135 | - These codes denote one, two, three, and four finger interaction on a | ||
| 136 | trackpad or touchscreen. For example, if the user uses two fingers and moves | ||
| 137 | them on the touchpad in an effort to scroll content on screen, | ||
| 138 | BTN_TOOL_DOUBLETAP should be set to value 1 for the duration of the motion. | ||
| 139 | Note that all BTN_TOOL_<name> codes and the BTN_TOUCH code are orthogonal in | ||
| 140 | purpose. A trackpad event generated by finger touches should generate events | ||
| 141 | for one code from each group. At most only one of these BTN_TOOL_<name> | ||
| 142 | codes should have a value of 1 during any synchronization frame. | ||
| 143 | |||
| 144 | Note: Historically some drivers emitted multiple of the finger count codes with | ||
| 145 | a value of 1 in the same synchronization frame. This usage is deprecated. | ||
| 146 | |||
| 147 | Note: In multitouch drivers, the input_mt_report_finger_count() function should | ||
| 148 | be used to emit these codes. Please see multi-touch-protocol.txt for details. | ||
| 149 | |||
| 150 | EV_REL: | ||
| 151 | ---------- | ||
| 152 | EV_REL events describe relative changes in a property. For example, a mouse may | ||
| 153 | move to the left by a certain number of units, but its absolute position in | ||
| 154 | space is unknown. If the absolute position is known, EV_ABS codes should be used | ||
| 155 | instead of EV_REL codes. | ||
| 156 | |||
| 157 | A few EV_REL codes have special meanings: | ||
| 158 | |||
| 159 | * REL_WHEEL, REL_HWHEEL: | ||
| 160 | - These codes are used for vertical and horizontal scroll wheels, | ||
| 161 | respectively. | ||
| 162 | |||
| 163 | EV_ABS: | ||
| 164 | ---------- | ||
| 165 | EV_ABS events describe absolute changes in a property. For example, a touchpad | ||
| 166 | may emit coordinates for a touch location. | ||
| 167 | |||
| 168 | A few EV_ABS codes have special meanings: | ||
| 169 | |||
| 170 | * ABS_DISTANCE: | ||
| 171 | - Used to describe the distance of a tool from an interaction surface. This | ||
| 172 | event should only be emitted while the tool is hovering, meaning in close | ||
| 173 | proximity of the device and while the value of the BTN_TOUCH code is 0. If | ||
| 174 | the input device may be used freely in three dimensions, consider ABS_Z | ||
| 175 | instead. | ||
| 176 | |||
| 177 | * ABS_MT_<name>: | ||
| 178 | - Used to describe multitouch input events. Please see | ||
| 179 | multi-touch-protocol.txt for details. | ||
| 180 | |||
| 181 | EV_SW: | ||
| 182 | ---------- | ||
| 183 | EV_SW events describe stateful binary switches. For example, the SW_LID code is | ||
| 184 | used to denote when a laptop lid is closed. | ||
| 185 | |||
| 186 | Upon binding to a device or resuming from suspend, a driver must report | ||
| 187 | the current switch state. This ensures that the device, kernel, and userspace | ||
| 188 | state is in sync. | ||
| 189 | |||
| 190 | Upon resume, if the switch state is the same as before suspend, then the input | ||
| 191 | subsystem will filter out the duplicate switch state reports. The driver does | ||
| 192 | not need to keep the state of the switch at any time. | ||
| 193 | |||
| 194 | EV_MSC: | ||
| 195 | ---------- | ||
| 196 | EV_MSC events are used for input and output events that do not fall under other | ||
| 197 | categories. | ||
| 198 | |||
| 199 | EV_LED: | ||
| 200 | ---------- | ||
| 201 | EV_LED events are used for input and output to set and query the state of | ||
| 202 | various LEDs on devices. | ||
| 203 | |||
| 204 | EV_REP: | ||
| 205 | ---------- | ||
| 206 | EV_REP events are used for specifying autorepeating events. | ||
| 207 | |||
| 208 | EV_SND: | ||
| 209 | ---------- | ||
| 210 | EV_SND events are used for sending sound commands to simple sound output | ||
| 211 | devices. | ||
| 212 | |||
| 213 | EV_FF: | ||
| 214 | ---------- | ||
| 215 | EV_FF events are used to initialize a force feedback capable device and to cause | ||
| 216 | such a device to produce feedback effects. | ||
| 217 | |||
| 218 | EV_PWR: | ||
| 219 | ---------- | ||
| 220 | EV_PWR events are a special type of event used specifically for power | ||
| 221 | management. Its usage is not well defined. To be addressed later. | ||
| 222 | |||
| 223 | Guidelines: | ||
| 224 | ========== | ||
| 225 | The guidelines below ensure proper single-touch and multi-finger functionality. | ||
| 226 | For multi-touch functionality, see the multi-touch-protocol.txt document for | ||
| 227 | more information. | ||
| 228 | |||
| 229 | Mice: | ||
| 230 | ---------- | ||
| 231 | REL_{X,Y} must be reported when the mouse moves. BTN_LEFT must be used to report | ||
| 232 | the primary button press. BTN_{MIDDLE,RIGHT,4,5,etc.} should be used to report | ||
| 233 | further buttons of the device. REL_WHEEL and REL_HWHEEL should be used to report | ||
| 234 | scroll wheel events where available. | ||
| 235 | |||
| 236 | Touchscreens: | ||
| 237 | ---------- | ||
| 238 | ABS_{X,Y} must be reported with the location of the touch. BTN_TOUCH must be | ||
| 239 | used to report when a touch is active on the screen. | ||
| 240 | BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch | ||
| 241 | contact. BTN_TOOL_<name> events should be reported where possible. | ||
| 242 | |||
| 243 | Trackpads: | ||
| 244 | ---------- | ||
| 245 | Legacy trackpads that only provide relative position information must report | ||
| 246 | events like mice described above. | ||
| 247 | |||
| 248 | Trackpads that provide absolute touch position must report ABS_{X,Y} for the | ||
| 249 | location of the touch. BTN_TOUCH should be used to report when a touch is active | ||
| 250 | on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should | ||
| 251 | be used to report the number of touches active on the trackpad. | ||
| 252 | |||
| 253 | Tablets: | ||
| 254 | ---------- | ||
| 255 | BTN_TOOL_<name> events must be reported when a stylus or other tool is active on | ||
| 256 | the tablet. ABS_{X,Y} must be reported with the location of the tool. BTN_TOUCH | ||
| 257 | should be used to report when the tool is in contact with the tablet. | ||
| 258 | BTN_{STYLUS,STYLUS2} should be used to report buttons on the tool itself. Any | ||
| 259 | button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}. | ||
| 260 | BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use | ||
| 261 | meaningful buttons, like BTN_FORWARD, unless the button is labeled for that | ||
| 262 | purpose on the device. | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 649600cb8ec9..ec3600306289 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -184,10 +184,9 @@ F: Documentation/filesystems/9p.txt | |||
| 184 | F: fs/9p/ | 184 | F: fs/9p/ |
| 185 | 185 | ||
| 186 | A2232 SERIAL BOARD DRIVER | 186 | A2232 SERIAL BOARD DRIVER |
| 187 | M: Enver Haase <A2232@gmx.net> | ||
| 188 | L: linux-m68k@lists.linux-m68k.org | 187 | L: linux-m68k@lists.linux-m68k.org |
| 189 | S: Maintained | 188 | S: Orphan |
| 190 | F: drivers/char/ser_a2232* | 189 | F: drivers/staging/generic_serial/ser_a2232* |
| 191 | 190 | ||
| 192 | AACRAID SCSI RAID DRIVER | 191 | AACRAID SCSI RAID DRIVER |
| 193 | M: Adaptec OEM Raid Solutions <aacraid@adaptec.com> | 192 | M: Adaptec OEM Raid Solutions <aacraid@adaptec.com> |
| @@ -877,6 +876,13 @@ F: arch/arm/mach-mv78xx0/ | |||
| 877 | F: arch/arm/mach-orion5x/ | 876 | F: arch/arm/mach-orion5x/ |
| 878 | F: arch/arm/plat-orion/ | 877 | F: arch/arm/plat-orion/ |
| 879 | 878 | ||
| 879 | ARM/Orion SoC/Technologic Systems TS-78xx platform support | ||
| 880 | M: Alexander Clouter <alex@digriz.org.uk> | ||
| 881 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
| 882 | W: http://www.digriz.org.uk/ts78xx/kernel | ||
| 883 | S: Maintained | ||
| 884 | F: arch/arm/mach-orion5x/ts78xx-* | ||
| 885 | |||
| 880 | ARM/MIOA701 MACHINE SUPPORT | 886 | ARM/MIOA701 MACHINE SUPPORT |
| 881 | M: Robert Jarzmik <robert.jarzmik@free.fr> | 887 | M: Robert Jarzmik <robert.jarzmik@free.fr> |
| 882 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 888 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
| @@ -1063,7 +1069,7 @@ F: arch/arm/mach-shmobile/ | |||
| 1063 | F: drivers/sh/ | 1069 | F: drivers/sh/ |
| 1064 | 1070 | ||
| 1065 | ARM/TELECHIPS ARM ARCHITECTURE | 1071 | ARM/TELECHIPS ARM ARCHITECTURE |
| 1066 | M: "Hans J. Koch" <hjk@linutronix.de> | 1072 | M: "Hans J. Koch" <hjk@hansjkoch.de> |
| 1067 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1073 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
| 1068 | S: Maintained | 1074 | S: Maintained |
| 1069 | F: arch/arm/plat-tcc/ | 1075 | F: arch/arm/plat-tcc/ |
| @@ -1823,11 +1829,10 @@ S: Maintained | |||
| 1823 | F: drivers/platform/x86/compal-laptop.c | 1829 | F: drivers/platform/x86/compal-laptop.c |
| 1824 | 1830 | ||
| 1825 | COMPUTONE INTELLIPORT MULTIPORT CARD | 1831 | COMPUTONE INTELLIPORT MULTIPORT CARD |
| 1826 | M: "Michael H. Warfield" <mhw@wittsend.com> | ||
| 1827 | W: http://www.wittsend.com/computone.html | 1832 | W: http://www.wittsend.com/computone.html |
| 1828 | S: Maintained | 1833 | S: Orphan |
| 1829 | F: Documentation/serial/computone.txt | 1834 | F: Documentation/serial/computone.txt |
| 1830 | F: drivers/char/ip2/ | 1835 | F: drivers/staging/tty/ip2/ |
| 1831 | 1836 | ||
| 1832 | CONEXANT ACCESSRUNNER USB DRIVER | 1837 | CONEXANT ACCESSRUNNER USB DRIVER |
| 1833 | M: Simon Arlott <cxacru@fire.lp0.eu> | 1838 | M: Simon Arlott <cxacru@fire.lp0.eu> |
| @@ -2010,7 +2015,7 @@ F: drivers/net/wan/cycx* | |||
| 2010 | CYCLADES ASYNC MUX DRIVER | 2015 | CYCLADES ASYNC MUX DRIVER |
| 2011 | W: http://www.cyclades.com/ | 2016 | W: http://www.cyclades.com/ |
| 2012 | S: Orphan | 2017 | S: Orphan |
| 2013 | F: drivers/char/cyclades.c | 2018 | F: drivers/tty/cyclades.c |
| 2014 | F: include/linux/cyclades.h | 2019 | F: include/linux/cyclades.h |
| 2015 | 2020 | ||
| 2016 | CYCLADES PC300 DRIVER | 2021 | CYCLADES PC300 DRIVER |
| @@ -2124,8 +2129,8 @@ L: Eng.Linux@digi.com | |||
| 2124 | W: http://www.digi.com | 2129 | W: http://www.digi.com |
| 2125 | S: Orphan | 2130 | S: Orphan |
| 2126 | F: Documentation/serial/digiepca.txt | 2131 | F: Documentation/serial/digiepca.txt |
| 2127 | F: drivers/char/epca* | 2132 | F: drivers/staging/tty/epca* |
| 2128 | F: drivers/char/digi* | 2133 | F: drivers/staging/tty/digi* |
| 2129 | 2134 | ||
| 2130 | DIOLAN U2C-12 I2C DRIVER | 2135 | DIOLAN U2C-12 I2C DRIVER |
| 2131 | M: Guenter Roeck <guenter.roeck@ericsson.com> | 2136 | M: Guenter Roeck <guenter.roeck@ericsson.com> |
| @@ -4077,7 +4082,7 @@ F: drivers/video/matrox/matroxfb_* | |||
| 4077 | F: include/linux/matroxfb.h | 4082 | F: include/linux/matroxfb.h |
| 4078 | 4083 | ||
| 4079 | MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER | 4084 | MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER |
| 4080 | M: "Hans J. Koch" <hjk@linutronix.de> | 4085 | M: "Hans J. Koch" <hjk@hansjkoch.de> |
| 4081 | L: lm-sensors@lm-sensors.org | 4086 | L: lm-sensors@lm-sensors.org |
| 4082 | S: Maintained | 4087 | S: Maintained |
| 4083 | F: Documentation/hwmon/max6650 | 4088 | F: Documentation/hwmon/max6650 |
| @@ -4192,7 +4197,7 @@ MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD | |||
| 4192 | M: Jiri Slaby <jirislaby@gmail.com> | 4197 | M: Jiri Slaby <jirislaby@gmail.com> |
| 4193 | S: Maintained | 4198 | S: Maintained |
| 4194 | F: Documentation/serial/moxa-smartio | 4199 | F: Documentation/serial/moxa-smartio |
| 4195 | F: drivers/char/mxser.* | 4200 | F: drivers/tty/mxser.* |
| 4196 | 4201 | ||
| 4197 | MSI LAPTOP SUPPORT | 4202 | MSI LAPTOP SUPPORT |
| 4198 | M: "Lee, Chun-Yi" <jlee@novell.com> | 4203 | M: "Lee, Chun-Yi" <jlee@novell.com> |
| @@ -4234,7 +4239,7 @@ F: sound/oss/msnd* | |||
| 4234 | 4239 | ||
| 4235 | MULTITECH MULTIPORT CARD (ISICOM) | 4240 | MULTITECH MULTIPORT CARD (ISICOM) |
| 4236 | S: Orphan | 4241 | S: Orphan |
| 4237 | F: drivers/char/isicom.c | 4242 | F: drivers/tty/isicom.c |
| 4238 | F: include/linux/isicom.h | 4243 | F: include/linux/isicom.h |
| 4239 | 4244 | ||
| 4240 | MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER | 4245 | MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER |
| @@ -5273,14 +5278,14 @@ F: drivers/memstick/host/r592.* | |||
| 5273 | RISCOM8 DRIVER | 5278 | RISCOM8 DRIVER |
| 5274 | S: Orphan | 5279 | S: Orphan |
| 5275 | F: Documentation/serial/riscom8.txt | 5280 | F: Documentation/serial/riscom8.txt |
| 5276 | F: drivers/char/riscom8* | 5281 | F: drivers/staging/tty/riscom8* |
| 5277 | 5282 | ||
| 5278 | ROCKETPORT DRIVER | 5283 | ROCKETPORT DRIVER |
| 5279 | P: Comtrol Corp. | 5284 | P: Comtrol Corp. |
| 5280 | W: http://www.comtrol.com | 5285 | W: http://www.comtrol.com |
| 5281 | S: Maintained | 5286 | S: Maintained |
| 5282 | F: Documentation/serial/rocket.txt | 5287 | F: Documentation/serial/rocket.txt |
| 5283 | F: drivers/char/rocket* | 5288 | F: drivers/tty/rocket* |
| 5284 | 5289 | ||
| 5285 | ROSE NETWORK LAYER | 5290 | ROSE NETWORK LAYER |
| 5286 | M: Ralf Baechle <ralf@linux-mips.org> | 5291 | M: Ralf Baechle <ralf@linux-mips.org> |
| @@ -5916,10 +5921,9 @@ F: arch/arm/mach-spear6xx/spear600.c | |||
| 5916 | F: arch/arm/mach-spear6xx/spear600_evb.c | 5921 | F: arch/arm/mach-spear6xx/spear600_evb.c |
| 5917 | 5922 | ||
| 5918 | SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER | 5923 | SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER |
| 5919 | M: Roger Wolff <R.E.Wolff@BitWizard.nl> | 5924 | S: Orphan |
| 5920 | S: Supported | ||
| 5921 | F: Documentation/serial/specialix.txt | 5925 | F: Documentation/serial/specialix.txt |
| 5922 | F: drivers/char/specialix* | 5926 | F: drivers/staging/tty/specialix* |
| 5923 | 5927 | ||
| 5924 | SPI SUBSYSTEM | 5928 | SPI SUBSYSTEM |
| 5925 | M: David Brownell <dbrownell@users.sourceforge.net> | 5929 | M: David Brownell <dbrownell@users.sourceforge.net> |
| @@ -5964,7 +5968,6 @@ F: arch/alpha/kernel/srm_env.c | |||
| 5964 | 5968 | ||
| 5965 | STABLE BRANCH | 5969 | STABLE BRANCH |
| 5966 | M: Greg Kroah-Hartman <greg@kroah.com> | 5970 | M: Greg Kroah-Hartman <greg@kroah.com> |
| 5967 | M: Chris Wright <chrisw@sous-sol.org> | ||
| 5968 | L: stable@kernel.org | 5971 | L: stable@kernel.org |
| 5969 | S: Maintained | 5972 | S: Maintained |
| 5970 | 5973 | ||
| @@ -6248,7 +6251,8 @@ M: Greg Ungerer <gerg@uclinux.org> | |||
| 6248 | W: http://www.uclinux.org/ | 6251 | W: http://www.uclinux.org/ |
| 6249 | L: uclinux-dev@uclinux.org (subscribers-only) | 6252 | L: uclinux-dev@uclinux.org (subscribers-only) |
| 6250 | S: Maintained | 6253 | S: Maintained |
| 6251 | F: arch/m68knommu/ | 6254 | F: arch/m68k/*/*_no.* |
| 6255 | F: arch/m68k/include/asm/*_no.* | ||
| 6252 | 6256 | ||
| 6253 | UCLINUX FOR RENESAS H8/300 (H8300) | 6257 | UCLINUX FOR RENESAS H8/300 (H8300) |
| 6254 | M: Yoshinori Sato <ysato@users.sourceforge.jp> | 6258 | M: Yoshinori Sato <ysato@users.sourceforge.jp> |
| @@ -6618,7 +6622,7 @@ F: fs/hostfs/ | |||
| 6618 | F: fs/hppfs/ | 6622 | F: fs/hppfs/ |
| 6619 | 6623 | ||
| 6620 | USERSPACE I/O (UIO) | 6624 | USERSPACE I/O (UIO) |
| 6621 | M: "Hans J. Koch" <hjk@linutronix.de> | 6625 | M: "Hans J. Koch" <hjk@hansjkoch.de> |
| 6622 | M: Greg Kroah-Hartman <gregkh@suse.de> | 6626 | M: Greg Kroah-Hartman <gregkh@suse.de> |
| 6623 | S: Maintained | 6627 | S: Maintained |
| 6624 | F: Documentation/DocBook/uio-howto.tmpl | 6628 | F: Documentation/DocBook/uio-howto.tmpl |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 2 | 1 | VERSION = 2 |
| 2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
| 3 | SUBLEVEL = 39 | 3 | SUBLEVEL = 39 |
| 4 | EXTRAVERSION = -rc3 | 4 | EXTRAVERSION = -rc4 |
| 5 | NAME = Flesh-Eating Bats with Fangs | 5 | NAME = Flesh-Eating Bats with Fangs |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile index 9bb7b858ed23..7a6d908bb865 100644 --- a/arch/alpha/kernel/Makefile +++ b/arch/alpha/kernel/Makefile | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | extra-y := head.o vmlinux.lds | 5 | extra-y := head.o vmlinux.lds |
| 6 | asflags-y := $(KBUILD_CFLAGS) | 6 | asflags-y := $(KBUILD_CFLAGS) |
| 7 | ccflags-y := -Werror -Wno-sign-compare | 7 | ccflags-y := -Wno-sign-compare |
| 8 | 8 | ||
| 9 | obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \ | 9 | obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \ |
| 10 | irq_alpha.o signal.o setup.o ptrace.o time.o \ | 10 | irq_alpha.o signal.o setup.o ptrace.o time.o \ |
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c index 381fec0af52e..da7bcc372f16 100644 --- a/arch/alpha/kernel/core_mcpcia.c +++ b/arch/alpha/kernel/core_mcpcia.c | |||
| @@ -88,7 +88,7 @@ conf_read(unsigned long addr, unsigned char type1, | |||
| 88 | { | 88 | { |
| 89 | unsigned long flags; | 89 | unsigned long flags; |
| 90 | unsigned long mid = MCPCIA_HOSE2MID(hose->index); | 90 | unsigned long mid = MCPCIA_HOSE2MID(hose->index); |
| 91 | unsigned int stat0, value, temp, cpu; | 91 | unsigned int stat0, value, cpu; |
| 92 | 92 | ||
| 93 | cpu = smp_processor_id(); | 93 | cpu = smp_processor_id(); |
| 94 | 94 | ||
| @@ -101,7 +101,7 @@ conf_read(unsigned long addr, unsigned char type1, | |||
| 101 | stat0 = *(vuip)MCPCIA_CAP_ERR(mid); | 101 | stat0 = *(vuip)MCPCIA_CAP_ERR(mid); |
| 102 | *(vuip)MCPCIA_CAP_ERR(mid) = stat0; | 102 | *(vuip)MCPCIA_CAP_ERR(mid) = stat0; |
| 103 | mb(); | 103 | mb(); |
| 104 | temp = *(vuip)MCPCIA_CAP_ERR(mid); | 104 | *(vuip)MCPCIA_CAP_ERR(mid); |
| 105 | DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0)); | 105 | DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0)); |
| 106 | 106 | ||
| 107 | mb(); | 107 | mb(); |
| @@ -136,7 +136,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1, | |||
| 136 | { | 136 | { |
| 137 | unsigned long flags; | 137 | unsigned long flags; |
| 138 | unsigned long mid = MCPCIA_HOSE2MID(hose->index); | 138 | unsigned long mid = MCPCIA_HOSE2MID(hose->index); |
| 139 | unsigned int stat0, temp, cpu; | 139 | unsigned int stat0, cpu; |
| 140 | 140 | ||
| 141 | cpu = smp_processor_id(); | 141 | cpu = smp_processor_id(); |
| 142 | 142 | ||
| @@ -145,7 +145,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1, | |||
| 145 | /* Reset status register to avoid losing errors. */ | 145 | /* Reset status register to avoid losing errors. */ |
| 146 | stat0 = *(vuip)MCPCIA_CAP_ERR(mid); | 146 | stat0 = *(vuip)MCPCIA_CAP_ERR(mid); |
| 147 | *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); | 147 | *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); |
| 148 | temp = *(vuip)MCPCIA_CAP_ERR(mid); | 148 | *(vuip)MCPCIA_CAP_ERR(mid); |
| 149 | DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0)); | 149 | DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0)); |
| 150 | 150 | ||
| 151 | draina(); | 151 | draina(); |
| @@ -157,7 +157,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1, | |||
| 157 | *((vuip)addr) = value; | 157 | *((vuip)addr) = value; |
| 158 | mb(); | 158 | mb(); |
| 159 | mb(); /* magic */ | 159 | mb(); /* magic */ |
| 160 | temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */ | 160 | *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */ |
| 161 | mcheck_expected(cpu) = 0; | 161 | mcheck_expected(cpu) = 0; |
| 162 | mb(); | 162 | mb(); |
| 163 | 163 | ||
| @@ -572,12 +572,10 @@ mcpcia_print_system_area(unsigned long la_ptr) | |||
| 572 | void | 572 | void |
| 573 | mcpcia_machine_check(unsigned long vector, unsigned long la_ptr) | 573 | mcpcia_machine_check(unsigned long vector, unsigned long la_ptr) |
| 574 | { | 574 | { |
| 575 | struct el_common *mchk_header; | ||
| 576 | struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout; | 575 | struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout; |
| 577 | unsigned int cpu = smp_processor_id(); | 576 | unsigned int cpu = smp_processor_id(); |
| 578 | int expected; | 577 | int expected; |
| 579 | 578 | ||
| 580 | mchk_header = (struct el_common *)la_ptr; | ||
| 581 | mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr; | 579 | mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr; |
| 582 | expected = mcheck_expected(cpu); | 580 | expected = mcheck_expected(cpu); |
| 583 | 581 | ||
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c index c3b3781a03de..14b26c466c89 100644 --- a/arch/alpha/kernel/err_titan.c +++ b/arch/alpha/kernel/err_titan.c | |||
| @@ -533,8 +533,6 @@ static struct el_subpacket_annotation el_titan_annotations[] = { | |||
| 533 | static struct el_subpacket * | 533 | static struct el_subpacket * |
| 534 | el_process_regatta_subpacket(struct el_subpacket *header) | 534 | el_process_regatta_subpacket(struct el_subpacket *header) |
| 535 | { | 535 | { |
| 536 | int status; | ||
| 537 | |||
| 538 | if (header->class != EL_CLASS__REGATTA_FAMILY) { | 536 | if (header->class != EL_CLASS__REGATTA_FAMILY) { |
| 539 | printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", | 537 | printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", |
| 540 | err_print_prefix, | 538 | err_print_prefix, |
| @@ -551,7 +549,7 @@ el_process_regatta_subpacket(struct el_subpacket *header) | |||
| 551 | printk("%s ** Occurred on CPU %d:\n", | 549 | printk("%s ** Occurred on CPU %d:\n", |
| 552 | err_print_prefix, | 550 | err_print_prefix, |
| 553 | (int)header->by_type.regatta_frame.cpuid); | 551 | (int)header->by_type.regatta_frame.cpuid); |
| 554 | status = privateer_process_logout_frame((struct el_common *) | 552 | privateer_process_logout_frame((struct el_common *) |
| 555 | header->by_type.regatta_frame.data_start, 1); | 553 | header->by_type.regatta_frame.data_start, 1); |
| 556 | break; | 554 | break; |
| 557 | default: | 555 | default: |
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 1479dc6ebd97..51b7fbd9e4c1 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c | |||
| @@ -228,7 +228,7 @@ struct irqaction timer_irqaction = { | |||
| 228 | void __init | 228 | void __init |
| 229 | init_rtc_irq(void) | 229 | init_rtc_irq(void) |
| 230 | { | 230 | { |
| 231 | irq_set_chip_and_handler_name(RTC_IRQ, &no_irq_chip, | 231 | irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip, |
| 232 | handle_simple_irq, "RTC"); | 232 | handle_simple_irq, "RTC"); |
| 233 | setup_irq(RTC_IRQ, &timer_irqaction); | 233 | setup_irq(RTC_IRQ, &timer_irqaction); |
| 234 | } | 234 | } |
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index d2634e4476b4..edbddcbd5bc6 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c | |||
| @@ -1404,8 +1404,6 @@ determine_cpu_caches (unsigned int cpu_type) | |||
| 1404 | case PCA56_CPU: | 1404 | case PCA56_CPU: |
| 1405 | case PCA57_CPU: | 1405 | case PCA57_CPU: |
| 1406 | { | 1406 | { |
| 1407 | unsigned long cbox_config, size; | ||
| 1408 | |||
| 1409 | if (cpu_type == PCA56_CPU) { | 1407 | if (cpu_type == PCA56_CPU) { |
| 1410 | L1I = CSHAPE(16*1024, 6, 1); | 1408 | L1I = CSHAPE(16*1024, 6, 1); |
| 1411 | L1D = CSHAPE(8*1024, 5, 1); | 1409 | L1D = CSHAPE(8*1024, 5, 1); |
| @@ -1415,10 +1413,12 @@ determine_cpu_caches (unsigned int cpu_type) | |||
| 1415 | } | 1413 | } |
| 1416 | L3 = -1; | 1414 | L3 = -1; |
| 1417 | 1415 | ||
| 1416 | #if 0 | ||
| 1417 | unsigned long cbox_config, size; | ||
| 1418 | |||
| 1418 | cbox_config = *(vulp) phys_to_virt (0xfffff00008UL); | 1419 | cbox_config = *(vulp) phys_to_virt (0xfffff00008UL); |
| 1419 | size = 512*1024 * (1 << ((cbox_config >> 12) & 3)); | 1420 | size = 512*1024 * (1 << ((cbox_config >> 12) & 3)); |
| 1420 | 1421 | ||
| 1421 | #if 0 | ||
| 1422 | L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1); | 1422 | L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1); |
| 1423 | #else | 1423 | #else |
| 1424 | L2 = external_cache_probe(512*1024, 6); | 1424 | L2 = external_cache_probe(512*1024, 6); |
diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c index 3e6a2893af9f..6886b834f487 100644 --- a/arch/alpha/kernel/smc37c93x.c +++ b/arch/alpha/kernel/smc37c93x.c | |||
| @@ -79,7 +79,6 @@ | |||
| 79 | static unsigned long __init SMCConfigState(unsigned long baseAddr) | 79 | static unsigned long __init SMCConfigState(unsigned long baseAddr) |
| 80 | { | 80 | { |
| 81 | unsigned char devId; | 81 | unsigned char devId; |
| 82 | unsigned char devRev; | ||
| 83 | 82 | ||
| 84 | unsigned long configPort; | 83 | unsigned long configPort; |
| 85 | unsigned long indexPort; | 84 | unsigned long indexPort; |
| @@ -100,7 +99,7 @@ static unsigned long __init SMCConfigState(unsigned long baseAddr) | |||
| 100 | devId = inb(dataPort); | 99 | devId = inb(dataPort); |
| 101 | if (devId == VALID_DEVICE_ID) { | 100 | if (devId == VALID_DEVICE_ID) { |
| 102 | outb(DEVICE_REV, indexPort); | 101 | outb(DEVICE_REV, indexPort); |
| 103 | devRev = inb(dataPort); | 102 | /* unsigned char devRev = */ inb(dataPort); |
| 104 | break; | 103 | break; |
| 105 | } | 104 | } |
| 106 | else | 105 | else |
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index d3cb28bb8eb0..d92cdc715c65 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c | |||
| @@ -156,7 +156,6 @@ static void __init | |||
| 156 | wildfire_init_irq_per_pca(int qbbno, int pcano) | 156 | wildfire_init_irq_per_pca(int qbbno, int pcano) |
| 157 | { | 157 | { |
| 158 | int i, irq_bias; | 158 | int i, irq_bias; |
| 159 | unsigned long io_bias; | ||
| 160 | static struct irqaction isa_enable = { | 159 | static struct irqaction isa_enable = { |
| 161 | .handler = no_action, | 160 | .handler = no_action, |
| 162 | .name = "isa_enable", | 161 | .name = "isa_enable", |
| @@ -165,10 +164,12 @@ wildfire_init_irq_per_pca(int qbbno, int pcano) | |||
| 165 | irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) | 164 | irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) |
| 166 | + pcano * WILDFIRE_IRQ_PER_PCA; | 165 | + pcano * WILDFIRE_IRQ_PER_PCA; |
| 167 | 166 | ||
| 167 | #if 0 | ||
| 168 | unsigned long io_bias; | ||
| 169 | |||
| 168 | /* Only need the following for first PCI bus per PCA. */ | 170 | /* Only need the following for first PCI bus per PCA. */ |
| 169 | io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS; | 171 | io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS; |
| 170 | 172 | ||
| 171 | #if 0 | ||
| 172 | outb(0, DMA1_RESET_REG + io_bias); | 173 | outb(0, DMA1_RESET_REG + io_bias); |
| 173 | outb(0, DMA2_RESET_REG + io_bias); | 174 | outb(0, DMA2_RESET_REG + io_bias); |
| 174 | outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias); | 175 | outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias); |
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index a58e84f1a63b..918e8e0b72ff 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c | |||
| @@ -153,6 +153,7 @@ void read_persistent_clock(struct timespec *ts) | |||
| 153 | year += 100; | 153 | year += 100; |
| 154 | 154 | ||
| 155 | ts->tv_sec = mktime(year, mon, day, hour, min, sec); | 155 | ts->tv_sec = mktime(year, mon, day, hour, min, sec); |
| 156 | ts->tv_nsec = 0; | ||
| 156 | } | 157 | } |
| 157 | 158 | ||
| 158 | 159 | ||
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index 32f147998cd9..c0deacae778d 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig | |||
| @@ -63,6 +63,7 @@ config MACH_DAVINCI_EVM | |||
| 63 | depends on ARCH_DAVINCI_DM644x | 63 | depends on ARCH_DAVINCI_DM644x |
| 64 | select MISC_DEVICES | 64 | select MISC_DEVICES |
| 65 | select EEPROM_AT24 | 65 | select EEPROM_AT24 |
| 66 | select I2C | ||
| 66 | help | 67 | help |
| 67 | Configure this option to specify the whether the board used | 68 | Configure this option to specify the whether the board used |
| 68 | for development is a DM644x EVM | 69 | for development is a DM644x EVM |
| @@ -72,6 +73,7 @@ config MACH_SFFSDR | |||
| 72 | depends on ARCH_DAVINCI_DM644x | 73 | depends on ARCH_DAVINCI_DM644x |
| 73 | select MISC_DEVICES | 74 | select MISC_DEVICES |
| 74 | select EEPROM_AT24 | 75 | select EEPROM_AT24 |
| 76 | select I2C | ||
| 75 | help | 77 | help |
| 76 | Say Y here to select the Lyrtech Small Form Factor | 78 | Say Y here to select the Lyrtech Small Form Factor |
| 77 | Software Defined Radio (SFFSDR) board. | 79 | Software Defined Radio (SFFSDR) board. |
| @@ -105,6 +107,7 @@ config MACH_DAVINCI_DM6467_EVM | |||
| 105 | select MACH_DAVINCI_DM6467TEVM | 107 | select MACH_DAVINCI_DM6467TEVM |
| 106 | select MISC_DEVICES | 108 | select MISC_DEVICES |
| 107 | select EEPROM_AT24 | 109 | select EEPROM_AT24 |
| 110 | select I2C | ||
| 108 | help | 111 | help |
| 109 | Configure this option to specify the whether the board used | 112 | Configure this option to specify the whether the board used |
| 110 | for development is a DM6467 EVM | 113 | for development is a DM6467 EVM |
| @@ -118,6 +121,7 @@ config MACH_DAVINCI_DM365_EVM | |||
| 118 | depends on ARCH_DAVINCI_DM365 | 121 | depends on ARCH_DAVINCI_DM365 |
| 119 | select MISC_DEVICES | 122 | select MISC_DEVICES |
| 120 | select EEPROM_AT24 | 123 | select EEPROM_AT24 |
| 124 | select I2C | ||
| 121 | help | 125 | help |
| 122 | Configure this option to specify whether the board used | 126 | Configure this option to specify whether the board used |
| 123 | for development is a DM365 EVM | 127 | for development is a DM365 EVM |
| @@ -129,6 +133,7 @@ config MACH_DAVINCI_DA830_EVM | |||
| 129 | select GPIO_PCF857X | 133 | select GPIO_PCF857X |
| 130 | select MISC_DEVICES | 134 | select MISC_DEVICES |
| 131 | select EEPROM_AT24 | 135 | select EEPROM_AT24 |
| 136 | select I2C | ||
| 132 | help | 137 | help |
| 133 | Say Y here to select the TI DA830/OMAP-L137/AM17x Evaluation Module. | 138 | Say Y here to select the TI DA830/OMAP-L137/AM17x Evaluation Module. |
| 134 | 139 | ||
| @@ -205,6 +210,7 @@ config MACH_MITYOMAPL138 | |||
| 205 | depends on ARCH_DAVINCI_DA850 | 210 | depends on ARCH_DAVINCI_DA850 |
| 206 | select MISC_DEVICES | 211 | select MISC_DEVICES |
| 207 | select EEPROM_AT24 | 212 | select EEPROM_AT24 |
| 213 | select I2C | ||
| 208 | help | 214 | help |
| 209 | Say Y here to select the Critical Link MityDSP-L138/MityARM-1808 | 215 | Say Y here to select the Critical Link MityDSP-L138/MityARM-1808 |
| 210 | System on Module. Information on this SoM may be found at | 216 | System on Module. Information on this SoM may be found at |
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c index 2aa79c54f98e..606a6f27ed6c 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | #include <mach/mux.h> | 29 | #include <mach/mux.h> |
| 30 | #include <mach/spi.h> | 30 | #include <mach/spi.h> |
| 31 | 31 | ||
| 32 | #define MITYOMAPL138_PHY_ID "0:03" | 32 | #define MITYOMAPL138_PHY_ID "" |
| 33 | 33 | ||
| 34 | #define FACTORY_CONFIG_MAGIC 0x012C0138 | 34 | #define FACTORY_CONFIG_MAGIC 0x012C0138 |
| 35 | #define FACTORY_CONFIG_VERSION 0x00010001 | 35 | #define FACTORY_CONFIG_VERSION 0x00010001 |
| @@ -414,7 +414,7 @@ static struct resource mityomapl138_nandflash_resource[] = { | |||
| 414 | 414 | ||
| 415 | static struct platform_device mityomapl138_nandflash_device = { | 415 | static struct platform_device mityomapl138_nandflash_device = { |
| 416 | .name = "davinci_nand", | 416 | .name = "davinci_nand", |
| 417 | .id = 0, | 417 | .id = 1, |
| 418 | .dev = { | 418 | .dev = { |
| 419 | .platform_data = &mityomapl138_nandflash_data, | 419 | .platform_data = &mityomapl138_nandflash_data, |
| 420 | }, | 420 | }, |
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index 625d4b66718b..58a02dc7b15a 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c | |||
| @@ -39,7 +39,8 @@ | |||
| 39 | #define DA8XX_GPIO_BASE 0x01e26000 | 39 | #define DA8XX_GPIO_BASE 0x01e26000 |
| 40 | #define DA8XX_I2C1_BASE 0x01e28000 | 40 | #define DA8XX_I2C1_BASE 0x01e28000 |
| 41 | #define DA8XX_SPI0_BASE 0x01c41000 | 41 | #define DA8XX_SPI0_BASE 0x01c41000 |
| 42 | #define DA8XX_SPI1_BASE 0x01f0e000 | 42 | #define DA830_SPI1_BASE 0x01e12000 |
| 43 | #define DA850_SPI1_BASE 0x01f0e000 | ||
| 43 | 44 | ||
| 44 | #define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000 | 45 | #define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000 |
| 45 | #define DA8XX_EMAC_MOD_REG_OFFSET 0x2000 | 46 | #define DA8XX_EMAC_MOD_REG_OFFSET 0x2000 |
| @@ -762,8 +763,8 @@ static struct resource da8xx_spi0_resources[] = { | |||
| 762 | 763 | ||
| 763 | static struct resource da8xx_spi1_resources[] = { | 764 | static struct resource da8xx_spi1_resources[] = { |
| 764 | [0] = { | 765 | [0] = { |
| 765 | .start = DA8XX_SPI1_BASE, | 766 | .start = DA830_SPI1_BASE, |
| 766 | .end = DA8XX_SPI1_BASE + SZ_4K - 1, | 767 | .end = DA830_SPI1_BASE + SZ_4K - 1, |
| 767 | .flags = IORESOURCE_MEM, | 768 | .flags = IORESOURCE_MEM, |
| 768 | }, | 769 | }, |
| 769 | [1] = { | 770 | [1] = { |
| @@ -832,5 +833,10 @@ int __init da8xx_register_spi(int instance, struct spi_board_info *info, | |||
| 832 | 833 | ||
| 833 | da8xx_spi_pdata[instance].num_chipselect = len; | 834 | da8xx_spi_pdata[instance].num_chipselect = len; |
| 834 | 835 | ||
| 836 | if (instance == 1 && cpu_is_davinci_da850()) { | ||
| 837 | da8xx_spi1_resources[0].start = DA850_SPI1_BASE; | ||
| 838 | da8xx_spi1_resources[0].end = DA850_SPI1_BASE + SZ_4K - 1; | ||
| 839 | } | ||
| 840 | |||
| 835 | return platform_device_register(&da8xx_spi_device[instance]); | 841 | return platform_device_register(&da8xx_spi_device[instance]); |
| 836 | } | 842 | } |
diff --git a/arch/arm/mach-davinci/include/mach/debug-macro.S b/arch/arm/mach-davinci/include/mach/debug-macro.S index 9f1befc5ac38..f8b7ea4f6235 100644 --- a/arch/arm/mach-davinci/include/mach/debug-macro.S +++ b/arch/arm/mach-davinci/include/mach/debug-macro.S | |||
| @@ -24,6 +24,9 @@ | |||
| 24 | 24 | ||
| 25 | #define UART_SHIFT 2 | 25 | #define UART_SHIFT 2 |
| 26 | 26 | ||
| 27 | #define davinci_uart_v2p(x) ((x) - PAGE_OFFSET + PLAT_PHYS_OFFSET) | ||
| 28 | #define davinci_uart_p2v(x) ((x) - PLAT_PHYS_OFFSET + PAGE_OFFSET) | ||
| 29 | |||
| 27 | .pushsection .data | 30 | .pushsection .data |
| 28 | davinci_uart_phys: .word 0 | 31 | davinci_uart_phys: .word 0 |
| 29 | davinci_uart_virt: .word 0 | 32 | davinci_uart_virt: .word 0 |
| @@ -34,7 +37,7 @@ davinci_uart_virt: .word 0 | |||
| 34 | /* Use davinci_uart_phys/virt if already configured */ | 37 | /* Use davinci_uart_phys/virt if already configured */ |
| 35 | 10: mrc p15, 0, \rp, c1, c0 | 38 | 10: mrc p15, 0, \rp, c1, c0 |
| 36 | tst \rp, #1 @ MMU enabled? | 39 | tst \rp, #1 @ MMU enabled? |
| 37 | ldreq \rp, =__virt_to_phys(davinci_uart_phys) | 40 | ldreq \rp, =davinci_uart_v2p(davinci_uart_phys) |
| 38 | ldrne \rp, =davinci_uart_phys | 41 | ldrne \rp, =davinci_uart_phys |
| 39 | add \rv, \rp, #4 @ davinci_uart_virt | 42 | add \rv, \rp, #4 @ davinci_uart_virt |
| 40 | ldr \rp, [\rp, #0] | 43 | ldr \rp, [\rp, #0] |
| @@ -48,18 +51,18 @@ davinci_uart_virt: .word 0 | |||
| 48 | tst \rp, #1 @ MMU enabled? | 51 | tst \rp, #1 @ MMU enabled? |
| 49 | 52 | ||
| 50 | /* Copy uart phys address from decompressor uart info */ | 53 | /* Copy uart phys address from decompressor uart info */ |
| 51 | ldreq \rv, =__virt_to_phys(davinci_uart_phys) | 54 | ldreq \rv, =davinci_uart_v2p(davinci_uart_phys) |
| 52 | ldrne \rv, =davinci_uart_phys | 55 | ldrne \rv, =davinci_uart_phys |
| 53 | ldreq \rp, =DAVINCI_UART_INFO | 56 | ldreq \rp, =DAVINCI_UART_INFO |
| 54 | ldrne \rp, =__phys_to_virt(DAVINCI_UART_INFO) | 57 | ldrne \rp, =davinci_uart_p2v(DAVINCI_UART_INFO) |
| 55 | ldr \rp, [\rp, #0] | 58 | ldr \rp, [\rp, #0] |
| 56 | str \rp, [\rv] | 59 | str \rp, [\rv] |
| 57 | 60 | ||
| 58 | /* Copy uart virt address from decompressor uart info */ | 61 | /* Copy uart virt address from decompressor uart info */ |
| 59 | ldreq \rv, =__virt_to_phys(davinci_uart_virt) | 62 | ldreq \rv, =davinci_uart_v2p(davinci_uart_virt) |
| 60 | ldrne \rv, =davinci_uart_virt | 63 | ldrne \rv, =davinci_uart_virt |
| 61 | ldreq \rp, =DAVINCI_UART_INFO | 64 | ldreq \rp, =DAVINCI_UART_INFO |
| 62 | ldrne \rp, =__phys_to_virt(DAVINCI_UART_INFO) | 65 | ldrne \rp, =davinci_uart_p2v(DAVINCI_UART_INFO) |
| 63 | ldr \rp, [\rp, #4] | 66 | ldr \rp, [\rp, #4] |
| 64 | str \rp, [\rv] | 67 | str \rp, [\rv] |
| 65 | 68 | ||
diff --git a/arch/arm/mach-davinci/include/mach/serial.h b/arch/arm/mach-davinci/include/mach/serial.h index 8051110b8ac3..c9e6ce185a66 100644 --- a/arch/arm/mach-davinci/include/mach/serial.h +++ b/arch/arm/mach-davinci/include/mach/serial.h | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | * | 22 | * |
| 23 | * This area sits just below the page tables (see arch/arm/kernel/head.S). | 23 | * This area sits just below the page tables (see arch/arm/kernel/head.S). |
| 24 | */ | 24 | */ |
| 25 | #define DAVINCI_UART_INFO (PHYS_OFFSET + 0x3ff8) | 25 | #define DAVINCI_UART_INFO (PLAT_PHYS_OFFSET + 0x3ff8) |
| 26 | 26 | ||
| 27 | #define DAVINCI_UART0_BASE (IO_PHYS + 0x20000) | 27 | #define DAVINCI_UART0_BASE (IO_PHYS + 0x20000) |
| 28 | #define DAVINCI_UART1_BASE (IO_PHYS + 0x20400) | 28 | #define DAVINCI_UART1_BASE (IO_PHYS + 0x20400) |
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c index 7f568611547e..6a96911b0ad5 100644 --- a/arch/arm/mach-msm/board-qsd8x50.c +++ b/arch/arm/mach-msm/board-qsd8x50.c | |||
| @@ -160,10 +160,7 @@ static struct msm_mmc_platform_data qsd8x50_sdc1_data = { | |||
| 160 | 160 | ||
| 161 | static void __init qsd8x50_init_mmc(void) | 161 | static void __init qsd8x50_init_mmc(void) |
| 162 | { | 162 | { |
| 163 | if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa()) | 163 | vreg_mmc = vreg_get(NULL, "gp5"); |
| 164 | vreg_mmc = vreg_get(NULL, "gp6"); | ||
| 165 | else | ||
| 166 | vreg_mmc = vreg_get(NULL, "gp5"); | ||
| 167 | 164 | ||
| 168 | if (IS_ERR(vreg_mmc)) { | 165 | if (IS_ERR(vreg_mmc)) { |
| 169 | pr_err("vreg get for vreg_mmc failed (%ld)\n", | 166 | pr_err("vreg get for vreg_mmc failed (%ld)\n", |
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 56f920c55b6a..38b95e949d13 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c | |||
| @@ -269,7 +269,7 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt) | |||
| 269 | 269 | ||
| 270 | /* Use existing clock_event for cpu 0 */ | 270 | /* Use existing clock_event for cpu 0 */ |
| 271 | if (!smp_processor_id()) | 271 | if (!smp_processor_id()) |
| 272 | return; | 272 | return 0; |
| 273 | 273 | ||
| 274 | writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL); | 274 | writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL); |
| 275 | 275 | ||
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c index 76a3f654220f..65a1aba6823d 100644 --- a/arch/arm/mach-tegra/gpio.c +++ b/arch/arm/mach-tegra/gpio.c | |||
| @@ -257,7 +257,8 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
| 257 | void tegra_gpio_resume(void) | 257 | void tegra_gpio_resume(void) |
| 258 | { | 258 | { |
| 259 | unsigned long flags; | 259 | unsigned long flags; |
| 260 | int b, p, i; | 260 | int b; |
| 261 | int p; | ||
| 261 | 262 | ||
| 262 | local_irq_save(flags); | 263 | local_irq_save(flags); |
| 263 | 264 | ||
| @@ -280,7 +281,8 @@ void tegra_gpio_resume(void) | |||
| 280 | void tegra_gpio_suspend(void) | 281 | void tegra_gpio_suspend(void) |
| 281 | { | 282 | { |
| 282 | unsigned long flags; | 283 | unsigned long flags; |
| 283 | int b, p, i; | 284 | int b; |
| 285 | int p; | ||
| 284 | 286 | ||
| 285 | local_irq_save(flags); | 287 | local_irq_save(flags); |
| 286 | for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) { | 288 | for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) { |
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c index 6d7c4eea4dcb..4459470c052d 100644 --- a/arch/arm/mach-tegra/tegra2_clocks.c +++ b/arch/arm/mach-tegra/tegra2_clocks.c | |||
| @@ -1362,14 +1362,15 @@ static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate) | |||
| 1362 | { | 1362 | { |
| 1363 | unsigned long flags; | 1363 | unsigned long flags; |
| 1364 | int ret; | 1364 | int ret; |
| 1365 | long new_rate = rate; | ||
| 1365 | 1366 | ||
| 1366 | rate = clk_round_rate(c->parent, rate); | 1367 | new_rate = clk_round_rate(c->parent, new_rate); |
| 1367 | if (rate < 0) | 1368 | if (new_rate < 0) |
| 1368 | return rate; | 1369 | return new_rate; |
| 1369 | 1370 | ||
| 1370 | spin_lock_irqsave(&c->parent->spinlock, flags); | 1371 | spin_lock_irqsave(&c->parent->spinlock, flags); |
| 1371 | 1372 | ||
| 1372 | c->u.shared_bus_user.rate = rate; | 1373 | c->u.shared_bus_user.rate = new_rate; |
| 1373 | ret = tegra_clk_shared_bus_update(c->parent); | 1374 | ret = tegra_clk_shared_bus_update(c->parent); |
| 1374 | 1375 | ||
| 1375 | spin_unlock_irqrestore(&c->parent->spinlock, flags); | 1376 | spin_unlock_irqrestore(&c->parent->spinlock, flags); |
diff --git a/arch/arm/plat-s5p/pm.c b/arch/arm/plat-s5p/pm.c index d592b6304b48..d15dc47b0e3d 100644 --- a/arch/arm/plat-s5p/pm.c +++ b/arch/arm/plat-s5p/pm.c | |||
| @@ -19,17 +19,6 @@ | |||
| 19 | 19 | ||
| 20 | #define PFX "s5p pm: " | 20 | #define PFX "s5p pm: " |
| 21 | 21 | ||
| 22 | /* s3c_pm_check_resume_pin | ||
| 23 | * | ||
| 24 | * check to see if the pin is configured correctly for sleep mode, and | ||
| 25 | * make any necessary adjustments if it is not | ||
| 26 | */ | ||
| 27 | |||
| 28 | static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs) | ||
| 29 | { | ||
| 30 | /* nothing here yet */ | ||
| 31 | } | ||
| 32 | |||
| 33 | /* s3c_pm_configure_extint | 22 | /* s3c_pm_configure_extint |
| 34 | * | 23 | * |
| 35 | * configure all external interrupt pins | 24 | * configure all external interrupt pins |
diff --git a/arch/arm/plat-samsung/pm-check.c b/arch/arm/plat-samsung/pm-check.c index e4baf76f374a..6b733fafe7cd 100644 --- a/arch/arm/plat-samsung/pm-check.c +++ b/arch/arm/plat-samsung/pm-check.c | |||
| @@ -164,7 +164,6 @@ static inline int in_region(void *ptr, int size, void *what, size_t whatsz) | |||
| 164 | */ | 164 | */ |
| 165 | static u32 *s3c_pm_runcheck(struct resource *res, u32 *val) | 165 | static u32 *s3c_pm_runcheck(struct resource *res, u32 *val) |
| 166 | { | 166 | { |
| 167 | void *save_at = phys_to_virt(s3c_sleep_save_phys); | ||
| 168 | unsigned long addr; | 167 | unsigned long addr; |
| 169 | unsigned long left; | 168 | unsigned long left; |
| 170 | void *stkpage; | 169 | void *stkpage; |
| @@ -192,11 +191,6 @@ static u32 *s3c_pm_runcheck(struct resource *res, u32 *val) | |||
| 192 | goto skip_check; | 191 | goto skip_check; |
| 193 | } | 192 | } |
| 194 | 193 | ||
| 195 | if (in_region(ptr, left, save_at, 32*4 )) { | ||
| 196 | S3C_PMDBG("skipping %08lx, has save block in\n", addr); | ||
| 197 | goto skip_check; | ||
| 198 | } | ||
| 199 | |||
| 200 | /* calculate and check the checksum */ | 194 | /* calculate and check the checksum */ |
| 201 | 195 | ||
| 202 | calc = crc32_le(~0, ptr, left); | 196 | calc = crc32_le(~0, ptr, left); |
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index d5b58d31903c..5c0a440d6e16 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c | |||
| @@ -214,8 +214,9 @@ void s3c_pm_do_restore_core(struct sleep_save *ptr, int count) | |||
| 214 | * | 214 | * |
| 215 | * print any IRQs asserted at resume time (ie, we woke from) | 215 | * print any IRQs asserted at resume time (ie, we woke from) |
| 216 | */ | 216 | */ |
| 217 | static void s3c_pm_show_resume_irqs(int start, unsigned long which, | 217 | static void __maybe_unused s3c_pm_show_resume_irqs(int start, |
| 218 | unsigned long mask) | 218 | unsigned long which, |
| 219 | unsigned long mask) | ||
| 219 | { | 220 | { |
| 220 | int i; | 221 | int i; |
| 221 | 222 | ||
diff --git a/arch/avr32/include/asm/setup.h b/arch/avr32/include/asm/setup.h index ff5b7cf6be4d..160543dbec7e 100644 --- a/arch/avr32/include/asm/setup.h +++ b/arch/avr32/include/asm/setup.h | |||
| @@ -94,6 +94,13 @@ struct tag_ethernet { | |||
| 94 | 94 | ||
| 95 | #define ETH_INVALID_PHY 0xff | 95 | #define ETH_INVALID_PHY 0xff |
| 96 | 96 | ||
| 97 | /* board information */ | ||
| 98 | #define ATAG_BOARDINFO 0x54410008 | ||
| 99 | |||
| 100 | struct tag_boardinfo { | ||
| 101 | u32 board_number; | ||
| 102 | }; | ||
| 103 | |||
| 97 | struct tag { | 104 | struct tag { |
| 98 | struct tag_header hdr; | 105 | struct tag_header hdr; |
| 99 | union { | 106 | union { |
| @@ -102,6 +109,7 @@ struct tag { | |||
| 102 | struct tag_cmdline cmdline; | 109 | struct tag_cmdline cmdline; |
| 103 | struct tag_clock clock; | 110 | struct tag_clock clock; |
| 104 | struct tag_ethernet ethernet; | 111 | struct tag_ethernet ethernet; |
| 112 | struct tag_boardinfo boardinfo; | ||
| 105 | } u; | 113 | } u; |
| 106 | }; | 114 | }; |
| 107 | 115 | ||
| @@ -128,6 +136,7 @@ extern struct tag *bootloader_tags; | |||
| 128 | 136 | ||
| 129 | extern resource_size_t fbmem_start; | 137 | extern resource_size_t fbmem_start; |
| 130 | extern resource_size_t fbmem_size; | 138 | extern resource_size_t fbmem_size; |
| 139 | extern u32 board_number; | ||
| 131 | 140 | ||
| 132 | void setup_processor(void); | 141 | void setup_processor(void); |
| 133 | 142 | ||
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c index 5c7083916c33..bb0974cce4ac 100644 --- a/arch/avr32/kernel/setup.c +++ b/arch/avr32/kernel/setup.c | |||
| @@ -391,6 +391,21 @@ static int __init parse_tag_clock(struct tag *tag) | |||
| 391 | __tagtable(ATAG_CLOCK, parse_tag_clock); | 391 | __tagtable(ATAG_CLOCK, parse_tag_clock); |
| 392 | 392 | ||
| 393 | /* | 393 | /* |
| 394 | * The board_number correspond to the bd->bi_board_number in U-Boot. This | ||
| 395 | * parameter is only available during initialisation and can be used in some | ||
| 396 | * kind of board identification. | ||
| 397 | */ | ||
| 398 | u32 __initdata board_number; | ||
| 399 | |||
| 400 | static int __init parse_tag_boardinfo(struct tag *tag) | ||
| 401 | { | ||
| 402 | board_number = tag->u.boardinfo.board_number; | ||
| 403 | |||
| 404 | return 0; | ||
| 405 | } | ||
| 406 | __tagtable(ATAG_BOARDINFO, parse_tag_boardinfo); | ||
| 407 | |||
| 408 | /* | ||
| 394 | * Scan the tag table for this tag, and call its parse function. The | 409 | * Scan the tag table for this tag, and call its parse function. The |
| 395 | * tag table is built by the linker from all the __tagtable | 410 | * tag table is built by the linker from all the __tagtable |
| 396 | * declarations. | 411 | * declarations. |
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c index b91b2044af9c..7aa25756412f 100644 --- a/arch/avr32/kernel/traps.c +++ b/arch/avr32/kernel/traps.c | |||
| @@ -95,28 +95,6 @@ void _exception(long signr, struct pt_regs *regs, int code, | |||
| 95 | info.si_code = code; | 95 | info.si_code = code; |
| 96 | info.si_addr = (void __user *)addr; | 96 | info.si_addr = (void __user *)addr; |
| 97 | force_sig_info(signr, &info, current); | 97 | force_sig_info(signr, &info, current); |
| 98 | |||
| 99 | /* | ||
| 100 | * Init gets no signals that it doesn't have a handler for. | ||
| 101 | * That's all very well, but if it has caused a synchronous | ||
| 102 | * exception and we ignore the resulting signal, it will just | ||
| 103 | * generate the same exception over and over again and we get | ||
| 104 | * nowhere. Better to kill it and let the kernel panic. | ||
| 105 | */ | ||
| 106 | if (is_global_init(current)) { | ||
| 107 | __sighandler_t handler; | ||
| 108 | |||
| 109 | spin_lock_irq(¤t->sighand->siglock); | ||
| 110 | handler = current->sighand->action[signr-1].sa.sa_handler; | ||
| 111 | spin_unlock_irq(¤t->sighand->siglock); | ||
| 112 | if (handler == SIG_DFL) { | ||
| 113 | /* init has generated a synchronous exception | ||
| 114 | and it doesn't have a handler for the signal */ | ||
| 115 | printk(KERN_CRIT "init has generated signal %ld " | ||
| 116 | "but has no handler for it\n", signr); | ||
| 117 | do_exit(signr); | ||
| 118 | } | ||
| 119 | } | ||
| 120 | } | 98 | } |
| 121 | 99 | ||
| 122 | asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs) | 100 | asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs) |
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c index 442f08c5e641..86925fd6ea5b 100644 --- a/arch/avr32/mach-at32ap/clock.c +++ b/arch/avr32/mach-at32ap/clock.c | |||
| @@ -35,22 +35,30 @@ void at32_clk_register(struct clk *clk) | |||
| 35 | spin_unlock(&clk_list_lock); | 35 | spin_unlock(&clk_list_lock); |
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | struct clk *clk_get(struct device *dev, const char *id) | 38 | static struct clk *__clk_get(struct device *dev, const char *id) |
| 39 | { | 39 | { |
| 40 | struct clk *clk; | 40 | struct clk *clk; |
| 41 | 41 | ||
| 42 | spin_lock(&clk_list_lock); | ||
| 43 | |||
| 44 | list_for_each_entry(clk, &at32_clock_list, list) { | 42 | list_for_each_entry(clk, &at32_clock_list, list) { |
| 45 | if (clk->dev == dev && strcmp(id, clk->name) == 0) { | 43 | if (clk->dev == dev && strcmp(id, clk->name) == 0) { |
| 46 | spin_unlock(&clk_list_lock); | ||
| 47 | return clk; | 44 | return clk; |
| 48 | } | 45 | } |
| 49 | } | 46 | } |
| 50 | 47 | ||
| 51 | spin_unlock(&clk_list_lock); | ||
| 52 | return ERR_PTR(-ENOENT); | 48 | return ERR_PTR(-ENOENT); |
| 53 | } | 49 | } |
| 50 | |||
| 51 | struct clk *clk_get(struct device *dev, const char *id) | ||
| 52 | { | ||
| 53 | struct clk *clk; | ||
| 54 | |||
| 55 | spin_lock(&clk_list_lock); | ||
| 56 | clk = __clk_get(dev, id); | ||
| 57 | spin_unlock(&clk_list_lock); | ||
| 58 | |||
| 59 | return clk; | ||
| 60 | } | ||
| 61 | |||
| 54 | EXPORT_SYMBOL(clk_get); | 62 | EXPORT_SYMBOL(clk_get); |
| 55 | 63 | ||
| 56 | void clk_put(struct clk *clk) | 64 | void clk_put(struct clk *clk) |
| @@ -257,15 +265,15 @@ static int clk_show(struct seq_file *s, void *unused) | |||
| 257 | spin_lock(&clk_list_lock); | 265 | spin_lock(&clk_list_lock); |
| 258 | 266 | ||
| 259 | /* show clock tree as derived from the three oscillators */ | 267 | /* show clock tree as derived from the three oscillators */ |
| 260 | clk = clk_get(NULL, "osc32k"); | 268 | clk = __clk_get(NULL, "osc32k"); |
| 261 | dump_clock(clk, &r); | 269 | dump_clock(clk, &r); |
| 262 | clk_put(clk); | 270 | clk_put(clk); |
| 263 | 271 | ||
| 264 | clk = clk_get(NULL, "osc0"); | 272 | clk = __clk_get(NULL, "osc0"); |
| 265 | dump_clock(clk, &r); | 273 | dump_clock(clk, &r); |
| 266 | clk_put(clk); | 274 | clk_put(clk); |
| 267 | 275 | ||
| 268 | clk = clk_get(NULL, "osc1"); | 276 | clk = __clk_get(NULL, "osc1"); |
| 269 | dump_clock(clk, &r); | 277 | dump_clock(clk, &r); |
| 270 | clk_put(clk); | 278 | clk_put(clk); |
| 271 | 279 | ||
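The clk_get() rework is the usual split into an unlocked helper plus a locking wrapper: clk_get() still takes clk_list_lock around the list walk, while clk_show(), which already holds that lock, now calls __clk_get() directly instead of deadlocking on the non-recursive spinlock. A minimal userspace sketch of the same pattern, using a pthread mutex and made-up registry/lookup names:

/*
 * Sketch of the "unlocked helper + locking wrapper" split used above.
 * The registry, item names and values are made up for illustration.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct item {
	const char *name;
	int value;
};

static struct item registry[] = {
	{ "osc32k", 32768 },
	{ "osc0",   20000000 },
};
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold registry_lock, like __clk_get() above */
static struct item *__lookup(const char *name)
{
	for (size_t i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
		if (strcmp(registry[i].name, name) == 0)
			return &registry[i];
	return NULL;
}

/* public entry point: takes the lock itself, like clk_get() */
static struct item *lookup(const char *name)
{
	pthread_mutex_lock(&registry_lock);
	struct item *it = __lookup(name);
	pthread_mutex_unlock(&registry_lock);
	return it;
}

/* dump path: already holds the lock, so it must use the unlocked
 * helper, mirroring clk_show() calling __clk_get() */
static void dump_all(void)
{
	static const char *names[] = { "osc32k", "osc0" };

	pthread_mutex_lock(&registry_lock);
	for (size_t i = 0; i < 2; i++) {
		struct item *it = __lookup(names[i]);
		if (it)
			printf("%s = %d\n", it->name, it->value);
	}
	pthread_mutex_unlock(&registry_lock);
}

int main(void)
{
	dump_all();
	printf("direct lookup: %d\n", lookup("osc0")->value);
	return 0;
}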
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c index 47ba4b9b6db1..fbc2aeaebddb 100644 --- a/arch/avr32/mach-at32ap/extint.c +++ b/arch/avr32/mach-at32ap/extint.c | |||
| @@ -61,34 +61,34 @@ struct eic { | |||
| 61 | static struct eic *nmi_eic; | 61 | static struct eic *nmi_eic; |
| 62 | static bool nmi_enabled; | 62 | static bool nmi_enabled; |
| 63 | 63 | ||
| 64 | static void eic_ack_irq(struct irq_chip *d) | 64 | static void eic_ack_irq(struct irq_data *d) |
| 65 | { | 65 | { |
| 66 | struct eic *eic = irq_data_get_irq_chip_data(data); | 66 | struct eic *eic = irq_data_get_irq_chip_data(d); |
| 67 | eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); | 67 | eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | static void eic_mask_irq(struct irq_chip *d) | 70 | static void eic_mask_irq(struct irq_data *d) |
| 71 | { | 71 | { |
| 72 | struct eic *eic = irq_data_get_irq_chip_data(data); | 72 | struct eic *eic = irq_data_get_irq_chip_data(d); |
| 73 | eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); | 73 | eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | static void eic_mask_ack_irq(struct irq_chip *d) | 76 | static void eic_mask_ack_irq(struct irq_data *d) |
| 77 | { | 77 | { |
| 78 | struct eic *eic = irq_data_get_irq_chip_data(data); | 78 | struct eic *eic = irq_data_get_irq_chip_data(d); |
| 79 | eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); | 79 | eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); |
| 80 | eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); | 80 | eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | static void eic_unmask_irq(struct irq_chip *d) | 83 | static void eic_unmask_irq(struct irq_data *d) |
| 84 | { | 84 | { |
| 85 | struct eic *eic = irq_data_get_irq_chip_data(data); | 85 | struct eic *eic = irq_data_get_irq_chip_data(d); |
| 86 | eic_writel(eic, IER, 1 << (d->irq - eic->first_irq)); | 86 | eic_writel(eic, IER, 1 << (d->irq - eic->first_irq)); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static int eic_set_irq_type(struct irq_chip *d, unsigned int flow_type) | 89 | static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type) |
| 90 | { | 90 | { |
| 91 | struct eic *eic = irq_data_get_irq_chip_data(data); | 91 | struct eic *eic = irq_data_get_irq_chip_data(d); |
| 92 | unsigned int irq = d->irq; | 92 | unsigned int irq = d->irq; |
| 93 | unsigned int i = irq - eic->first_irq; | 93 | unsigned int i = irq - eic->first_irq; |
| 94 | u32 mode, edge, level; | 94 | u32 mode, edge, level; |
| @@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev) | |||
| 191 | 191 | ||
| 192 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 192 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 193 | int_irq = platform_get_irq(pdev, 0); | 193 | int_irq = platform_get_irq(pdev, 0); |
| 194 | if (!regs || !int_irq) { | 194 | if (!regs || (int)int_irq <= 0) { |
| 195 | dev_dbg(&pdev->dev, "missing regs and/or irq resource\n"); | 195 | dev_dbg(&pdev->dev, "missing regs and/or irq resource\n"); |
| 196 | return -ENXIO; | 196 | return -ENXIO; |
| 197 | } | 197 | } |
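The eic_probe() fix tightens the IRQ check because platform_get_irq() reports failure as a negative errno (and 0 is not a usable IRQ here), so the old !int_irq test let negative error values slip through. A small sketch of checking a "negative errno or valid positive value" return; get_resource_id() is a stand-in, not a real kernel or libc function:

/*
 * Illustration of the "negative errno on failure, positive id on
 * success" return convention that the (int)int_irq <= 0 check guards.
 */
#include <errno.h>
#include <stdio.h>

static int get_resource_id(int available)
{
	return available ? 17 : -ENXIO;   /* valid id or negative errno */
}

int main(void)
{
	int id = get_resource_id(0);

	if (id <= 0) {                    /* catches both 0 and -ENXIO */
		fprintf(stderr, "missing resource (err %d)\n", id);
		return 1;
	}
	printf("using id %d\n", id);
	return 0;
}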
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c index f308e1ddc629..2e0aa853a4bc 100644 --- a/arch/avr32/mach-at32ap/pio.c +++ b/arch/avr32/mach-at32ap/pio.c | |||
| @@ -257,7 +257,7 @@ static void gpio_irq_mask(struct irq_data *d) | |||
| 257 | pio_writel(pio, IDR, 1 << (gpio & 0x1f)); | 257 | pio_writel(pio, IDR, 1 << (gpio & 0x1f)); |
| 258 | } | 258 | } |
| 259 | 259 | ||
| 260 | static void gpio_irq_unmask(struct irq_data *d)) | 260 | static void gpio_irq_unmask(struct irq_data *d) |
| 261 | { | 261 | { |
| 262 | unsigned gpio = irq_to_gpio(d->irq); | 262 | unsigned gpio = irq_to_gpio(d->irq); |
| 263 | struct pio_device *pio = &pio_dev[gpio >> 5]; | 263 | struct pio_device *pio = &pio_dev[gpio >> 5]; |
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S index 17503b0ed6c9..f868f4ce761b 100644 --- a/arch/avr32/mach-at32ap/pm-at32ap700x.S +++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S | |||
| @@ -53,7 +53,7 @@ cpu_enter_idle: | |||
| 53 | st.w r8[TI_flags], r9 | 53 | st.w r8[TI_flags], r9 |
| 54 | unmask_interrupts | 54 | unmask_interrupts |
| 55 | sleep CPU_SLEEP_IDLE | 55 | sleep CPU_SLEEP_IDLE |
| 56 | .size cpu_idle_sleep, . - cpu_idle_sleep | 56 | .size cpu_enter_idle, . - cpu_enter_idle |
| 57 | 57 | ||
| 58 | /* | 58 | /* |
| 59 | * Common return path for PM functions that don't run from | 59 | * Common return path for PM functions that don't run from |
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h index 19e2c7c3e63a..44bd0cced725 100644 --- a/arch/blackfin/include/asm/system.h +++ b/arch/blackfin/include/asm/system.h | |||
| @@ -19,11 +19,11 @@ | |||
| 19 | * Force strict CPU ordering. | 19 | * Force strict CPU ordering. |
| 20 | */ | 20 | */ |
| 21 | #define nop() __asm__ __volatile__ ("nop;\n\t" : : ) | 21 | #define nop() __asm__ __volatile__ ("nop;\n\t" : : ) |
| 22 | #define mb() __asm__ __volatile__ ("" : : : "memory") | 22 | #define smp_mb() mb() |
| 23 | #define rmb() __asm__ __volatile__ ("" : : : "memory") | 23 | #define smp_rmb() rmb() |
| 24 | #define wmb() __asm__ __volatile__ ("" : : : "memory") | 24 | #define smp_wmb() wmb() |
| 25 | #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) | 25 | #define set_mb(var, value) do { var = value; mb(); } while (0) |
| 26 | #define read_barrier_depends() do { } while(0) | 26 | #define smp_read_barrier_depends() read_barrier_depends() |
| 27 | 27 | ||
| 28 | #ifdef CONFIG_SMP | 28 | #ifdef CONFIG_SMP |
| 29 | asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value); | 29 | asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value); |
| @@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr, | |||
| 37 | unsigned long new, unsigned long old); | 37 | unsigned long new, unsigned long old); |
| 38 | 38 | ||
| 39 | #ifdef __ARCH_SYNC_CORE_DCACHE | 39 | #ifdef __ARCH_SYNC_CORE_DCACHE |
| 40 | # define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) | 40 | /* Force Core data cache coherence */ |
| 41 | # define smp_rmb() do { barrier(); smp_check_barrier(); } while (0) | 41 | # define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) |
| 42 | # define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0) | 42 | # define rmb() do { barrier(); smp_check_barrier(); } while (0) |
| 43 | #define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) | 43 | # define wmb() do { barrier(); smp_mark_barrier(); } while (0) |
| 44 | 44 | # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) | |
| 45 | #else | 45 | #else |
| 46 | # define smp_mb() barrier() | 46 | # define mb() barrier() |
| 47 | # define smp_rmb() barrier() | 47 | # define rmb() barrier() |
| 48 | # define smp_wmb() barrier() | 48 | # define wmb() barrier() |
| 49 | #define smp_read_barrier_depends() barrier() | 49 | # define read_barrier_depends() do { } while (0) |
| 50 | #endif | 50 | #endif |
| 51 | 51 | ||
| 52 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | 52 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, |
| @@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
| 99 | 99 | ||
| 100 | #else /* !CONFIG_SMP */ | 100 | #else /* !CONFIG_SMP */ |
| 101 | 101 | ||
| 102 | #define smp_mb() barrier() | 102 | #define mb() barrier() |
| 103 | #define smp_rmb() barrier() | 103 | #define rmb() barrier() |
| 104 | #define smp_wmb() barrier() | 104 | #define wmb() barrier() |
| 105 | #define smp_read_barrier_depends() do { } while(0) | 105 | #define read_barrier_depends() do { } while (0) |
| 106 | 106 | ||
| 107 | struct __xchg_dummy { | 107 | struct __xchg_dummy { |
| 108 | unsigned long a[100]; | 108 | unsigned long a[100]; |
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c index cdbe075de1dc..8b81dc04488a 100644 --- a/arch/blackfin/kernel/gptimers.c +++ b/arch/blackfin/kernel/gptimers.c | |||
| @@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask) | |||
| 268 | _disable_gptimers(mask); | 268 | _disable_gptimers(mask); |
| 269 | for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i) | 269 | for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i) |
| 270 | if (mask & (1 << i)) | 270 | if (mask & (1 << i)) |
| 271 | group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i]; | 271 | group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i]; |
| 272 | SSYNC(); | 272 | SSYNC(); |
| 273 | } | 273 | } |
| 274 | EXPORT_SYMBOL(disable_gptimers); | 274 | EXPORT_SYMBOL(disable_gptimers); |
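The gptimers change (writing trun_mask[i] instead of OR-ing it in) is the usual pattern for a status register whose bits are write-1-to-clear: reading the register and OR-ing in one bit writes back every currently pending bit and clears them all as a side effect. A simulated-register sketch of why the plain assignment is the right form (the register model below is illustrative, not Blackfin hardware):

/*
 * Simulated write-1-to-clear (W1C) status register: each 1 written
 * clears that bit, 0 leaves it alone.  Shows why "reg = bit" is right
 * and "reg |= bit" wipes unrelated pending bits.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t status = 0x0f;        /* four timers pending */

static void w1c_write(uint32_t val)
{
	status &= ~val;               /* hardware clears the bits written as 1 */
}

int main(void)
{
	w1c_write(1 << 1);            /* "=" style: clear only timer 1 */
	printf("after '=' write:  0x%x\n", status);   /* 0xd */

	status = 0x0f;
	w1c_write(status | (1 << 1)); /* "|=" style: writes back all set bits */
	printf("after '|=' write: 0x%x\n", status);   /* 0x0 - all cleared */
	return 0;
}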
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index 8c9a43daf80f..cdb4beb6bc8f 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c | |||
| @@ -206,8 +206,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id) | |||
| 206 | { | 206 | { |
| 207 | struct clock_event_device *evt = dev_id; | 207 | struct clock_event_device *evt = dev_id; |
| 208 | smp_mb(); | 208 | smp_mb(); |
| 209 | evt->event_handler(evt); | 209 | /* |
| 210 | * We want to ACK before we handle so that we can handle smaller timer | ||
| 211 | * intervals. This way if the timer expires again while we're handling | ||
| 212 | * things, we're more likely to see that 2nd int rather than swallowing | ||
| 213 | * it by ACKing the int at the end of this handler. | ||
| 214 | */ | ||
| 210 | bfin_gptmr0_ack(); | 215 | bfin_gptmr0_ack(); |
| 216 | evt->event_handler(evt); | ||
| 211 | return IRQ_HANDLED; | 217 | return IRQ_HANDLED; |
| 212 | } | 218 | } |
| 213 | 219 | ||
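The comment added above is the whole story: acknowledging before running the handler means a timer that fires again mid-handler latches a fresh pending interrupt instead of being wiped by a late ack. A single-threaded simulation of the two orderings (purely illustrative, no real timer hardware is modelled):

/*
 * Contrast handle-then-ack with ack-then-handle for a latched
 * interrupt source.
 */
#include <stdbool.h>
#include <stdio.h>

static bool pending;
static int refires;                 /* extra firings that happen mid-handler */

static void handler(void)
{
	if (refires) {
		refires--;
		pending = true;     /* the timer expires again while handling */
	}
}

static void ack(void)
{
	pending = false;
}

int main(void)
{
	int seen;

	/* handle then ack: the late ack wipes the interrupt that arrived
	 * while the handler was running, so only one is serviced */
	pending = true; refires = 1; seen = 0;
	while (pending) { handler(); ack(); seen++; }
	printf("handle-then-ack: %d interrupt(s) serviced\n", seen);

	/* ack then handle: the mid-handler interrupt stays latched and is
	 * serviced on the next pass */
	pending = true; refires = 1; seen = 0;
	while (pending) { ack(); handler(); seen++; }
	printf("ack-then-handle: %d interrupt(s) serviced\n", seen);
	return 0;
}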
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 6e17a265c4d3..8bce5ed031e4 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c | |||
| @@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info) | |||
| 109 | struct blackfin_flush_data *fdata = info; | 109 | struct blackfin_flush_data *fdata = info; |
| 110 | 110 | ||
| 111 | /* Invalidate the memory holding the bounds of the flushed region. */ | 111 | /* Invalidate the memory holding the bounds of the flushed region. */ |
| 112 | invalidate_dcache_range((unsigned long)fdata, | 112 | blackfin_dcache_invalidate_range((unsigned long)fdata, |
| 113 | (unsigned long)fdata + sizeof(*fdata)); | 113 | (unsigned long)fdata + sizeof(*fdata)); |
| 114 | |||
| 115 | /* Make sure all write buffers in the data side of the core | ||
| 116 | * are flushed before trying to invalidate the icache. This | ||
| 117 | * needs to be after the data flush and before the icache | ||
| 118 | * flush so that the SSYNC does the right thing in preventing | ||
| 119 | * the instruction prefetcher from hitting things in cached | ||
| 120 | * memory at the wrong time -- it runs much further ahead than | ||
| 121 | * the pipeline. | ||
| 122 | */ | ||
| 123 | SSYNC(); | ||
| 114 | 124 | ||
| 115 | flush_icache_range(fdata->start, fdata->end); | 125 | /* ipi_flush_icache is invoked by generic flush_icache_range, |
| 126 | * so call blackfin arch icache flush directly here. | ||
| 127 | */ | ||
| 128 | blackfin_icache_flush_range(fdata->start, fdata->end); | ||
| 116 | } | 129 | } |
| 117 | 130 | ||
| 118 | static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) | 131 | static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) |
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 26d851d385bb..29e17907d9f2 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
| @@ -343,10 +343,14 @@ | |||
| 343 | #define __NR_fanotify_init 337 | 343 | #define __NR_fanotify_init 337 |
| 344 | #define __NR_fanotify_mark 338 | 344 | #define __NR_fanotify_mark 338 |
| 345 | #define __NR_prlimit64 339 | 345 | #define __NR_prlimit64 339 |
| 346 | #define __NR_name_to_handle_at 340 | ||
| 347 | #define __NR_open_by_handle_at 341 | ||
| 348 | #define __NR_clock_adjtime 342 | ||
| 349 | #define __NR_syncfs 343 | ||
| 346 | 350 | ||
| 347 | #ifdef __KERNEL__ | 351 | #ifdef __KERNEL__ |
| 348 | 352 | ||
| 349 | #define NR_syscalls 340 | 353 | #define NR_syscalls 344 |
| 350 | 354 | ||
| 351 | #define __ARCH_WANT_IPC_PARSE_VERSION | 355 | #define __ARCH_WANT_IPC_PARSE_VERSION |
| 352 | #define __ARCH_WANT_OLD_READDIR | 356 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S index 1559dea36e55..1359ee659574 100644 --- a/arch/m68k/kernel/entry_mm.S +++ b/arch/m68k/kernel/entry_mm.S | |||
| @@ -750,4 +750,8 @@ sys_call_table: | |||
| 750 | .long sys_fanotify_init | 750 | .long sys_fanotify_init |
| 751 | .long sys_fanotify_mark | 751 | .long sys_fanotify_mark |
| 752 | .long sys_prlimit64 | 752 | .long sys_prlimit64 |
| 753 | .long sys_name_to_handle_at /* 340 */ | ||
| 754 | .long sys_open_by_handle_at | ||
| 755 | .long sys_clock_adjtime | ||
| 756 | .long sys_syncfs | ||
| 753 | 757 | ||
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 79b1ed198c07..9b8393d8adb8 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S | |||
| @@ -358,6 +358,10 @@ ENTRY(sys_call_table) | |||
| 358 | .long sys_fanotify_init | 358 | .long sys_fanotify_init |
| 359 | .long sys_fanotify_mark | 359 | .long sys_fanotify_mark |
| 360 | .long sys_prlimit64 | 360 | .long sys_prlimit64 |
| 361 | .long sys_name_to_handle_at /* 340 */ | ||
| 362 | .long sys_open_by_handle_at | ||
| 363 | .long sys_clock_adjtime | ||
| 364 | .long sys_syncfs | ||
| 361 | 365 | ||
| 362 | .rept NR_syscalls-(.-sys_call_table)/4 | 366 | .rept NR_syscalls-(.-sys_call_table)/4 |
| 363 | .long sys_ni_syscall | 367 | .long sys_ni_syscall |
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 851b3bf6e962..eccdefe70d4e 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
| @@ -6,7 +6,6 @@ config MICROBLAZE | |||
| 6 | select HAVE_FUNCTION_GRAPH_TRACER | 6 | select HAVE_FUNCTION_GRAPH_TRACER |
| 7 | select HAVE_DYNAMIC_FTRACE | 7 | select HAVE_DYNAMIC_FTRACE |
| 8 | select HAVE_FTRACE_MCOUNT_RECORD | 8 | select HAVE_FTRACE_MCOUNT_RECORD |
| 9 | select USB_ARCH_HAS_EHCI | ||
| 10 | select ARCH_WANT_OPTIONAL_GPIOLIB | 9 | select ARCH_WANT_OPTIONAL_GPIOLIB |
| 11 | select HAVE_OPROFILE | 10 | select HAVE_OPROFILE |
| 12 | select HAVE_ARCH_KGDB | 11 | select HAVE_ARCH_KGDB |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b6ff882f695b..8f4d50b0adfa 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
| @@ -209,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE | |||
| 209 | config ARCH_SUSPEND_POSSIBLE | 209 | config ARCH_SUSPEND_POSSIBLE |
| 210 | def_bool y | 210 | def_bool y |
| 211 | depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \ | 211 | depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \ |
| 212 | PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x | 212 | (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x |
| 213 | 213 | ||
| 214 | config PPC_DCR_NATIVE | 214 | config PPC_DCR_NATIVE |
| 215 | bool | 215 | bool |
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index be3cdf9134ce..1833d1a07e79 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h | |||
| @@ -382,10 +382,12 @@ extern const char *powerpc_base_platform; | |||
| 382 | #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ | 382 | #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ |
| 383 | CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ | 383 | CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ |
| 384 | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) | 384 | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) |
| 385 | #define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ | 385 | #define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ |
| 386 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ | ||
| 387 | CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ | 386 | CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ |
| 388 | CPU_FTR_DBELL) | 387 | CPU_FTR_DBELL) |
| 388 | #define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ | ||
| 389 | CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ | ||
| 390 | CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD) | ||
| 389 | #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) | 391 | #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) |
| 390 | 392 | ||
| 391 | /* 64-bit CPUs */ | 393 | /* 64-bit CPUs */ |
| @@ -435,11 +437,15 @@ extern const char *powerpc_base_platform; | |||
| 435 | #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) | 437 | #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) |
| 436 | 438 | ||
| 437 | #ifdef __powerpc64__ | 439 | #ifdef __powerpc64__ |
| 440 | #ifdef CONFIG_PPC_BOOK3E | ||
| 441 | #define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500) | ||
| 442 | #else | ||
| 438 | #define CPU_FTRS_POSSIBLE \ | 443 | #define CPU_FTRS_POSSIBLE \ |
| 439 | (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ | 444 | (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ |
| 440 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ | 445 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ |
| 441 | CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ | 446 | CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ |
| 442 | CPU_FTR_1T_SEGMENT | CPU_FTR_VSX) | 447 | CPU_FTR_1T_SEGMENT | CPU_FTR_VSX) |
| 448 | #endif | ||
| 443 | #else | 449 | #else |
| 444 | enum { | 450 | enum { |
| 445 | CPU_FTRS_POSSIBLE = | 451 | CPU_FTRS_POSSIBLE = |
| @@ -473,16 +479,21 @@ enum { | |||
| 473 | #endif | 479 | #endif |
| 474 | #ifdef CONFIG_E500 | 480 | #ifdef CONFIG_E500 |
| 475 | CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC | | 481 | CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC | |
| 482 | CPU_FTRS_E5500 | | ||
| 476 | #endif | 483 | #endif |
| 477 | 0, | 484 | 0, |
| 478 | }; | 485 | }; |
| 479 | #endif /* __powerpc64__ */ | 486 | #endif /* __powerpc64__ */ |
| 480 | 487 | ||
| 481 | #ifdef __powerpc64__ | 488 | #ifdef __powerpc64__ |
| 489 | #ifdef CONFIG_PPC_BOOK3E | ||
| 490 | #define CPU_FTRS_ALWAYS (CPU_FTRS_E5500) | ||
| 491 | #else | ||
| 482 | #define CPU_FTRS_ALWAYS \ | 492 | #define CPU_FTRS_ALWAYS \ |
| 483 | (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ | 493 | (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ |
| 484 | CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \ | 494 | CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \ |
| 485 | CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE) | 495 | CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE) |
| 496 | #endif | ||
| 486 | #else | 497 | #else |
| 487 | enum { | 498 | enum { |
| 488 | CPU_FTRS_ALWAYS = | 499 | CPU_FTRS_ALWAYS = |
| @@ -513,6 +524,7 @@ enum { | |||
| 513 | #endif | 524 | #endif |
| 514 | #ifdef CONFIG_E500 | 525 | #ifdef CONFIG_E500 |
| 515 | CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC & | 526 | CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC & |
| 527 | CPU_FTRS_E5500 & | ||
| 516 | #endif | 528 | #endif |
| 517 | CPU_FTRS_POSSIBLE, | 529 | CPU_FTRS_POSSIBLE, |
| 518 | }; | 530 | }; |
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index 811f04ac3660..8d1569c29042 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h | |||
| @@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); | |||
| 162 | * on platforms where such control is possible. | 162 | * on platforms where such control is possible. |
| 163 | */ | 163 | */ |
| 164 | #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ | 164 | #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ |
| 165 | defined(CONFIG_KPROBES) | 165 | defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) |
| 166 | #define PAGE_KERNEL_TEXT PAGE_KERNEL_X | 166 | #define PAGE_KERNEL_TEXT PAGE_KERNEL_X |
| 167 | #else | 167 | #else |
| 168 | #define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX | 168 | #define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index c9b68d07ac4f..b9602ee06deb 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
| @@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
| 1973 | .pvr_mask = 0xffff0000, | 1973 | .pvr_mask = 0xffff0000, |
| 1974 | .pvr_value = 0x80240000, | 1974 | .pvr_value = 0x80240000, |
| 1975 | .cpu_name = "e5500", | 1975 | .cpu_name = "e5500", |
| 1976 | .cpu_features = CPU_FTRS_E500MC, | 1976 | .cpu_features = CPU_FTRS_E5500, |
| 1977 | .cpu_user_features = COMMON_USER_BOOKE, | 1977 | .cpu_user_features = COMMON_USER_BOOKE, |
| 1978 | .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | | 1978 | .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | |
| 1979 | MMU_FTR_USE_TLBILX, | 1979 | MMU_FTR_USE_TLBILX, |
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 3d3d416339dd..5b5e1f002a8e 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c | |||
| @@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu) | |||
| 163 | } | 163 | } |
| 164 | 164 | ||
| 165 | /* wait for all the CPUs to hit real mode but timeout if they don't come in */ | 165 | /* wait for all the CPUs to hit real mode but timeout if they don't come in */ |
| 166 | #if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) | 166 | #ifdef CONFIG_PPC_STD_MMU_64 |
| 167 | static void crash_kexec_wait_realmode(int cpu) | 167 | static void crash_kexec_wait_realmode(int cpu) |
| 168 | { | 168 | { |
| 169 | unsigned int msecs; | 169 | unsigned int msecs; |
| @@ -188,9 +188,7 @@ static void crash_kexec_wait_realmode(int cpu) | |||
| 188 | } | 188 | } |
| 189 | mb(); | 189 | mb(); |
| 190 | } | 190 | } |
| 191 | #else | 191 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
| 192 | static inline void crash_kexec_wait_realmode(int cpu) {} | ||
| 193 | #endif | ||
| 194 | 192 | ||
| 195 | /* | 193 | /* |
| 196 | * This function will be called by secondary cpus or by kexec cpu | 194 | * This function will be called by secondary cpus or by kexec cpu |
| @@ -235,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs) | |||
| 235 | crash_ipi_callback(regs); | 233 | crash_ipi_callback(regs); |
| 236 | } | 234 | } |
| 237 | 235 | ||
| 238 | #else | 236 | #else /* ! CONFIG_SMP */ |
| 237 | static inline void crash_kexec_wait_realmode(int cpu) {} | ||
| 238 | |||
| 239 | static void crash_kexec_prepare_cpus(int cpu) | 239 | static void crash_kexec_prepare_cpus(int cpu) |
| 240 | { | 240 | { |
| 241 | /* | 241 | /* |
| @@ -255,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs) | |||
| 255 | { | 255 | { |
| 256 | cpus_in_sr = CPU_MASK_NONE; | 256 | cpus_in_sr = CPU_MASK_NONE; |
| 257 | } | 257 | } |
| 258 | #endif | 258 | #endif /* CONFIG_SMP */ |
| 259 | 259 | ||
| 260 | /* | 260 | /* |
| 261 | * Register a function to be called on shutdown. Only use this if you | 261 | * Register a function to be called on shutdown. Only use this if you |
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index c00d4ca1ee15..28581f1ad2c0 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c | |||
| @@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev) | |||
| 527 | 527 | ||
| 528 | #endif /* !CONFIG_SUSPEND */ | 528 | #endif /* !CONFIG_SUSPEND */ |
| 529 | 529 | ||
| 530 | #ifdef CONFIG_HIBERNATION | 530 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 531 | 531 | ||
| 532 | static int ibmebus_bus_pm_freeze(struct device *dev) | 532 | static int ibmebus_bus_pm_freeze(struct device *dev) |
| 533 | { | 533 | { |
| @@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev) | |||
| 665 | return ret; | 665 | return ret; |
| 666 | } | 666 | } |
| 667 | 667 | ||
| 668 | #else /* !CONFIG_HIBERNATION */ | 668 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 669 | 669 | ||
| 670 | #define ibmebus_bus_pm_freeze NULL | 670 | #define ibmebus_bus_pm_freeze NULL |
| 671 | #define ibmebus_bus_pm_thaw NULL | 671 | #define ibmebus_bus_pm_thaw NULL |
| @@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev) | |||
| 676 | #define ibmebus_bus_pm_poweroff_noirq NULL | 676 | #define ibmebus_bus_pm_poweroff_noirq NULL |
| 677 | #define ibmebus_bus_pm_restore_noirq NULL | 677 | #define ibmebus_bus_pm_restore_noirq NULL |
| 678 | 678 | ||
| 679 | #endif /* !CONFIG_HIBERNATION */ | 679 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 680 | 680 | ||
| 681 | static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { | 681 | static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { |
| 682 | .prepare = ibmebus_bus_pm_prepare, | 682 | .prepare = ibmebus_bus_pm_prepare, |
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index c834757bebc0..2b97b80d6d7d 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c | |||
| @@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void) | |||
| 330 | if (!parent) | 330 | if (!parent) |
| 331 | continue; | 331 | continue; |
| 332 | if (of_match_node(legacy_serial_parents, parent) != NULL) { | 332 | if (of_match_node(legacy_serial_parents, parent) != NULL) { |
| 333 | index = add_legacy_soc_port(np, np); | 333 | if (of_device_is_available(np)) { |
| 334 | if (index >= 0 && np == stdout) | 334 | index = add_legacy_soc_port(np, np); |
| 335 | legacy_serial_console = index; | 335 | if (index >= 0 && np == stdout) |
| 336 | legacy_serial_console = index; | ||
| 337 | } | ||
| 336 | } | 338 | } |
| 337 | of_node_put(parent); | 339 | of_node_put(parent); |
| 338 | } | 340 | } |
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index c4063b7f49a0..822f63008ae1 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c | |||
| @@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], | |||
| 398 | return 0; | 398 | return 0; |
| 399 | } | 399 | } |
| 400 | 400 | ||
| 401 | static u64 check_and_compute_delta(u64 prev, u64 val) | ||
| 402 | { | ||
| 403 | u64 delta = (val - prev) & 0xfffffffful; | ||
| 404 | |||
| 405 | /* | ||
| 406 | * POWER7 can roll back counter values, if the new value is smaller | ||
| 407 | * than the previous value it will cause the delta and the counter to | ||
| 408 | * have bogus values unless we rolled a counter over. If a coutner is | ||
| 409 | * rolled back, it will be smaller, but within 256, which is the maximum | ||
| 410 | * number of events to rollback at once. If we dectect a rollback | ||
| 411 | * return 0. This can lead to a small lack of precision in the | ||
| 412 | * counters. | ||
| 413 | */ | ||
| 414 | if (prev > val && (prev - val) < 256) | ||
| 415 | delta = 0; | ||
| 416 | |||
| 417 | return delta; | ||
| 418 | } | ||
| 419 | |||
| 401 | static void power_pmu_read(struct perf_event *event) | 420 | static void power_pmu_read(struct perf_event *event) |
| 402 | { | 421 | { |
| 403 | s64 val, delta, prev; | 422 | s64 val, delta, prev; |
| @@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event) | |||
| 416 | prev = local64_read(&event->hw.prev_count); | 435 | prev = local64_read(&event->hw.prev_count); |
| 417 | barrier(); | 436 | barrier(); |
| 418 | val = read_pmc(event->hw.idx); | 437 | val = read_pmc(event->hw.idx); |
| 438 | delta = check_and_compute_delta(prev, val); | ||
| 439 | if (!delta) | ||
| 440 | return; | ||
| 419 | } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); | 441 | } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); |
| 420 | 442 | ||
| 421 | /* The counters are only 32 bits wide */ | ||
| 422 | delta = (val - prev) & 0xfffffffful; | ||
| 423 | local64_add(delta, &event->count); | 443 | local64_add(delta, &event->count); |
| 424 | local64_sub(delta, &event->hw.period_left); | 444 | local64_sub(delta, &event->hw.period_left); |
| 425 | } | 445 | } |
| @@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw, | |||
| 449 | val = (event->hw.idx == 5) ? pmc5 : pmc6; | 469 | val = (event->hw.idx == 5) ? pmc5 : pmc6; |
| 450 | prev = local64_read(&event->hw.prev_count); | 470 | prev = local64_read(&event->hw.prev_count); |
| 451 | event->hw.idx = 0; | 471 | event->hw.idx = 0; |
| 452 | delta = (val - prev) & 0xfffffffful; | 472 | delta = check_and_compute_delta(prev, val); |
| 453 | local64_add(delta, &event->count); | 473 | if (delta) |
| 474 | local64_add(delta, &event->count); | ||
| 454 | } | 475 | } |
| 455 | } | 476 | } |
| 456 | 477 | ||
| @@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw, | |||
| 458 | unsigned long pmc5, unsigned long pmc6) | 479 | unsigned long pmc5, unsigned long pmc6) |
| 459 | { | 480 | { |
| 460 | struct perf_event *event; | 481 | struct perf_event *event; |
| 461 | u64 val; | 482 | u64 val, prev; |
| 462 | int i; | 483 | int i; |
| 463 | 484 | ||
| 464 | for (i = 0; i < cpuhw->n_limited; ++i) { | 485 | for (i = 0; i < cpuhw->n_limited; ++i) { |
| 465 | event = cpuhw->limited_counter[i]; | 486 | event = cpuhw->limited_counter[i]; |
| 466 | event->hw.idx = cpuhw->limited_hwidx[i]; | 487 | event->hw.idx = cpuhw->limited_hwidx[i]; |
| 467 | val = (event->hw.idx == 5) ? pmc5 : pmc6; | 488 | val = (event->hw.idx == 5) ? pmc5 : pmc6; |
| 468 | local64_set(&event->hw.prev_count, val); | 489 | prev = local64_read(&event->hw.prev_count); |
| 490 | if (check_and_compute_delta(prev, val)) | ||
| 491 | local64_set(&event->hw.prev_count, val); | ||
| 469 | perf_event_update_userpage(event); | 492 | perf_event_update_userpage(event); |
| 470 | } | 493 | } |
| 471 | } | 494 | } |
| @@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
| 1197 | 1220 | ||
| 1198 | /* we don't have to worry about interrupts here */ | 1221 | /* we don't have to worry about interrupts here */ |
| 1199 | prev = local64_read(&event->hw.prev_count); | 1222 | prev = local64_read(&event->hw.prev_count); |
| 1200 | delta = (val - prev) & 0xfffffffful; | 1223 | delta = check_and_compute_delta(prev, val); |
| 1201 | local64_add(delta, &event->count); | 1224 | local64_add(delta, &event->count); |
| 1202 | 1225 | ||
| 1203 | /* | 1226 | /* |
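check_and_compute_delta() bundles two pieces of arithmetic: the PMCs are 32 bits wide, so the delta is taken modulo 2^32, and a POWER7 rollback (prev slightly above val, by fewer than 256 events) is reported as a zero delta instead of a huge bogus one. A standalone version of the same computation with the three interesting cases:

/*
 * Standalone version of the 32-bit delta / rollback-detection arithmetic
 * performed by check_and_compute_delta() above.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t compute_delta(uint64_t prev, uint64_t val)
{
	uint64_t delta = (val - prev) & 0xfffffffful;   /* counters are 32 bits wide */

	/* a rollback shows up as prev > val by fewer than 256 events */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}

int main(void)
{
	/* normal increment */
	printf("%llu\n", (unsigned long long)compute_delta(100, 150));            /* 50 */
	/* 32-bit wraparound still yields a small positive delta */
	printf("%llu\n", (unsigned long long)compute_delta(0xfffffff0ull, 8));    /* 24 */
	/* rollback: without the check this would be close to 2^32 */
	printf("%llu\n", (unsigned long long)compute_delta(150, 100));            /* 0 */
	return 0;
}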
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 375480c56eb9..f33acfd872ad 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
| @@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb) | |||
| 229 | u64 stolen = 0; | 229 | u64 stolen = 0; |
| 230 | u64 dtb; | 230 | u64 dtb; |
| 231 | 231 | ||
| 232 | if (!dtl) | ||
| 233 | return 0; | ||
| 234 | |||
| 232 | if (i == vpa->dtl_idx) | 235 | if (i == vpa->dtl_idx) |
| 233 | return 0; | 236 | return 0; |
| 234 | while (i < vpa->dtl_idx) { | 237 | while (i < vpa->dtl_idx) { |
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index a830c5e80657..bc5f0dc6ae1e 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c | |||
| @@ -842,6 +842,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr) | |||
| 842 | mpic_setup_this_cpu(); | 842 | mpic_setup_this_cpu(); |
| 843 | } | 843 | } |
| 844 | 844 | ||
| 845 | #ifdef CONFIG_PPC64 | ||
| 845 | #ifdef CONFIG_HOTPLUG_CPU | 846 | #ifdef CONFIG_HOTPLUG_CPU |
| 846 | static int smp_core99_cpu_notify(struct notifier_block *self, | 847 | static int smp_core99_cpu_notify(struct notifier_block *self, |
| 847 | unsigned long action, void *hcpu) | 848 | unsigned long action, void *hcpu) |
| @@ -879,7 +880,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { | |||
| 879 | 880 | ||
| 880 | static void __init smp_core99_bringup_done(void) | 881 | static void __init smp_core99_bringup_done(void) |
| 881 | { | 882 | { |
| 882 | #ifdef CONFIG_PPC64 | ||
| 883 | extern void g5_phy_disable_cpu1(void); | 883 | extern void g5_phy_disable_cpu1(void); |
| 884 | 884 | ||
| 885 | /* Close i2c bus if it was used for tb sync */ | 885 | /* Close i2c bus if it was used for tb sync */ |
| @@ -894,14 +894,14 @@ static void __init smp_core99_bringup_done(void) | |||
| 894 | set_cpu_present(1, false); | 894 | set_cpu_present(1, false); |
| 895 | g5_phy_disable_cpu1(); | 895 | g5_phy_disable_cpu1(); |
| 896 | } | 896 | } |
| 897 | #endif /* CONFIG_PPC64 */ | ||
| 898 | |||
| 899 | #ifdef CONFIG_HOTPLUG_CPU | 897 | #ifdef CONFIG_HOTPLUG_CPU |
| 900 | register_cpu_notifier(&smp_core99_cpu_nb); | 898 | register_cpu_notifier(&smp_core99_cpu_nb); |
| 901 | #endif | 899 | #endif |
| 900 | |||
| 902 | if (ppc_md.progress) | 901 | if (ppc_md.progress) |
| 903 | ppc_md.progress("smp_core99_bringup_done", 0x349); | 902 | ppc_md.progress("smp_core99_bringup_done", 0x349); |
| 904 | } | 903 | } |
| 904 | #endif /* CONFIG_PPC64 */ | ||
| 905 | 905 | ||
| 906 | #ifdef CONFIG_HOTPLUG_CPU | 906 | #ifdef CONFIG_HOTPLUG_CPU |
| 907 | 907 | ||
| @@ -975,7 +975,9 @@ static void pmac_cpu_die(void) | |||
| 975 | struct smp_ops_t core99_smp_ops = { | 975 | struct smp_ops_t core99_smp_ops = { |
| 976 | .message_pass = smp_mpic_message_pass, | 976 | .message_pass = smp_mpic_message_pass, |
| 977 | .probe = smp_core99_probe, | 977 | .probe = smp_core99_probe, |
| 978 | #ifdef CONFIG_PPC64 | ||
| 978 | .bringup_done = smp_core99_bringup_done, | 979 | .bringup_done = smp_core99_bringup_done, |
| 980 | #endif | ||
| 979 | .kick_cpu = smp_core99_kick_cpu, | 981 | .kick_cpu = smp_core99_kick_cpu, |
| 980 | .setup_cpu = smp_core99_setup_cpu, | 982 | .setup_cpu = smp_core99_setup_cpu, |
| 981 | .give_timebase = smp_core99_give_timebase, | 983 | .give_timebase = smp_core99_give_timebase, |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 000724149089..6c42cfde8415 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
| @@ -287,14 +287,22 @@ static int alloc_dispatch_logs(void) | |||
| 287 | int cpu, ret; | 287 | int cpu, ret; |
| 288 | struct paca_struct *pp; | 288 | struct paca_struct *pp; |
| 289 | struct dtl_entry *dtl; | 289 | struct dtl_entry *dtl; |
| 290 | struct kmem_cache *dtl_cache; | ||
| 290 | 291 | ||
| 291 | if (!firmware_has_feature(FW_FEATURE_SPLPAR)) | 292 | if (!firmware_has_feature(FW_FEATURE_SPLPAR)) |
| 292 | return 0; | 293 | return 0; |
| 293 | 294 | ||
| 295 | dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, | ||
| 296 | DISPATCH_LOG_BYTES, 0, NULL); | ||
| 297 | if (!dtl_cache) { | ||
| 298 | pr_warn("Failed to create dispatch trace log buffer cache\n"); | ||
| 299 | pr_warn("Stolen time statistics will be unreliable\n"); | ||
| 300 | return 0; | ||
| 301 | } | ||
| 302 | |||
| 294 | for_each_possible_cpu(cpu) { | 303 | for_each_possible_cpu(cpu) { |
| 295 | pp = &paca[cpu]; | 304 | pp = &paca[cpu]; |
| 296 | dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL, | 305 | dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); |
| 297 | cpu_to_node(cpu)); | ||
| 298 | if (!dtl) { | 306 | if (!dtl) { |
| 299 | pr_warn("Failed to allocate dispatch trace log for cpu %d\n", | 307 | pr_warn("Failed to allocate dispatch trace log for cpu %d\n", |
| 300 | cpu); | 308 | cpu); |
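The dispatch-log allocation now comes from a kmem cache created with an alignment equal to the buffer size, so every DTL buffer is naturally aligned, which the previous kmalloc_node() call did not guarantee. A userspace analogy only, not the kernel API, using posix_memalign to request the alignment explicitly:

/*
 * Userspace analogy: when a buffer must be naturally aligned, ask the
 * allocator for that alignment explicitly rather than relying on
 * whatever malloc() happens to return.  Mirrors the kmalloc_node() ->
 * size-aligned kmem_cache switch above in spirit, not in API.
 */
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define LOG_BYTES 4096   /* stand-in for DISPATCH_LOG_BYTES */

int main(void)
{
	void *buf;
	int err = posix_memalign(&buf, LOG_BYTES, LOG_BYTES);

	if (err) {
		fprintf(stderr, "posix_memalign failed: %d\n", err);
		return 1;
	}
	printf("buffer %p is %s-aligned\n", buf,
	       ((uintptr_t)buf % LOG_BYTES) == 0 ? "size" : "not size");
	free(buf);
	return 0;
}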
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index f8f7f28c6343..68ca9290df94 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c | |||
| @@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary) | |||
| 324 | struct resource rsrc; | 324 | struct resource rsrc; |
| 325 | const int *bus_range; | 325 | const int *bus_range; |
| 326 | 326 | ||
| 327 | if (!of_device_is_available(dev)) { | ||
| 328 | pr_warning("%s: disabled\n", dev->full_name); | ||
| 329 | return -ENODEV; | ||
| 330 | } | ||
| 331 | |||
| 327 | pr_debug("Adding PCI host bridge %s\n", dev->full_name); | 332 | pr_debug("Adding PCI host bridge %s\n", dev->full_name); |
| 328 | 333 | ||
| 329 | /* Fetch host bridge registers address */ | 334 | /* Fetch host bridge registers address */ |
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 14232d57369c..49798532b477 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
| @@ -1457,7 +1457,6 @@ int fsl_rio_setup(struct platform_device *dev) | |||
| 1457 | port->ops = ops; | 1457 | port->ops = ops; |
| 1458 | port->priv = priv; | 1458 | port->priv = priv; |
| 1459 | port->phys_efptr = 0x100; | 1459 | port->phys_efptr = 0x100; |
| 1460 | rio_register_mport(port); | ||
| 1461 | 1460 | ||
| 1462 | priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); | 1461 | priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); |
| 1463 | rio_regs_win = priv->regs_win; | 1462 | rio_regs_win = priv->regs_win; |
| @@ -1504,6 +1503,9 @@ int fsl_rio_setup(struct platform_device *dev) | |||
| 1504 | dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", | 1503 | dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", |
| 1505 | port->sys_size ? 65536 : 256); | 1504 | port->sys_size ? 65536 : 256); |
| 1506 | 1505 | ||
| 1506 | if (rio_register_mport(port)) | ||
| 1507 | goto err; | ||
| 1508 | |||
| 1507 | if (port->host_deviceid >= 0) | 1509 | if (port->host_deviceid >= 0) |
| 1508 | out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | | 1510 | out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | |
| 1509 | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); | 1511 | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); |
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86 index 02fb017fed47..a9da516a5274 100644 --- a/arch/um/Kconfig.x86 +++ b/arch/um/Kconfig.x86 | |||
| @@ -4,6 +4,10 @@ menu "UML-specific options" | |||
| 4 | 4 | ||
| 5 | menu "Host processor type and features" | 5 | menu "Host processor type and features" |
| 6 | 6 | ||
| 7 | config CMPXCHG_LOCAL | ||
| 8 | bool | ||
| 9 | default n | ||
| 10 | |||
| 7 | source "arch/x86/Kconfig.cpu" | 11 | source "arch/x86/Kconfig.cpu" |
| 8 | 12 | ||
| 9 | endmenu | 13 | endmenu |
diff --git a/arch/um/include/asm/bug.h b/arch/um/include/asm/bug.h new file mode 100644 index 000000000000..9e33b864c359 --- /dev/null +++ b/arch/um/include/asm/bug.h | |||
| @@ -0,0 +1,6 @@ | |||
| 1 | #ifndef __UM_BUG_H | ||
| 2 | #define __UM_BUG_H | ||
| 3 | |||
| 4 | #include <asm-generic/bug.h> | ||
| 5 | |||
| 6 | #endif | ||
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index fd5a1f365c95..3cce71413d0b 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
| @@ -96,11 +96,15 @@ | |||
| 96 | #define MSR_IA32_MC0_ADDR 0x00000402 | 96 | #define MSR_IA32_MC0_ADDR 0x00000402 |
| 97 | #define MSR_IA32_MC0_MISC 0x00000403 | 97 | #define MSR_IA32_MC0_MISC 0x00000403 |
| 98 | 98 | ||
| 99 | #define MSR_AMD64_MC0_MASK 0xc0010044 | ||
| 100 | |||
| 99 | #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) | 101 | #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) |
| 100 | #define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) | 102 | #define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) |
| 101 | #define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) | 103 | #define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) |
| 102 | #define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) | 104 | #define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) |
| 103 | 105 | ||
| 106 | #define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) | ||
| 107 | |||
| 104 | /* These are consecutive and not in the normal 4er MCE bank block */ | 108 | /* These are consecutive and not in the normal 4er MCE bank block */ |
| 105 | #define MSR_IA32_MC0_CTL2 0x00000280 | 109 | #define MSR_IA32_MC0_CTL2 0x00000280 |
| 106 | #define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) | 110 | #define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 3ecece0217ef..3532d3bf8105 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
| @@ -615,6 +615,25 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
| 615 | /* As a rule processors have APIC timer running in deep C states */ | 615 | /* As a rule processors have APIC timer running in deep C states */ |
| 616 | if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) | 616 | if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) |
| 617 | set_cpu_cap(c, X86_FEATURE_ARAT); | 617 | set_cpu_cap(c, X86_FEATURE_ARAT); |
| 618 | |||
| 619 | /* | ||
| 620 | * Disable GART TLB Walk Errors on Fam10h. We do this here | ||
| 621 | * because this is always needed when GART is enabled, even in a | ||
| 622 | * kernel which has no MCE support built in. | ||
| 623 | */ | ||
| 624 | if (c->x86 == 0x10) { | ||
| 625 | /* | ||
| 626 | * The BIOS should disable GartTlbWlk Errors itself. If | ||
| 627 | * it doesn't, do it here, as suggested by the BKDG. | ||
| 628 | * | ||
| 629 | * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 | ||
| 630 | */ | ||
| 631 | u64 mask; | ||
| 632 | |||
| 633 | rdmsrl(MSR_AMD64_MCx_MASK(4), mask); | ||
| 634 | mask |= (1 << 10); | ||
| 635 | wrmsrl(MSR_AMD64_MCx_MASK(4), mask); | ||
| 636 | } | ||
| 618 | } | 637 | } |
| 619 | 638 | ||
| 620 | #ifdef CONFIG_X86_32 | 639 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c2871d3c71b6..8ed8908cc9f7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -312,6 +312,26 @@ void __cpuinit smp_store_cpu_info(int id) | |||
| 312 | identify_secondary_cpu(c); | 312 | identify_secondary_cpu(c); |
| 313 | } | 313 | } |
| 314 | 314 | ||
| 315 | static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2) | ||
| 316 | { | ||
| 317 | int node1 = early_cpu_to_node(cpu1); | ||
| 318 | int node2 = early_cpu_to_node(cpu2); | ||
| 319 | |||
| 320 | /* | ||
| 321 | * Our CPU scheduler assumes all logical cpus in the same physical cpu | ||
| 322 | * share the same node. But buggy ACPI or NUMA emulation might assign | ||
| 323 | * them to different nodes. Fix it. | ||
| 324 | */ | ||
| 325 | if (node1 != node2) { | ||
| 326 | pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n", | ||
| 327 | cpu1, node1, cpu2, node2, node2); | ||
| 328 | |||
| 329 | numa_remove_cpu(cpu1); | ||
| 330 | numa_set_node(cpu1, node2); | ||
| 331 | numa_add_cpu(cpu1); | ||
| 332 | } | ||
| 333 | } | ||
| 334 | |||
| 315 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) | 335 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) |
| 316 | { | 336 | { |
| 317 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); | 337 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); |
| @@ -320,6 +340,7 @@ static void __cpuinit link_thread_siblings(int cpu1, int cpu2) | |||
| 320 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); | 340 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); |
| 321 | cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); | 341 | cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); |
| 322 | cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); | 342 | cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); |
| 343 | check_cpu_siblings_on_same_node(cpu1, cpu2); | ||
| 323 | } | 344 | } |
| 324 | 345 | ||
| 325 | 346 | ||
| @@ -361,10 +382,12 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
| 361 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | 382 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { |
| 362 | cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); | 383 | cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); |
| 363 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); | 384 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); |
| 385 | check_cpu_siblings_on_same_node(cpu, i); | ||
| 364 | } | 386 | } |
| 365 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { | 387 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { |
| 366 | cpumask_set_cpu(i, cpu_core_mask(cpu)); | 388 | cpumask_set_cpu(i, cpu_core_mask(cpu)); |
| 367 | cpumask_set_cpu(cpu, cpu_core_mask(i)); | 389 | cpumask_set_cpu(cpu, cpu_core_mask(i)); |
| 390 | check_cpu_siblings_on_same_node(cpu, i); | ||
| 368 | /* | 391 | /* |
| 369 | * Does this new cpu bringup a new core? | 392 | * Does this new cpu bringup a new core? |
| 370 | */ | 393 | */ |
diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts index dc701ea58546..2d6d226f2b10 100644 --- a/arch/x86/platform/ce4100/falconfalls.dts +++ b/arch/x86/platform/ce4100/falconfalls.dts | |||
| @@ -74,6 +74,7 @@ | |||
| 74 | compatible = "intel,ce4100-pci", "pci"; | 74 | compatible = "intel,ce4100-pci", "pci"; |
| 75 | device_type = "pci"; | 75 | device_type = "pci"; |
| 76 | bus-range = <1 1>; | 76 | bus-range = <1 1>; |
| 77 | reg = <0x0800 0x0 0x0 0x0 0x0>; | ||
| 77 | ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>; | 78 | ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>; |
| 78 | 79 | ||
| 79 | interrupt-parent = <&ioapic2>; | 80 | interrupt-parent = <&ioapic2>; |
| @@ -412,6 +413,7 @@ | |||
| 412 | #address-cells = <2>; | 413 | #address-cells = <2>; |
| 413 | #size-cells = <1>; | 414 | #size-cells = <1>; |
| 414 | compatible = "isa"; | 415 | compatible = "isa"; |
| 416 | reg = <0xf800 0x0 0x0 0x0 0x0>; | ||
| 415 | ranges = <1 0 0 0 0 0x100>; | 417 | ranges = <1 0 0 0 0 0x100>; |
| 416 | 418 | ||
| 417 | rtc@70 { | 419 | rtc@70 { |
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index 5c0207bf959b..275dbc19e2cf 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c | |||
| @@ -97,11 +97,11 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table) | |||
| 97 | pentry->freq_hz, pentry->irq); | 97 | pentry->freq_hz, pentry->irq); |
| 98 | if (!pentry->irq) | 98 | if (!pentry->irq) |
| 99 | continue; | 99 | continue; |
| 100 | mp_irq.type = MP_IOAPIC; | 100 | mp_irq.type = MP_INTSRC; |
| 101 | mp_irq.irqtype = mp_INT; | 101 | mp_irq.irqtype = mp_INT; |
| 102 | /* triggering mode edge bit 2-3, active high polarity bit 0-1 */ | 102 | /* triggering mode edge bit 2-3, active high polarity bit 0-1 */ |
| 103 | mp_irq.irqflag = 5; | 103 | mp_irq.irqflag = 5; |
| 104 | mp_irq.srcbus = 0; | 104 | mp_irq.srcbus = MP_BUS_ISA; |
| 105 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ | 105 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ |
| 106 | mp_irq.dstapic = MP_APIC_ALL; | 106 | mp_irq.dstapic = MP_APIC_ALL; |
| 107 | mp_irq.dstirq = pentry->irq; | 107 | mp_irq.dstirq = pentry->irq; |
| @@ -168,10 +168,10 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table) | |||
| 168 | for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) { | 168 | for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) { |
| 169 | pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n", | 169 | pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n", |
| 170 | totallen, (u32)pentry->phys_addr, pentry->irq); | 170 | totallen, (u32)pentry->phys_addr, pentry->irq); |
| 171 | mp_irq.type = MP_IOAPIC; | 171 | mp_irq.type = MP_INTSRC; |
| 172 | mp_irq.irqtype = mp_INT; | 172 | mp_irq.irqtype = mp_INT; |
| 173 | mp_irq.irqflag = 0xf; /* level trigger and active low */ | 173 | mp_irq.irqflag = 0xf; /* level trigger and active low */ |
| 174 | mp_irq.srcbus = 0; | 174 | mp_irq.srcbus = MP_BUS_ISA; |
| 175 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ | 175 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ |
| 176 | mp_irq.dstapic = MP_APIC_ALL; | 176 | mp_irq.dstapic = MP_APIC_ALL; |
| 177 | mp_irq.dstirq = pentry->irq; | 177 | mp_irq.dstirq = pentry->irq; |
| @@ -282,7 +282,7 @@ void __init x86_mrst_early_setup(void) | |||
| 282 | /* Avoid searching for BIOS MP tables */ | 282 | /* Avoid searching for BIOS MP tables */ |
| 283 | x86_init.mpparse.find_smp_config = x86_init_noop; | 283 | x86_init.mpparse.find_smp_config = x86_init_noop; |
| 284 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; | 284 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; |
| 285 | 285 | set_bit(MP_BUS_ISA, mp_bus_not_pci); | |
| 286 | } | 286 | } |
| 287 | 287 | ||
| 288 | /* | 288 | /* |
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 1c7121ba18ff..5cc821cb2e09 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig | |||
| @@ -39,6 +39,7 @@ config XEN_MAX_DOMAIN_MEMORY | |||
| 39 | config XEN_SAVE_RESTORE | 39 | config XEN_SAVE_RESTORE |
| 40 | bool | 40 | bool |
| 41 | depends on XEN | 41 | depends on XEN |
| 42 | select HIBERNATE_CALLBACKS | ||
| 42 | default y | 43 | default y |
| 43 | 44 | ||
| 44 | config XEN_DEBUG_FS | 45 | config XEN_DEBUG_FS |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 49dbd78ec3cb..e3c6a06cf725 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
| @@ -238,6 +238,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, | |||
| 238 | static __init void xen_init_cpuid_mask(void) | 238 | static __init void xen_init_cpuid_mask(void) |
| 239 | { | 239 | { |
| 240 | unsigned int ax, bx, cx, dx; | 240 | unsigned int ax, bx, cx, dx; |
| 241 | unsigned int xsave_mask; | ||
| 241 | 242 | ||
| 242 | cpuid_leaf1_edx_mask = | 243 | cpuid_leaf1_edx_mask = |
| 243 | ~((1 << X86_FEATURE_MCE) | /* disable MCE */ | 244 | ~((1 << X86_FEATURE_MCE) | /* disable MCE */ |
| @@ -249,24 +250,16 @@ static __init void xen_init_cpuid_mask(void) | |||
| 249 | cpuid_leaf1_edx_mask &= | 250 | cpuid_leaf1_edx_mask &= |
| 250 | ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ | 251 | ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ |
| 251 | (1 << X86_FEATURE_ACPI)); /* disable ACPI */ | 252 | (1 << X86_FEATURE_ACPI)); /* disable ACPI */ |
| 252 | |||
| 253 | ax = 1; | 253 | ax = 1; |
| 254 | cx = 0; | ||
| 255 | xen_cpuid(&ax, &bx, &cx, &dx); | 254 | xen_cpuid(&ax, &bx, &cx, &dx); |
| 256 | 255 | ||
| 257 | /* cpuid claims we support xsave; try enabling it to see what happens */ | 256 | xsave_mask = |
| 258 | if (cx & (1 << (X86_FEATURE_XSAVE % 32))) { | 257 | (1 << (X86_FEATURE_XSAVE % 32)) | |
| 259 | unsigned long cr4; | 258 | (1 << (X86_FEATURE_OSXSAVE % 32)); |
| 260 | |||
| 261 | set_in_cr4(X86_CR4_OSXSAVE); | ||
| 262 | |||
| 263 | cr4 = read_cr4(); | ||
| 264 | 259 | ||
| 265 | if ((cr4 & X86_CR4_OSXSAVE) == 0) | 260 | /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ |
| 266 | cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32)); | 261 | if ((cx & xsave_mask) != xsave_mask) |
| 267 | 262 | cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */ | |
| 268 | clear_in_cr4(X86_CR4_OSXSAVE); | ||
| 269 | } | ||
| 270 | } | 263 | } |
| 271 | 264 | ||
| 272 | static void xen_set_debugreg(int reg, unsigned long val) | 265 | static void xen_set_debugreg(int reg, unsigned long val) |
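The reworked xen_init_cpuid_mask() builds one mask from the XSAVE and OSXSAVE feature bits and masks both off unless both are present: (cx & xsave_mask) != xsave_mask is the "are all bits in the mask set?" test, which is not the same as the "any bit set" test. A tiny illustration (the two bit positions stand in for the XSAVE/OSXSAVE bits):

/*
 * "All bits set" vs "any bit set" mask tests, as in the cpuid check above.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mask = (1u << 26) | (1u << 27);   /* two required feature bits */
	uint32_t cx   = (1u << 26);                /* only one of them reported */

	printf("any bit set:  %s\n", (cx & mask) ? "yes" : "no");           /* yes */
	printf("all bits set: %s\n", (cx & mask) == mask ? "yes" : "no");   /* no  */
	return 0;
}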
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index c82df6c9c0f0..a991b57f91fe 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
| @@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte) | |||
| 565 | if (io_page && | 565 | if (io_page && |
| 566 | (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { | 566 | (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { |
| 567 | other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; | 567 | other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; |
| 568 | WARN(addr != other_addr, | 568 | WARN_ONCE(addr != other_addr, |
| 569 | "0x%lx is using VM_IO, but it is 0x%lx!\n", | 569 | "0x%lx is using VM_IO, but it is 0x%lx!\n", |
| 570 | (unsigned long)addr, (unsigned long)other_addr); | 570 | (unsigned long)addr, (unsigned long)other_addr); |
| 571 | } else { | 571 | } else { |
| 572 | pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; | 572 | pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; |
| 573 | other_addr = (_pte.pte & PTE_PFN_MASK); | 573 | other_addr = (_pte.pte & PTE_PFN_MASK); |
| 574 | WARN((addr == other_addr) && (!io_page) && (!iomap_set), | 574 | WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set), |
| 575 | "0x%lx is missing VM_IO (and wasn't fixed)!\n", | 575 | "0x%lx is missing VM_IO (and wasn't fixed)!\n", |
| 576 | (unsigned long)addr); | 576 | (unsigned long)addr); |
| 577 | } | 577 | } |
diff --git a/block/blk-core.c b/block/blk-core.c index 90f22cc30799..5fa3dd2705c6 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
| @@ -198,26 +198,13 @@ void blk_dump_rq_flags(struct request *rq, char *msg) | |||
| 198 | } | 198 | } |
| 199 | EXPORT_SYMBOL(blk_dump_rq_flags); | 199 | EXPORT_SYMBOL(blk_dump_rq_flags); |
| 200 | 200 | ||
| 201 | /* | ||
| 202 | * Make sure that plugs that were pending when this function was entered, | ||
| 203 | * are now complete and requests pushed to the queue. | ||
| 204 | */ | ||
| 205 | static inline void queue_sync_plugs(struct request_queue *q) | ||
| 206 | { | ||
| 207 | /* | ||
| 208 | * If the current process is plugged and has barriers submitted, | ||
| 209 | * we will livelock if we don't unplug first. | ||
| 210 | */ | ||
| 211 | blk_flush_plug(current); | ||
| 212 | } | ||
| 213 | |||
| 214 | static void blk_delay_work(struct work_struct *work) | 201 | static void blk_delay_work(struct work_struct *work) |
| 215 | { | 202 | { |
| 216 | struct request_queue *q; | 203 | struct request_queue *q; |
| 217 | 204 | ||
| 218 | q = container_of(work, struct request_queue, delay_work.work); | 205 | q = container_of(work, struct request_queue, delay_work.work); |
| 219 | spin_lock_irq(q->queue_lock); | 206 | spin_lock_irq(q->queue_lock); |
| 220 | __blk_run_queue(q, false); | 207 | __blk_run_queue(q); |
| 221 | spin_unlock_irq(q->queue_lock); | 208 | spin_unlock_irq(q->queue_lock); |
| 222 | } | 209 | } |
| 223 | 210 | ||
| @@ -233,7 +220,8 @@ static void blk_delay_work(struct work_struct *work) | |||
| 233 | */ | 220 | */ |
| 234 | void blk_delay_queue(struct request_queue *q, unsigned long msecs) | 221 | void blk_delay_queue(struct request_queue *q, unsigned long msecs) |
| 235 | { | 222 | { |
| 236 | schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs)); | 223 | queue_delayed_work(kblockd_workqueue, &q->delay_work, |
| 224 | msecs_to_jiffies(msecs)); | ||
| 237 | } | 225 | } |
| 238 | EXPORT_SYMBOL(blk_delay_queue); | 226 | EXPORT_SYMBOL(blk_delay_queue); |
| 239 | 227 | ||
| @@ -251,7 +239,7 @@ void blk_start_queue(struct request_queue *q) | |||
| 251 | WARN_ON(!irqs_disabled()); | 239 | WARN_ON(!irqs_disabled()); |
| 252 | 240 | ||
| 253 | queue_flag_clear(QUEUE_FLAG_STOPPED, q); | 241 | queue_flag_clear(QUEUE_FLAG_STOPPED, q); |
| 254 | __blk_run_queue(q, false); | 242 | __blk_run_queue(q); |
| 255 | } | 243 | } |
| 256 | EXPORT_SYMBOL(blk_start_queue); | 244 | EXPORT_SYMBOL(blk_start_queue); |
| 257 | 245 | ||
| @@ -298,7 +286,6 @@ void blk_sync_queue(struct request_queue *q) | |||
| 298 | { | 286 | { |
| 299 | del_timer_sync(&q->timeout); | 287 | del_timer_sync(&q->timeout); |
| 300 | cancel_delayed_work_sync(&q->delay_work); | 288 | cancel_delayed_work_sync(&q->delay_work); |
| 301 | queue_sync_plugs(q); | ||
| 302 | } | 289 | } |
| 303 | EXPORT_SYMBOL(blk_sync_queue); | 290 | EXPORT_SYMBOL(blk_sync_queue); |
| 304 | 291 | ||
| @@ -310,9 +297,8 @@ EXPORT_SYMBOL(blk_sync_queue); | |||
| 310 | * Description: | 297 | * Description: |
| 311 | * See @blk_run_queue. This variant must be called with the queue lock | 298 | * See @blk_run_queue. This variant must be called with the queue lock |
| 312 | * held and interrupts disabled. | 299 | * held and interrupts disabled. |
| 313 | * | ||
| 314 | */ | 300 | */ |
| 315 | void __blk_run_queue(struct request_queue *q, bool force_kblockd) | 301 | void __blk_run_queue(struct request_queue *q) |
| 316 | { | 302 | { |
| 317 | if (unlikely(blk_queue_stopped(q))) | 303 | if (unlikely(blk_queue_stopped(q))) |
| 318 | return; | 304 | return; |
| @@ -321,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd) | |||
| 321 | * Only recurse once to avoid overrunning the stack, let the unplug | 307 | * Only recurse once to avoid overrunning the stack, let the unplug |
| 322 | * handling reinvoke the handler shortly if we already got there. | 308 | * handling reinvoke the handler shortly if we already got there. |
| 323 | */ | 309 | */ |
| 324 | if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { | 310 | if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { |
| 325 | q->request_fn(q); | 311 | q->request_fn(q); |
| 326 | queue_flag_clear(QUEUE_FLAG_REENTER, q); | 312 | queue_flag_clear(QUEUE_FLAG_REENTER, q); |
| 327 | } else | 313 | } else |
| @@ -330,6 +316,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd) | |||
| 330 | EXPORT_SYMBOL(__blk_run_queue); | 316 | EXPORT_SYMBOL(__blk_run_queue); |
| 331 | 317 | ||
| 332 | /** | 318 | /** |
| 319 | * blk_run_queue_async - run a single device queue in workqueue context | ||
| 320 | * @q: The queue to run | ||
| 321 | * | ||
| 322 | * Description: | ||
| 323 | * Tells kblockd to perform the equivalent of @blk_run_queue on behalf | ||
| 324 | * of us. | ||
| 325 | */ | ||
| 326 | void blk_run_queue_async(struct request_queue *q) | ||
| 327 | { | ||
| 328 | if (likely(!blk_queue_stopped(q))) | ||
| 329 | queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); | ||
| 330 | } | ||
| 331 | |||
| 332 | /** | ||
| 333 | * blk_run_queue - run a single device queue | 333 | * blk_run_queue - run a single device queue |
| 334 | * @q: The queue to run | 334 | * @q: The queue to run |
| 335 | * | 335 | * |
| @@ -342,7 +342,7 @@ void blk_run_queue(struct request_queue *q) | |||
| 342 | unsigned long flags; | 342 | unsigned long flags; |
| 343 | 343 | ||
| 344 | spin_lock_irqsave(q->queue_lock, flags); | 344 | spin_lock_irqsave(q->queue_lock, flags); |
| 345 | __blk_run_queue(q, false); | 345 | __blk_run_queue(q); |
| 346 | spin_unlock_irqrestore(q->queue_lock, flags); | 346 | spin_unlock_irqrestore(q->queue_lock, flags); |
| 347 | } | 347 | } |
| 348 | EXPORT_SYMBOL(blk_run_queue); | 348 | EXPORT_SYMBOL(blk_run_queue); |
| @@ -991,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq, | |||
| 991 | blk_queue_end_tag(q, rq); | 991 | blk_queue_end_tag(q, rq); |
| 992 | 992 | ||
| 993 | add_acct_request(q, rq, where); | 993 | add_acct_request(q, rq, where); |
| 994 | __blk_run_queue(q, false); | 994 | __blk_run_queue(q); |
| 995 | spin_unlock_irqrestore(q->queue_lock, flags); | 995 | spin_unlock_irqrestore(q->queue_lock, flags); |
| 996 | } | 996 | } |
| 997 | EXPORT_SYMBOL(blk_insert_request); | 997 | EXPORT_SYMBOL(blk_insert_request); |
| @@ -1311,7 +1311,15 @@ get_rq: | |||
| 1311 | 1311 | ||
| 1312 | plug = current->plug; | 1312 | plug = current->plug; |
| 1313 | if (plug) { | 1313 | if (plug) { |
| 1314 | if (!plug->should_sort && !list_empty(&plug->list)) { | 1314 | /* |
| 1315 | * If this is the first request added after a plug, fire | ||
| 1316 | * of a plug trace. If others have been added before, check | ||
| 1317 | * if we have multiple devices in this plug. If so, make a | ||
| 1318 | * note to sort the list before dispatch. | ||
| 1319 | */ | ||
| 1320 | if (list_empty(&plug->list)) | ||
| 1321 | trace_block_plug(q); | ||
| 1322 | else if (!plug->should_sort) { | ||
| 1315 | struct request *__rq; | 1323 | struct request *__rq; |
| 1316 | 1324 | ||
| 1317 | __rq = list_entry_rq(plug->list.prev); | 1325 | __rq = list_entry_rq(plug->list.prev); |
| @@ -1327,7 +1335,7 @@ get_rq: | |||
| 1327 | } else { | 1335 | } else { |
| 1328 | spin_lock_irq(q->queue_lock); | 1336 | spin_lock_irq(q->queue_lock); |
| 1329 | add_acct_request(q, req, where); | 1337 | add_acct_request(q, req, where); |
| 1330 | __blk_run_queue(q, false); | 1338 | __blk_run_queue(q); |
| 1331 | out_unlock: | 1339 | out_unlock: |
| 1332 | spin_unlock_irq(q->queue_lock); | 1340 | spin_unlock_irq(q->queue_lock); |
| 1333 | } | 1341 | } |
| @@ -2644,6 +2652,7 @@ void blk_start_plug(struct blk_plug *plug) | |||
| 2644 | 2652 | ||
| 2645 | plug->magic = PLUG_MAGIC; | 2653 | plug->magic = PLUG_MAGIC; |
| 2646 | INIT_LIST_HEAD(&plug->list); | 2654 | INIT_LIST_HEAD(&plug->list); |
| 2655 | INIT_LIST_HEAD(&plug->cb_list); | ||
| 2647 | plug->should_sort = 0; | 2656 | plug->should_sort = 0; |
| 2648 | 2657 | ||
| 2649 | /* | 2658 | /* |
| @@ -2668,33 +2677,93 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) | |||
| 2668 | return !(rqa->q <= rqb->q); | 2677 | return !(rqa->q <= rqb->q); |
| 2669 | } | 2678 | } |
| 2670 | 2679 | ||
| 2671 | static void flush_plug_list(struct blk_plug *plug) | 2680 | /* |
| 2681 | * If 'from_schedule' is true, then postpone the dispatch of requests | ||
| 2682 | * until a safe kblockd context. We do this to avoid accidental big | ||
| 2683 | * additional stack usage in driver dispatch, in places where the original | ||
| 2684 | * plugger did not intend it. | ||
| 2685 | */ | ||
| 2686 | static void queue_unplugged(struct request_queue *q, unsigned int depth, | ||
| 2687 | bool from_schedule) | ||
| 2688 | __releases(q->queue_lock) | ||
| 2689 | { | ||
| 2690 | trace_block_unplug(q, depth, !from_schedule); | ||
| 2691 | |||
| 2692 | /* | ||
| 2693 | * If we are punting this to kblockd, then we can safely drop | ||
| 2694 | * the queue_lock before waking kblockd (which needs to take | ||
| 2695 | * this lock). | ||
| 2696 | */ | ||
| 2697 | if (from_schedule) { | ||
| 2698 | spin_unlock(q->queue_lock); | ||
| 2699 | blk_run_queue_async(q); | ||
| 2700 | } else { | ||
| 2701 | __blk_run_queue(q); | ||
| 2702 | spin_unlock(q->queue_lock); | ||
| 2703 | } | ||
| 2704 | |||
| 2705 | } | ||
| 2706 | |||
| 2707 | static void flush_plug_callbacks(struct blk_plug *plug) | ||
| 2708 | { | ||
| 2709 | LIST_HEAD(callbacks); | ||
| 2710 | |||
| 2711 | if (list_empty(&plug->cb_list)) | ||
| 2712 | return; | ||
| 2713 | |||
| 2714 | list_splice_init(&plug->cb_list, &callbacks); | ||
| 2715 | |||
| 2716 | while (!list_empty(&callbacks)) { | ||
| 2717 | struct blk_plug_cb *cb = list_first_entry(&callbacks, | ||
| 2718 | struct blk_plug_cb, | ||
| 2719 | list); | ||
| 2720 | list_del(&cb->list); | ||
| 2721 | cb->callback(cb); | ||
| 2722 | } | ||
| 2723 | } | ||
| 2724 | |||
| 2725 | void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) | ||
| 2672 | { | 2726 | { |
| 2673 | struct request_queue *q; | 2727 | struct request_queue *q; |
| 2674 | unsigned long flags; | 2728 | unsigned long flags; |
| 2675 | struct request *rq; | 2729 | struct request *rq; |
| 2730 | LIST_HEAD(list); | ||
| 2731 | unsigned int depth; | ||
| 2676 | 2732 | ||
| 2677 | BUG_ON(plug->magic != PLUG_MAGIC); | 2733 | BUG_ON(plug->magic != PLUG_MAGIC); |
| 2678 | 2734 | ||
| 2735 | flush_plug_callbacks(plug); | ||
| 2679 | if (list_empty(&plug->list)) | 2736 | if (list_empty(&plug->list)) |
| 2680 | return; | 2737 | return; |
| 2681 | 2738 | ||
| 2682 | if (plug->should_sort) | 2739 | list_splice_init(&plug->list, &list); |
| 2683 | list_sort(NULL, &plug->list, plug_rq_cmp); | 2740 | |
| 2741 | if (plug->should_sort) { | ||
| 2742 | list_sort(NULL, &list, plug_rq_cmp); | ||
| 2743 | plug->should_sort = 0; | ||
| 2744 | } | ||
| 2684 | 2745 | ||
| 2685 | q = NULL; | 2746 | q = NULL; |
| 2747 | depth = 0; | ||
| 2748 | |||
| 2749 | /* | ||
| 2750 | * Save and disable interrupts here, to avoid doing it for every | ||
| 2751 | * queue lock we have to take. | ||
| 2752 | */ | ||
| 2686 | local_irq_save(flags); | 2753 | local_irq_save(flags); |
| 2687 | while (!list_empty(&plug->list)) { | 2754 | while (!list_empty(&list)) { |
| 2688 | rq = list_entry_rq(plug->list.next); | 2755 | rq = list_entry_rq(list.next); |
| 2689 | list_del_init(&rq->queuelist); | 2756 | list_del_init(&rq->queuelist); |
| 2690 | BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG)); | 2757 | BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG)); |
| 2691 | BUG_ON(!rq->q); | 2758 | BUG_ON(!rq->q); |
| 2692 | if (rq->q != q) { | 2759 | if (rq->q != q) { |
| 2693 | if (q) { | 2760 | /* |
| 2694 | __blk_run_queue(q, false); | 2761 | * This drops the queue lock |
| 2695 | spin_unlock(q->queue_lock); | 2762 | */ |
| 2696 | } | 2763 | if (q) |
| 2764 | queue_unplugged(q, depth, from_schedule); | ||
| 2697 | q = rq->q; | 2765 | q = rq->q; |
| 2766 | depth = 0; | ||
| 2698 | spin_lock(q->queue_lock); | 2767 | spin_lock(q->queue_lock); |
| 2699 | } | 2768 | } |
| 2700 | rq->cmd_flags &= ~REQ_ON_PLUG; | 2769 | rq->cmd_flags &= ~REQ_ON_PLUG; |
| @@ -2706,38 +2775,28 @@ static void flush_plug_list(struct blk_plug *plug) | |||
| 2706 | __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); | 2775 | __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); |
| 2707 | else | 2776 | else |
| 2708 | __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); | 2777 | __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); |
| 2709 | } | ||
| 2710 | 2778 | ||
| 2711 | if (q) { | 2779 | depth++; |
| 2712 | __blk_run_queue(q, false); | ||
| 2713 | spin_unlock(q->queue_lock); | ||
| 2714 | } | 2780 | } |
| 2715 | 2781 | ||
| 2716 | BUG_ON(!list_empty(&plug->list)); | 2782 | /* |
| 2717 | local_irq_restore(flags); | 2783 | * This drops the queue lock |
| 2718 | } | 2784 | */ |
| 2719 | 2785 | if (q) | |
| 2720 | static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug) | 2786 | queue_unplugged(q, depth, from_schedule); |
| 2721 | { | ||
| 2722 | flush_plug_list(plug); | ||
| 2723 | 2787 | ||
| 2724 | if (plug == tsk->plug) | 2788 | local_irq_restore(flags); |
| 2725 | tsk->plug = NULL; | ||
| 2726 | } | 2789 | } |
| 2790 | EXPORT_SYMBOL(blk_flush_plug_list); | ||
| 2727 | 2791 | ||
| 2728 | void blk_finish_plug(struct blk_plug *plug) | 2792 | void blk_finish_plug(struct blk_plug *plug) |
| 2729 | { | 2793 | { |
| 2730 | if (plug) | 2794 | blk_flush_plug_list(plug, false); |
| 2731 | __blk_finish_plug(current, plug); | ||
| 2732 | } | ||
| 2733 | EXPORT_SYMBOL(blk_finish_plug); | ||
| 2734 | 2795 | ||
| 2735 | void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug) | 2796 | if (plug == current->plug) |
| 2736 | { | 2797 | current->plug = NULL; |
| 2737 | __blk_finish_plug(tsk, plug); | ||
| 2738 | tsk->plug = plug; | ||
| 2739 | } | 2798 | } |
| 2740 | EXPORT_SYMBOL(__blk_flush_plug); | 2799 | EXPORT_SYMBOL(blk_finish_plug); |
| 2741 | 2800 | ||
| 2742 | int __init blk_dev_init(void) | 2801 | int __init blk_dev_init(void) |
| 2743 | { | 2802 | { |
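The reworked flush path above splices the plugged requests onto an on-stack list, sorts it only when more than one queue was seen while plugging, then dispatches one per-queue batch at a time while counting the depth for the unplug tracepoint. A simplified, self-contained sketch of that splice/sort/batch control flow; plain integers stand in for struct request and struct request_queue, so this illustrates the idea rather than the block-layer API:

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_rq {
        int q;                            /* stands in for rq->q */
    };

    static int rq_cmp(const void *a, const void *b)
    {
        const struct fake_rq *ra = a, *rb = b;
        return ra->q - rb->q;             /* group same-queue requests together */
    }

    static void queue_unplugged(int q, unsigned int depth)
    {
        printf("dispatch queue %d, batch depth %u\n", q, depth);
    }

    int main(void)
    {
        /* "plugged" requests for three queues, in submission order */
        struct fake_rq list[] = { {2}, {1}, {2}, {3}, {1} };
        size_t n = sizeof(list) / sizeof(list[0]);
        int cur_q = -1;
        unsigned int depth = 0;

        /* should_sort: more than one queue was seen while plugging */
        qsort(list, n, sizeof(list[0]), rq_cmp);

        for (size_t i = 0; i < n; i++) {
            if (list[i].q != cur_q) {
                if (cur_q != -1)
                    queue_unplugged(cur_q, depth);   /* finish previous batch */
                cur_q = list[i].q;
                depth = 0;
            }
            /* __elv_add_request(q, rq, ...) would happen here */
            depth++;
        }
        if (cur_q != -1)
            queue_unplugged(cur_q, depth);           /* last batch */
        return 0;
    }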
diff --git a/block/blk-exec.c b/block/blk-exec.c index 7482b7fa863b..81e31819a597 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c | |||
| @@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | |||
| 55 | WARN_ON(irqs_disabled()); | 55 | WARN_ON(irqs_disabled()); |
| 56 | spin_lock_irq(q->queue_lock); | 56 | spin_lock_irq(q->queue_lock); |
| 57 | __elv_add_request(q, rq, where); | 57 | __elv_add_request(q, rq, where); |
| 58 | __blk_run_queue(q, false); | 58 | __blk_run_queue(q); |
| 59 | /* the queue is stopped so it won't be plugged+unplugged */ | 59 | /* the queue is stopped so it won't be plugged+unplugged */ |
| 60 | if (rq->cmd_type == REQ_TYPE_PM_RESUME) | 60 | if (rq->cmd_type == REQ_TYPE_PM_RESUME) |
| 61 | q->request_fn(q); | 61 | q->request_fn(q); |
diff --git a/block/blk-flush.c b/block/blk-flush.c index eba4a2790c6c..6c9b5e189e62 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
| @@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error) | |||
| 218 | * request_fn may confuse the driver. Always use kblockd. | 218 | * request_fn may confuse the driver. Always use kblockd. |
| 219 | */ | 219 | */ |
| 220 | if (queued) | 220 | if (queued) |
| 221 | __blk_run_queue(q, true); | 221 | blk_run_queue_async(q); |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | /** | 224 | /** |
| @@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error) | |||
| 274 | * the comment in flush_end_io(). | 274 | * the comment in flush_end_io(). |
| 275 | */ | 275 | */ |
| 276 | if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error)) | 276 | if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error)) |
| 277 | __blk_run_queue(q, true); | 277 | blk_run_queue_async(q); |
| 278 | } | 278 | } |
| 279 | 279 | ||
| 280 | /** | 280 | /** |
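Both flush completion handlers now punt the re-run to kblockd via blk_run_queue_async() rather than calling the dispatch path directly, since re-entering the driver's request_fn from its own completion is exactly what the comment warns about. A toy sketch of that deferral pattern, with a plain function-pointer list standing in for the kblockd workqueue (illustrative only):

    #include <stdio.h>

    typedef void (*work_fn)(void *);

    /* A toy "kblockd": completions push work here instead of calling the
     * dispatcher directly, and the work runs later from a safe context. */
    static struct { work_fn fn; void *arg; } pending[16];
    static int npending;

    static void queue_work(work_fn fn, void *arg)
    {
        pending[npending].fn = fn;
        pending[npending].arg = arg;
        npending++;
    }

    static void run_queue(void *name)
    {
        printf("running queue %s from worker context\n", (const char *)name);
    }

    static void flush_end_io(void *qname)
    {
        /* never recurse into run_queue() from the completion: defer it */
        queue_work(run_queue, qname);
    }

    int main(void)
    {
        flush_end_io("sda");
        for (int i = 0; i < npending; i++)   /* the "worker thread" */
            pending[i].fn(pending[i].arg);
        return 0;
    }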
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 261c75c665ae..6d735122bc59 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
| @@ -498,7 +498,6 @@ int blk_register_queue(struct gendisk *disk) | |||
| 498 | { | 498 | { |
| 499 | int ret; | 499 | int ret; |
| 500 | struct device *dev = disk_to_dev(disk); | 500 | struct device *dev = disk_to_dev(disk); |
| 501 | |||
| 502 | struct request_queue *q = disk->queue; | 501 | struct request_queue *q = disk->queue; |
| 503 | 502 | ||
| 504 | if (WARN_ON(!q)) | 503 | if (WARN_ON(!q)) |
| @@ -521,7 +520,7 @@ int blk_register_queue(struct gendisk *disk) | |||
| 521 | if (ret) { | 520 | if (ret) { |
| 522 | kobject_uevent(&q->kobj, KOBJ_REMOVE); | 521 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
| 523 | kobject_del(&q->kobj); | 522 | kobject_del(&q->kobj); |
| 524 | blk_trace_remove_sysfs(disk_to_dev(disk)); | 523 | blk_trace_remove_sysfs(dev); |
| 525 | kobject_put(&dev->kobj); | 524 | kobject_put(&dev->kobj); |
| 526 | return ret; | 525 | return ret; |
| 527 | } | 526 | } |
diff --git a/block/blk.h b/block/blk.h index 61263463e38e..c9df8fc3c999 100644 --- a/block/blk.h +++ b/block/blk.h | |||
| @@ -22,6 +22,7 @@ void blk_rq_timed_out_timer(unsigned long data); | |||
| 22 | void blk_delete_timer(struct request *); | 22 | void blk_delete_timer(struct request *); |
| 23 | void blk_add_timer(struct request *); | 23 | void blk_add_timer(struct request *); |
| 24 | void __generic_unplug_device(struct request_queue *); | 24 | void __generic_unplug_device(struct request_queue *); |
| 25 | void blk_run_queue_async(struct request_queue *q); | ||
| 25 | 26 | ||
| 26 | /* | 27 | /* |
| 27 | * Internal atomic flags for request handling | 28 | * Internal atomic flags for request handling |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 3be881ec95ad..46b0a1d1d925 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
| @@ -3368,7 +3368,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
| 3368 | cfqd->busy_queues > 1) { | 3368 | cfqd->busy_queues > 1) { |
| 3369 | cfq_del_timer(cfqd, cfqq); | 3369 | cfq_del_timer(cfqd, cfqq); |
| 3370 | cfq_clear_cfqq_wait_request(cfqq); | 3370 | cfq_clear_cfqq_wait_request(cfqq); |
| 3371 | __blk_run_queue(cfqd->queue, false); | 3371 | __blk_run_queue(cfqd->queue); |
| 3372 | } else { | 3372 | } else { |
| 3373 | cfq_blkiocg_update_idle_time_stats( | 3373 | cfq_blkiocg_update_idle_time_stats( |
| 3374 | &cfqq->cfqg->blkg); | 3374 | &cfqq->cfqg->blkg); |
| @@ -3383,7 +3383,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
| 3383 | * this new queue is RT and the current one is BE | 3383 | * this new queue is RT and the current one is BE |
| 3384 | */ | 3384 | */ |
| 3385 | cfq_preempt_queue(cfqd, cfqq); | 3385 | cfq_preempt_queue(cfqd, cfqq); |
| 3386 | __blk_run_queue(cfqd->queue, false); | 3386 | __blk_run_queue(cfqd->queue); |
| 3387 | } | 3387 | } |
| 3388 | } | 3388 | } |
| 3389 | 3389 | ||
| @@ -3743,7 +3743,7 @@ static void cfq_kick_queue(struct work_struct *work) | |||
| 3743 | struct request_queue *q = cfqd->queue; | 3743 | struct request_queue *q = cfqd->queue; |
| 3744 | 3744 | ||
| 3745 | spin_lock_irq(q->queue_lock); | 3745 | spin_lock_irq(q->queue_lock); |
| 3746 | __blk_run_queue(cfqd->queue, false); | 3746 | __blk_run_queue(cfqd->queue); |
| 3747 | spin_unlock_irq(q->queue_lock); | 3747 | spin_unlock_irq(q->queue_lock); |
| 3748 | } | 3748 | } |
| 3749 | 3749 | ||
diff --git a/block/elevator.c b/block/elevator.c index 0cdb4e7ebab4..6f6abc08bb56 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
| @@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q) | |||
| 642 | */ | 642 | */ |
| 643 | elv_drain_elevator(q); | 643 | elv_drain_elevator(q); |
| 644 | while (q->rq.elvpriv) { | 644 | while (q->rq.elvpriv) { |
| 645 | __blk_run_queue(q, false); | 645 | __blk_run_queue(q); |
| 646 | spin_unlock_irq(q->queue_lock); | 646 | spin_unlock_irq(q->queue_lock); |
| 647 | msleep(10); | 647 | msleep(10); |
| 648 | spin_lock_irq(q->queue_lock); | 648 | spin_lock_irq(q->queue_lock); |
| @@ -695,7 +695,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where) | |||
| 695 | * with anything. There's no point in delaying queue | 695 | * with anything. There's no point in delaying queue |
| 696 | * processing. | 696 | * processing. |
| 697 | */ | 697 | */ |
| 698 | __blk_run_queue(q, false); | 698 | __blk_run_queue(q); |
| 699 | break; | 699 | break; |
| 700 | 700 | ||
| 701 | case ELEVATOR_INSERT_SORT_MERGE: | 701 | case ELEVATOR_INSERT_SORT_MERGE: |
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 821040503154..7025593a58c8 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c | |||
| @@ -214,7 +214,7 @@ static int amba_pm_resume_noirq(struct device *dev) | |||
| 214 | 214 | ||
| 215 | #endif /* !CONFIG_SUSPEND */ | 215 | #endif /* !CONFIG_SUSPEND */ |
| 216 | 216 | ||
| 217 | #ifdef CONFIG_HIBERNATION | 217 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 218 | 218 | ||
| 219 | static int amba_pm_freeze(struct device *dev) | 219 | static int amba_pm_freeze(struct device *dev) |
| 220 | { | 220 | { |
| @@ -352,7 +352,7 @@ static int amba_pm_restore_noirq(struct device *dev) | |||
| 352 | return ret; | 352 | return ret; |
| 353 | } | 353 | } |
| 354 | 354 | ||
| 355 | #else /* !CONFIG_HIBERNATION */ | 355 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 356 | 356 | ||
| 357 | #define amba_pm_freeze NULL | 357 | #define amba_pm_freeze NULL |
| 358 | #define amba_pm_thaw NULL | 358 | #define amba_pm_thaw NULL |
| @@ -363,7 +363,7 @@ static int amba_pm_restore_noirq(struct device *dev) | |||
| 363 | #define amba_pm_poweroff_noirq NULL | 363 | #define amba_pm_poweroff_noirq NULL |
| 364 | #define amba_pm_restore_noirq NULL | 364 | #define amba_pm_restore_noirq NULL |
| 365 | 365 | ||
| 366 | #endif /* !CONFIG_HIBERNATION */ | 366 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 367 | 367 | ||
| 368 | #ifdef CONFIG_PM | 368 | #ifdef CONFIG_PM |
| 369 | 369 | ||
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index f051cfff18af..9e0e4fc24c46 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
| @@ -149,6 +149,7 @@ static void platform_device_release(struct device *dev) | |||
| 149 | 149 | ||
| 150 | of_device_node_put(&pa->pdev.dev); | 150 | of_device_node_put(&pa->pdev.dev); |
| 151 | kfree(pa->pdev.dev.platform_data); | 151 | kfree(pa->pdev.dev.platform_data); |
| 152 | kfree(pa->pdev.mfd_cell); | ||
| 152 | kfree(pa->pdev.resource); | 153 | kfree(pa->pdev.resource); |
| 153 | kfree(pa); | 154 | kfree(pa); |
| 154 | } | 155 | } |
| @@ -771,7 +772,7 @@ int __weak platform_pm_resume_noirq(struct device *dev) | |||
| 771 | 772 | ||
| 772 | #endif /* !CONFIG_SUSPEND */ | 773 | #endif /* !CONFIG_SUSPEND */ |
| 773 | 774 | ||
| 774 | #ifdef CONFIG_HIBERNATION | 775 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 775 | 776 | ||
| 776 | static int platform_pm_freeze(struct device *dev) | 777 | static int platform_pm_freeze(struct device *dev) |
| 777 | { | 778 | { |
| @@ -909,7 +910,7 @@ static int platform_pm_restore_noirq(struct device *dev) | |||
| 909 | return ret; | 910 | return ret; |
| 910 | } | 911 | } |
| 911 | 912 | ||
| 912 | #else /* !CONFIG_HIBERNATION */ | 913 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 913 | 914 | ||
| 914 | #define platform_pm_freeze NULL | 915 | #define platform_pm_freeze NULL |
| 915 | #define platform_pm_thaw NULL | 916 | #define platform_pm_thaw NULL |
| @@ -920,7 +921,7 @@ static int platform_pm_restore_noirq(struct device *dev) | |||
| 920 | #define platform_pm_poweroff_noirq NULL | 921 | #define platform_pm_poweroff_noirq NULL |
| 921 | #define platform_pm_restore_noirq NULL | 922 | #define platform_pm_restore_noirq NULL |
| 922 | 923 | ||
| 923 | #endif /* !CONFIG_HIBERNATION */ | 924 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 924 | 925 | ||
| 925 | #ifdef CONFIG_PM_RUNTIME | 926 | #ifdef CONFIG_PM_RUNTIME |
| 926 | 927 | ||
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 052dc53eef38..fbc5b6e7c591 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
| @@ -233,7 +233,7 @@ static int pm_op(struct device *dev, | |||
| 233 | } | 233 | } |
| 234 | break; | 234 | break; |
| 235 | #endif /* CONFIG_SUSPEND */ | 235 | #endif /* CONFIG_SUSPEND */ |
| 236 | #ifdef CONFIG_HIBERNATION | 236 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 237 | case PM_EVENT_FREEZE: | 237 | case PM_EVENT_FREEZE: |
| 238 | case PM_EVENT_QUIESCE: | 238 | case PM_EVENT_QUIESCE: |
| 239 | if (ops->freeze) { | 239 | if (ops->freeze) { |
| @@ -260,7 +260,7 @@ static int pm_op(struct device *dev, | |||
| 260 | suspend_report_result(ops->restore, error); | 260 | suspend_report_result(ops->restore, error); |
| 261 | } | 261 | } |
| 262 | break; | 262 | break; |
| 263 | #endif /* CONFIG_HIBERNATION */ | 263 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
| 264 | default: | 264 | default: |
| 265 | error = -EINVAL; | 265 | error = -EINVAL; |
| 266 | } | 266 | } |
| @@ -308,7 +308,7 @@ static int pm_noirq_op(struct device *dev, | |||
| 308 | } | 308 | } |
| 309 | break; | 309 | break; |
| 310 | #endif /* CONFIG_SUSPEND */ | 310 | #endif /* CONFIG_SUSPEND */ |
| 311 | #ifdef CONFIG_HIBERNATION | 311 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 312 | case PM_EVENT_FREEZE: | 312 | case PM_EVENT_FREEZE: |
| 313 | case PM_EVENT_QUIESCE: | 313 | case PM_EVENT_QUIESCE: |
| 314 | if (ops->freeze_noirq) { | 314 | if (ops->freeze_noirq) { |
| @@ -335,7 +335,7 @@ static int pm_noirq_op(struct device *dev, | |||
| 335 | suspend_report_result(ops->restore_noirq, error); | 335 | suspend_report_result(ops->restore_noirq, error); |
| 336 | } | 336 | } |
| 337 | break; | 337 | break; |
| 338 | #endif /* CONFIG_HIBERNATION */ | 338 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
| 339 | default: | 339 | default: |
| 340 | error = -EINVAL; | 340 | error = -EINVAL; |
| 341 | } | 341 | } |
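The three bus drivers above switch their guards from CONFIG_HIBERNATION to the narrower CONFIG_HIBERNATE_CALLBACKS and keep publishing NULL stubs when it is unset. The shape of that pattern as a hedged standalone sketch; here the config symbol comes from the compiler command line (-DCONFIG_HIBERNATE_CALLBACKS) rather than Kconfig:

    #include <stdio.h>

    struct dev_pm_ops_sketch {
        int (*freeze)(void);
        int (*thaw)(void);
    };

    #ifdef CONFIG_HIBERNATE_CALLBACKS
    static int bus_pm_freeze(void) { puts("freeze"); return 0; }
    static int bus_pm_thaw(void)   { puts("thaw");   return 0; }
    #else
    /* hibernate callbacks not configured: publish NULL stubs instead */
    #define bus_pm_freeze NULL
    #define bus_pm_thaw   NULL
    #endif

    static const struct dev_pm_ops_sketch bus_pm_ops = {
        .freeze = bus_pm_freeze,
        .thaw   = bus_pm_thaw,
    };

    int main(void)
    {
        if (bus_pm_ops.freeze)
            bus_pm_ops.freeze();
        else
            puts("no freeze callback provided");
        return 0;
    }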
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index a6feb78c404c..c58f691ec3ce 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
| @@ -96,6 +96,7 @@ config DRM_I915 | |||
| 96 | # i915 depends on ACPI_VIDEO when ACPI is enabled | 96 | # i915 depends on ACPI_VIDEO when ACPI is enabled |
| 97 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick | 97 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick |
| 98 | select BACKLIGHT_CLASS_DEVICE if ACPI | 98 | select BACKLIGHT_CLASS_DEVICE if ACPI |
| 99 | select VIDEO_OUTPUT_CONTROL if ACPI | ||
| 99 | select INPUT if ACPI | 100 | select INPUT if ACPI |
| 100 | select ACPI_VIDEO if ACPI | 101 | select ACPI_VIDEO if ACPI |
| 101 | select ACPI_BUTTON if ACPI | 102 | select ACPI_BUTTON if ACPI |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 8314a49b6b9a..90aef64b76f2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
| @@ -269,7 +269,7 @@ struct init_tbl_entry { | |||
| 269 | int (*handler)(struct nvbios *, uint16_t, struct init_exec *); | 269 | int (*handler)(struct nvbios *, uint16_t, struct init_exec *); |
| 270 | }; | 270 | }; |
| 271 | 271 | ||
| 272 | static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *); | 272 | static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *); |
| 273 | 273 | ||
| 274 | #define MACRO_INDEX_SIZE 2 | 274 | #define MACRO_INDEX_SIZE 2 |
| 275 | #define MACRO_SIZE 8 | 275 | #define MACRO_SIZE 8 |
| @@ -2011,6 +2011,27 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2011 | } | 2011 | } |
| 2012 | 2012 | ||
| 2013 | static int | 2013 | static int |
| 2014 | init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | ||
| 2015 | { | ||
| 2016 | /* | ||
| 2017 | * INIT_JUMP opcode: 0x5C ('\') | ||
| 2018 | * | ||
| 2019 | * offset (8 bit): opcode | ||
| 2020 | * offset + 1 (16 bit): offset (in bios) | ||
| 2021 | * | ||
| 2022 | * Continue execution of init table from 'offset' | ||
| 2023 | */ | ||
| 2024 | |||
| 2025 | uint16_t jmp_offset = ROM16(bios->data[offset + 1]); | ||
| 2026 | |||
| 2027 | if (!iexec->execute) | ||
| 2028 | return 3; | ||
| 2029 | |||
| 2030 | BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset); | ||
| 2031 | return jmp_offset - offset; | ||
| 2032 | } | ||
| 2033 | |||
| 2034 | static int | ||
| 2014 | init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2035 | init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2015 | { | 2036 | { |
| 2016 | /* | 2037 | /* |
| @@ -3659,6 +3680,7 @@ static struct init_tbl_entry itbl_entry[] = { | |||
| 3659 | { "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence }, | 3680 | { "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence }, |
| 3660 | /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */ | 3681 | /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */ |
| 3661 | { "INIT_SUB_DIRECT" , 0x5B, init_sub_direct }, | 3682 | { "INIT_SUB_DIRECT" , 0x5B, init_sub_direct }, |
| 3683 | { "INIT_JUMP" , 0x5C, init_jump }, | ||
| 3662 | { "INIT_I2C_IF" , 0x5E, init_i2c_if }, | 3684 | { "INIT_I2C_IF" , 0x5E, init_i2c_if }, |
| 3663 | { "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg }, | 3685 | { "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg }, |
| 3664 | { "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io }, | 3686 | { "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io }, |
| @@ -3700,8 +3722,7 @@ static struct init_tbl_entry itbl_entry[] = { | |||
| 3700 | #define MAX_TABLE_OPS 1000 | 3722 | #define MAX_TABLE_OPS 1000 |
| 3701 | 3723 | ||
| 3702 | static int | 3724 | static int |
| 3703 | parse_init_table(struct nvbios *bios, unsigned int offset, | 3725 | parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 3704 | struct init_exec *iexec) | ||
| 3705 | { | 3726 | { |
| 3706 | /* | 3727 | /* |
| 3707 | * Parses all commands in an init table. | 3728 | * Parses all commands in an init table. |
| @@ -6333,6 +6354,32 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) | |||
| 6333 | } | 6354 | } |
| 6334 | } | 6355 | } |
| 6335 | 6356 | ||
| 6357 | /* XFX GT-240X-YA | ||
| 6358 | * | ||
| 6359 | * So many things wrong here, replace the entire encoder table.. | ||
| 6360 | */ | ||
| 6361 | if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) { | ||
| 6362 | if (idx == 0) { | ||
| 6363 | *conn = 0x02001300; /* VGA, connector 1 */ | ||
| 6364 | *conf = 0x00000028; | ||
| 6365 | } else | ||
| 6366 | if (idx == 1) { | ||
| 6367 | *conn = 0x01010312; /* DVI, connector 0 */ | ||
| 6368 | *conf = 0x00020030; | ||
| 6369 | } else | ||
| 6370 | if (idx == 2) { | ||
| 6371 | *conn = 0x01010310; /* VGA, connector 0 */ | ||
| 6372 | *conf = 0x00000028; | ||
| 6373 | } else | ||
| 6374 | if (idx == 3) { | ||
| 6375 | *conn = 0x02022362; /* HDMI, connector 2 */ | ||
| 6376 | *conf = 0x00020010; | ||
| 6377 | } else { | ||
| 6378 | *conn = 0x0000000e; /* EOL */ | ||
| 6379 | *conf = 0x00000000; | ||
| 6380 | } | ||
| 6381 | } | ||
| 6382 | |||
| 6336 | return true; | 6383 | return true; |
| 6337 | } | 6384 | } |
| 6338 | 6385 | ||
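The new init_jump() handler works because parse_init_table() advances by whatever each handler returns: normal opcodes return their encoded length, while INIT_JUMP returns jmp_offset - offset so the parser lands exactly on the jump target (or 3, the opcode's own length, when execution is disabled). A small self-contained interpreter showing that return-value convention, using made-up opcodes rather than the real VBIOS tables:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy opcode set: 0x01 prints the next byte (2 bytes long), 0x5C jumps
     * to the little-endian 16-bit offset that follows, 0x71 ends the table. */
    static int parse_table(const uint8_t *tbl, int offset)
    {
        for (int count = 0; count < 100; count++) {   /* MAX_TABLE_OPS-style cap */
            int len;

            switch (tbl[offset]) {
            case 0x01:
                printf("value %u at offset 0x%02x\n", tbl[offset + 1], offset);
                len = 2;
                break;
            case 0x5C: {                              /* INIT_JUMP-style opcode */
                int target = tbl[offset + 1] | (tbl[offset + 2] << 8);
                len = target - offset;                /* step straight to the target */
                break;
            }
            case 0x71:
                return 0;                             /* end of table */
            default:
                return -1;                            /* unknown opcode */
            }
            offset += len;
        }
        return -1;                                    /* too many ops, bail out */
    }

    int main(void)
    {
        /* 0x00: print 7, 0x02: jump to 0x07, 0x05: (skipped) print 9, 0x07: done */
        const uint8_t table[] = { 0x01, 7, 0x5C, 0x07, 0x00, 0x01, 9, 0x71 };

        return parse_table(table, 0);
    }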
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 57e5302503db..856d56a98d1e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -1190,7 +1190,7 @@ extern int nv50_graph_load_context(struct nouveau_channel *); | |||
| 1190 | extern int nv50_graph_unload_context(struct drm_device *); | 1190 | extern int nv50_graph_unload_context(struct drm_device *); |
| 1191 | extern int nv50_grctx_init(struct nouveau_grctx *); | 1191 | extern int nv50_grctx_init(struct nouveau_grctx *); |
| 1192 | extern void nv50_graph_tlb_flush(struct drm_device *dev); | 1192 | extern void nv50_graph_tlb_flush(struct drm_device *dev); |
| 1193 | extern void nv86_graph_tlb_flush(struct drm_device *dev); | 1193 | extern void nv84_graph_tlb_flush(struct drm_device *dev); |
| 1194 | extern struct nouveau_enum nv50_data_error_names[]; | 1194 | extern struct nouveau_enum nv50_data_error_names[]; |
| 1195 | 1195 | ||
| 1196 | /* nvc0_graph.c */ | 1196 | /* nvc0_graph.c */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 2683377f4131..78f467fe30be 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
| @@ -552,6 +552,7 @@ nouveau_mem_timing_init(struct drm_device *dev) | |||
| 552 | u8 tRC; /* Byte 9 */ | 552 | u8 tRC; /* Byte 9 */ |
| 553 | u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; | 553 | u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; |
| 554 | u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; | 554 | u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; |
| 555 | u8 magic_number = 0; /* Yeah... sorry */ | ||
| 555 | u8 *mem = NULL, *entry; | 556 | u8 *mem = NULL, *entry; |
| 556 | int i, recordlen, entries; | 557 | int i, recordlen, entries; |
| 557 | 558 | ||
| @@ -596,6 +597,12 @@ nouveau_mem_timing_init(struct drm_device *dev) | |||
| 596 | if (!memtimings->timing) | 597 | if (!memtimings->timing) |
| 597 | return; | 598 | return; |
| 598 | 599 | ||
| 600 | /* Get "some number" from the timing reg for NV_40 | ||
| 601 | * Used in calculations later */ | ||
| 602 | if(dev_priv->card_type == NV_40) { | ||
| 603 | magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24; | ||
| 604 | } | ||
| 605 | |||
| 599 | entry = mem + mem[1]; | 606 | entry = mem + mem[1]; |
| 600 | for (i = 0; i < entries; i++, entry += recordlen) { | 607 | for (i = 0; i < entries; i++, entry += recordlen) { |
| 601 | struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; | 608 | struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; |
| @@ -635,36 +642,51 @@ nouveau_mem_timing_init(struct drm_device *dev) | |||
| 635 | 642 | ||
| 636 | /* XXX: I don't trust the -1's and +1's... they must come | 643 | /* XXX: I don't trust the -1's and +1's... they must come |
| 637 | * from somewhere! */ | 644 | * from somewhere! */ |
| 638 | timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 | | 645 | timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 | |
| 639 | tUNK_18 << 16 | | 646 | tUNK_18 << 16 | |
| 640 | (tUNK_1 + tUNK_19 + 1) << 8 | | 647 | (tUNK_1 + tUNK_19 + 1 + magic_number) << 8; |
| 641 | (tUNK_2 - 1)); | 648 | if(dev_priv->chipset == 0xa8) { |
| 649 | timing->reg_100224 |= (tUNK_2 - 1); | ||
| 650 | } else { | ||
| 651 | timing->reg_100224 |= (tUNK_2 + 2 - magic_number); | ||
| 652 | } | ||
| 642 | 653 | ||
| 643 | timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); | 654 | timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); |
| 644 | if(recordlen > 19) { | 655 | if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) { |
| 645 | timing->reg_100228 += (tUNK_19 - 1) << 24; | 656 | timing->reg_100228 |= (tUNK_19 - 1) << 24; |
| 646 | }/* I cannot back-up this else-statement right now | 657 | } |
| 647 | else { | 658 | |
| 648 | timing->reg_100228 += tUNK_12 << 24; | 659 | if(dev_priv->card_type == NV_40) { |
| 649 | }*/ | 660 | /* NV40: don't know what the rest of the regs are.. |
| 650 | 661 | * And don't need to know either */ | |
| 651 | /* XXX: reg_10022c */ | 662 | timing->reg_100228 |= 0x20200000 | magic_number << 24; |
| 652 | timing->reg_10022c = tUNK_2 - 1; | 663 | } else if(dev_priv->card_type >= NV_50) { |
| 653 | 664 | /* XXX: reg_10022c */ | |
| 654 | timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | | 665 | timing->reg_10022c = tUNK_2 - 1; |
| 655 | tUNK_13 << 8 | tUNK_13); | 666 | |
| 656 | 667 | timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | | |
| 657 | /* XXX: +6? */ | 668 | tUNK_13 << 8 | tUNK_13); |
| 658 | timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); | 669 | |
| 659 | timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; | 670 | timing->reg_100234 = (tRAS << 24 | tRC); |
| 660 | 671 | timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; | |
| 661 | /* XXX; reg_100238, reg_10023c | 672 | |
| 662 | * reg: 0x00?????? | 673 | if(dev_priv->chipset < 0xa3) { |
| 663 | * reg_10023c: | 674 | timing->reg_100234 |= (tUNK_2 + 2) << 8; |
| 664 | * 0 for pre-NV50 cards | 675 | } else { |
| 665 | * 0x????0202 for NV50+ cards (empirical evidence) */ | 676 | /* XXX: +6? */ |
| 666 | if(dev_priv->card_type >= NV_50) { | 677 | timing->reg_100234 |= (tUNK_19 + 6) << 8; |
| 678 | } | ||
| 679 | |||
| 680 | /* XXX; reg_100238, reg_10023c | ||
| 681 | * reg_100238: 0x00?????? | ||
| 682 | * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */ | ||
| 667 | timing->reg_10023c = 0x202; | 683 | timing->reg_10023c = 0x202; |
| 684 | if(dev_priv->chipset < 0xa3) { | ||
| 685 | timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16; | ||
| 686 | } else { | ||
| 687 | /* currently unknown | ||
| 688 | * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */ | ||
| 689 | } | ||
| 668 | } | 690 | } |
| 669 | 691 | ||
| 670 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, | 692 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, |
| @@ -675,7 +697,7 @@ nouveau_mem_timing_init(struct drm_device *dev) | |||
| 675 | timing->reg_100238, timing->reg_10023c); | 697 | timing->reg_100238, timing->reg_10023c); |
| 676 | } | 698 | } |
| 677 | 699 | ||
| 678 | memtimings->nr_timing = entries; | 700 | memtimings->nr_timing = entries; |
| 679 | memtimings->supported = true; | 701 | memtimings->supported = true; |
| 680 | } | 702 | } |
| 681 | 703 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index ac62a1b8c4fc..670e3cb697ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c | |||
| @@ -134,7 +134,7 @@ nouveau_perf_init(struct drm_device *dev) | |||
| 134 | case 0x13: | 134 | case 0x13: |
| 135 | case 0x15: | 135 | case 0x15: |
| 136 | perflvl->fanspeed = entry[55]; | 136 | perflvl->fanspeed = entry[55]; |
| 137 | perflvl->voltage = entry[56]; | 137 | perflvl->voltage = (recordlen > 56) ? entry[56] : 0; |
| 138 | perflvl->core = ROM32(entry[1]) * 10; | 138 | perflvl->core = ROM32(entry[1]) * 10; |
| 139 | perflvl->memory = ROM32(entry[5]) * 20; | 139 | perflvl->memory = ROM32(entry[5]) * 20; |
| 140 | break; | 140 | break; |
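The one-line nouveau_perf.c fix reads entry[56] only when the record is long enough and falls back to 0 otherwise. The same defensive idiom in isolation, with a fabricated record layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Read an optional trailing field only if the record really has it. */
    static uint8_t read_voltage(const uint8_t *entry, int recordlen)
    {
        return (recordlen > 56) ? entry[56] : 0;
    }

    int main(void)
    {
        uint8_t long_rec[64] = { 0 };
        uint8_t short_rec[56] = { 0 };

        long_rec[56] = 42;
        printf("long record:  %u\n", read_voltage(long_rec, (int)sizeof(long_rec)));
        printf("short record: %u\n", read_voltage(short_rec, (int)sizeof(short_rec)));
        return 0;
    }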
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 5bb2859001e2..6e2b1a6caa2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
| @@ -376,15 +376,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 376 | engine->graph.destroy_context = nv50_graph_destroy_context; | 376 | engine->graph.destroy_context = nv50_graph_destroy_context; |
| 377 | engine->graph.load_context = nv50_graph_load_context; | 377 | engine->graph.load_context = nv50_graph_load_context; |
| 378 | engine->graph.unload_context = nv50_graph_unload_context; | 378 | engine->graph.unload_context = nv50_graph_unload_context; |
| 379 | if (dev_priv->chipset != 0x86) | 379 | if (dev_priv->chipset == 0x50 || |
| 380 | dev_priv->chipset == 0xac) | ||
| 380 | engine->graph.tlb_flush = nv50_graph_tlb_flush; | 381 | engine->graph.tlb_flush = nv50_graph_tlb_flush; |
| 381 | else { | 382 | else |
| 382 | /* from what i can see nvidia do this on every | 383 | engine->graph.tlb_flush = nv84_graph_tlb_flush; |
| 383 | * pre-NVA3 board except NVAC, but, we've only | ||
| 384 | * ever seen problems on NV86 | ||
| 385 | */ | ||
| 386 | engine->graph.tlb_flush = nv86_graph_tlb_flush; | ||
| 387 | } | ||
| 388 | engine->fifo.channels = 128; | 384 | engine->fifo.channels = 128; |
| 389 | engine->fifo.init = nv50_fifo_init; | 385 | engine->fifo.init = nv50_fifo_init; |
| 390 | engine->fifo.takedown = nv50_fifo_takedown; | 386 | engine->fifo.takedown = nv50_fifo_takedown; |
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index c82db37d9f41..12098bf839c4 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c | |||
| @@ -581,12 +581,13 @@ static void nv04_dfp_restore(struct drm_encoder *encoder) | |||
| 581 | int head = nv_encoder->restore.head; | 581 | int head = nv_encoder->restore.head; |
| 582 | 582 | ||
| 583 | if (nv_encoder->dcb->type == OUTPUT_LVDS) { | 583 | if (nv_encoder->dcb->type == OUTPUT_LVDS) { |
| 584 | struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode; | 584 | struct nouveau_connector *connector = |
| 585 | if (native_mode) | 585 | nouveau_encoder_connector_get(nv_encoder); |
| 586 | call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON, | 586 | |
| 587 | native_mode->clock); | 587 | if (connector && connector->native_mode) |
| 588 | else | 588 | call_lvds_script(dev, nv_encoder->dcb, head, |
| 589 | NV_ERROR(dev, "Not restoring LVDS without native mode\n"); | 589 | LVDS_PANEL_ON, |
| 590 | connector->native_mode->clock); | ||
| 590 | 591 | ||
| 591 | } else if (nv_encoder->dcb->type == OUTPUT_TMDS) { | 592 | } else if (nv_encoder->dcb->type == OUTPUT_TMDS) { |
| 592 | int clock = nouveau_hw_pllvals_to_clk | 593 | int clock = nouveau_hw_pllvals_to_clk |
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 2b9984027f41..a19ccaa025b3 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
| @@ -469,9 +469,6 @@ nv50_crtc_wait_complete(struct drm_crtc *crtc) | |||
| 469 | 469 | ||
| 470 | start = ptimer->read(dev); | 470 | start = ptimer->read(dev); |
| 471 | do { | 471 | do { |
| 472 | nv_wr32(dev, 0x61002c, 0x370); | ||
| 473 | nv_wr32(dev, 0x000140, 1); | ||
| 474 | |||
| 475 | if (nv_ro32(disp->ntfy, 0x000)) | 472 | if (nv_ro32(disp->ntfy, 0x000)) |
| 476 | return 0; | 473 | return 0; |
| 477 | } while (ptimer->read(dev) - start < 2000000000ULL); | 474 | } while (ptimer->read(dev) - start < 2000000000ULL); |
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index a2cfaa691e9b..c8e83c1a4de8 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c | |||
| @@ -186,6 +186,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo) | |||
| 186 | nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); | 186 | nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); |
| 187 | 187 | ||
| 188 | evo->dma.max = (4096/4) - 2; | 188 | evo->dma.max = (4096/4) - 2; |
| 189 | evo->dma.max &= ~7; | ||
| 189 | evo->dma.put = 0; | 190 | evo->dma.put = 0; |
| 190 | evo->dma.cur = evo->dma.put; | 191 | evo->dma.cur = evo->dma.put; |
| 191 | evo->dma.free = evo->dma.max - evo->dma.cur; | 192 | evo->dma.free = evo->dma.max - evo->dma.cur; |
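The nv50_evo.c change rounds dma.max down to a multiple of 8 by clearing the low three bits, so the push buffer size never ends on a partial 8-dword step (that reading of the intent is an inference; the mask itself is what the hunk adds). The masking trick on its own, as a runnable check:

    #include <stdio.h>

    /* Align a value down to a multiple of 8 by clearing its low three bits. */
    static unsigned int align_down8(unsigned int v)
    {
        return v & ~7u;
    }

    int main(void)
    {
        unsigned int max = (4096 / 4) - 2;   /* 1022, as in the hunk above */

        printf("%u -> %u\n", max, align_down8(max));   /* 1022 -> 1016 */
        return 0;
    }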
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 8675b00caf18..b02a5b1e7d37 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
| @@ -503,7 +503,7 @@ nv50_graph_tlb_flush(struct drm_device *dev) | |||
| 503 | } | 503 | } |
| 504 | 504 | ||
| 505 | void | 505 | void |
| 506 | nv86_graph_tlb_flush(struct drm_device *dev) | 506 | nv84_graph_tlb_flush(struct drm_device *dev) |
| 507 | { | 507 | { |
| 508 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 508 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 509 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | 509 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; |
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c index 69af0ba7edd3..a0a2a0277f73 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vm.c +++ b/drivers/gpu/drm/nouveau/nvc0_vm.c | |||
| @@ -104,20 +104,26 @@ nvc0_vm_flush(struct nouveau_vm *vm) | |||
| 104 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | 104 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; |
| 105 | struct drm_device *dev = vm->dev; | 105 | struct drm_device *dev = vm->dev; |
| 106 | struct nouveau_vm_pgd *vpgd; | 106 | struct nouveau_vm_pgd *vpgd; |
| 107 | u32 r100c80, engine; | 107 | u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5; |
| 108 | 108 | ||
| 109 | pinstmem->flush(vm->dev); | 109 | pinstmem->flush(vm->dev); |
| 110 | 110 | ||
| 111 | if (vm == dev_priv->chan_vm) | 111 | spin_lock(&dev_priv->ramin_lock); |
| 112 | engine = 1; | ||
| 113 | else | ||
| 114 | engine = 5; | ||
| 115 | |||
| 116 | list_for_each_entry(vpgd, &vm->pgd_list, head) { | 112 | list_for_each_entry(vpgd, &vm->pgd_list, head) { |
| 117 | r100c80 = nv_rd32(dev, 0x100c80); | 113 | /* looks like maybe a "free flush slots" counter, the |
| 114 | * faster you write to 0x100cbc the more it decreases | ||
| 115 | */ | ||
| 116 | if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) { | ||
| 117 | NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n", | ||
| 118 | nv_rd32(dev, 0x100c80), engine); | ||
| 119 | } | ||
| 118 | nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8); | 120 | nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8); |
| 119 | nv_wr32(dev, 0x100cbc, 0x80000000 | engine); | 121 | nv_wr32(dev, 0x100cbc, 0x80000000 | engine); |
| 120 | if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80)) | 122 | /* wait for flush to be queued? */ |
| 121 | NV_ERROR(dev, "vm flush timeout eng %d\n", engine); | 123 | if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) { |
| 124 | NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n", | ||
| 125 | nv_rd32(dev, 0x100c80), engine); | ||
| 126 | } | ||
| 122 | } | 127 | } |
| 128 | spin_unlock(&dev_priv->ramin_lock); | ||
| 123 | } | 129 | } |
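The nvc0 flush now brackets each write to 0x100cbc with two polls under ramin_lock: wait until 0x100c80 shows a free flush slot before submitting, then wait for bit 15 afterwards. The register semantics are the commit's own guess, as its comments say; below is a generic poll-until helper in the spirit of nv_wait_ne(), driven by a fake register read:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_reg;                 /* stands in for register 0x100c80 */

    static uint32_t reg_read(void)
    {
        fake_reg += 0x00010000;               /* pretend a flush slot frees up */
        return fake_reg;
    }

    /* Poll until (reg & mask) != val or the retry budget runs out,
     * like nv_wait_ne() but counting iterations instead of nanoseconds. */
    static bool wait_ne(uint32_t mask, uint32_t val, int tries)
    {
        while (tries--) {
            if ((reg_read() & mask) != val)
                return true;
        }
        return false;
    }

    int main(void)
    {
        if (!wait_ne(0x00ff0000, 0x00000000, 10))
            fprintf(stderr, "vm timeout 0: 0x%08x\n", (unsigned)fake_reg);
        else
            printf("free flush slot, safe to write 0x100cbc\n");
        return 0;
    }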
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 258fa5e7a2d9..d71d375149f8 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include "atom.h" | 32 | #include "atom.h" |
| 33 | #include "atom-names.h" | 33 | #include "atom-names.h" |
| 34 | #include "atom-bits.h" | 34 | #include "atom-bits.h" |
| 35 | #include "radeon.h" | ||
| 35 | 36 | ||
| 36 | #define ATOM_COND_ABOVE 0 | 37 | #define ATOM_COND_ABOVE 0 |
| 37 | #define ATOM_COND_ABOVEOREQUAL 1 | 38 | #define ATOM_COND_ABOVEOREQUAL 1 |
| @@ -101,7 +102,9 @@ static void debug_print_spaces(int n) | |||
| 101 | static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | 102 | static uint32_t atom_iio_execute(struct atom_context *ctx, int base, |
| 102 | uint32_t index, uint32_t data) | 103 | uint32_t index, uint32_t data) |
| 103 | { | 104 | { |
| 105 | struct radeon_device *rdev = ctx->card->dev->dev_private; | ||
| 104 | uint32_t temp = 0xCDCDCDCD; | 106 | uint32_t temp = 0xCDCDCDCD; |
| 107 | |||
| 105 | while (1) | 108 | while (1) |
| 106 | switch (CU8(base)) { | 109 | switch (CU8(base)) { |
| 107 | case ATOM_IIO_NOP: | 110 | case ATOM_IIO_NOP: |
| @@ -112,7 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | |||
| 112 | base += 3; | 115 | base += 3; |
| 113 | break; | 116 | break; |
| 114 | case ATOM_IIO_WRITE: | 117 | case ATOM_IIO_WRITE: |
| 115 | (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); | 118 | if (rdev->family == CHIP_RV515) |
| 119 | (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); | ||
| 116 | ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); | 120 | ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); |
| 117 | base += 3; | 121 | base += 3; |
| 118 | break; | 122 | break; |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b41ec59c7100..9d516a8c4dfa 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -531,6 +531,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 531 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 531 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
| 532 | else | 532 | else |
| 533 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 533 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
| 534 | |||
| 535 | if ((rdev->family == CHIP_R600) || | ||
| 536 | (rdev->family == CHIP_RV610) || | ||
| 537 | (rdev->family == CHIP_RV630) || | ||
| 538 | (rdev->family == CHIP_RV670)) | ||
| 539 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
| 534 | } else { | 540 | } else { |
| 535 | pll->flags |= RADEON_PLL_LEGACY; | 541 | pll->flags |= RADEON_PLL_LEGACY; |
| 536 | 542 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 0b0cc74c08c0..3453910ee0f3 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -120,11 +120,16 @@ void evergreen_pm_misc(struct radeon_device *rdev) | |||
| 120 | struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; | 120 | struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; |
| 121 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; | 121 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; |
| 122 | 122 | ||
| 123 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | 123 | if (voltage->type == VOLTAGE_SW) { |
| 124 | if (voltage->voltage != rdev->pm.current_vddc) { | 124 | if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) { |
| 125 | radeon_atom_set_voltage(rdev, voltage->voltage); | 125 | radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); |
| 126 | rdev->pm.current_vddc = voltage->voltage; | 126 | rdev->pm.current_vddc = voltage->voltage; |
| 127 | DRM_DEBUG("Setting: v: %d\n", voltage->voltage); | 127 | DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage); |
| 128 | } | ||
| 129 | if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) { | ||
| 130 | radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI); | ||
| 131 | rdev->pm.current_vddci = voltage->vddci; | ||
| 132 | DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci); | ||
| 128 | } | 133 | } |
| 129 | } | 134 | } |
| 130 | } | 135 | } |
| @@ -3036,9 +3041,6 @@ int evergreen_init(struct radeon_device *rdev) | |||
| 3036 | { | 3041 | { |
| 3037 | int r; | 3042 | int r; |
| 3038 | 3043 | ||
| 3039 | r = radeon_dummy_page_init(rdev); | ||
| 3040 | if (r) | ||
| 3041 | return r; | ||
| 3042 | /* This doesn't do much */ | 3044 | /* This doesn't do much */ |
| 3043 | r = radeon_gem_init(rdev); | 3045 | r = radeon_gem_init(rdev); |
| 3044 | if (r) | 3046 | if (r) |
| @@ -3150,7 +3152,6 @@ void evergreen_fini(struct radeon_device *rdev) | |||
| 3150 | radeon_atombios_fini(rdev); | 3152 | radeon_atombios_fini(rdev); |
| 3151 | kfree(rdev->bios); | 3153 | kfree(rdev->bios); |
| 3152 | rdev->bios = NULL; | 3154 | rdev->bios = NULL; |
| 3153 | radeon_dummy_page_fini(rdev); | ||
| 3154 | } | 3155 | } |
| 3155 | 3156 | ||
| 3156 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev) | 3157 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev) |
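evergreen_pm_misc() now programs VDDC and VDDCI independently, and only when the requested value is non-zero and differs from the cached current value, which avoids redundant ATOM calls on every power-state change. The caching idiom in isolation; set_voltage() here is a stand-in, not the radeon function:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t current_vddc, current_vddci;

    static void set_voltage(const char *rail, uint16_t mv)
    {
        printf("programming %s to %u\n", rail, mv);   /* stand-in for the ATOM call */
    }

    /* Apply a requested voltage pair, skipping zero (unknown) values and
     * values that already match the cached state. */
    static void apply_voltages(uint16_t vddc, uint16_t vddci)
    {
        if (vddc && vddc != current_vddc) {
            set_voltage("vddc", vddc);
            current_vddc = vddc;
        }
        if (vddci && vddci != current_vddci) {
            set_voltage("vddci", vddci);
            current_vddci = vddci;
        }
    }

    int main(void)
    {
        apply_voltages(1100, 850);   /* both change: two writes */
        apply_voltages(1100, 900);   /* only vddci changes: one write */
        apply_voltages(0, 900);      /* nothing to do */
        return 0;
    }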
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index be271c42de4d..15d58292677a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -587,7 +587,7 @@ void r600_pm_misc(struct radeon_device *rdev) | |||
| 587 | 587 | ||
| 588 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | 588 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { |
| 589 | if (voltage->voltage != rdev->pm.current_vddc) { | 589 | if (voltage->voltage != rdev->pm.current_vddc) { |
| 590 | radeon_atom_set_voltage(rdev, voltage->voltage); | 590 | radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); |
| 591 | rdev->pm.current_vddc = voltage->voltage; | 591 | rdev->pm.current_vddc = voltage->voltage; |
| 592 | DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); | 592 | DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); |
| 593 | } | 593 | } |
| @@ -2509,9 +2509,6 @@ int r600_init(struct radeon_device *rdev) | |||
| 2509 | { | 2509 | { |
| 2510 | int r; | 2510 | int r; |
| 2511 | 2511 | ||
| 2512 | r = radeon_dummy_page_init(rdev); | ||
| 2513 | if (r) | ||
| 2514 | return r; | ||
| 2515 | if (r600_debugfs_mc_info_init(rdev)) { | 2512 | if (r600_debugfs_mc_info_init(rdev)) { |
| 2516 | DRM_ERROR("Failed to register debugfs file for mc !\n"); | 2513 | DRM_ERROR("Failed to register debugfs file for mc !\n"); |
| 2517 | } | 2514 | } |
| @@ -2625,7 +2622,6 @@ void r600_fini(struct radeon_device *rdev) | |||
| 2625 | radeon_atombios_fini(rdev); | 2622 | radeon_atombios_fini(rdev); |
| 2626 | kfree(rdev->bios); | 2623 | kfree(rdev->bios); |
| 2627 | rdev->bios = NULL; | 2624 | rdev->bios = NULL; |
| 2628 | radeon_dummy_page_fini(rdev); | ||
| 2629 | } | 2625 | } |
| 2630 | 2626 | ||
| 2631 | 2627 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 93f536594c73..ba643b576054 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -177,7 +177,7 @@ void radeon_pm_suspend(struct radeon_device *rdev); | |||
| 177 | void radeon_pm_resume(struct radeon_device *rdev); | 177 | void radeon_pm_resume(struct radeon_device *rdev); |
| 178 | void radeon_combios_get_power_modes(struct radeon_device *rdev); | 178 | void radeon_combios_get_power_modes(struct radeon_device *rdev); |
| 179 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 179 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
| 180 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); | 180 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); |
| 181 | void rs690_pm_info(struct radeon_device *rdev); | 181 | void rs690_pm_info(struct radeon_device *rdev); |
| 182 | extern int rv6xx_get_temp(struct radeon_device *rdev); | 182 | extern int rv6xx_get_temp(struct radeon_device *rdev); |
| 183 | extern int rv770_get_temp(struct radeon_device *rdev); | 183 | extern int rv770_get_temp(struct radeon_device *rdev); |
| @@ -767,7 +767,9 @@ struct radeon_voltage { | |||
| 767 | u8 vddci_id; /* index into vddci voltage table */ | 767 | u8 vddci_id; /* index into vddci voltage table */ |
| 768 | bool vddci_enabled; | 768 | bool vddci_enabled; |
| 769 | /* r6xx+ sw */ | 769 | /* r6xx+ sw */ |
| 770 | u32 voltage; | 770 | u16 voltage; |
| 771 | /* evergreen+ vddci */ | ||
| 772 | u16 vddci; | ||
| 771 | }; | 773 | }; |
| 772 | 774 | ||
| 773 | /* clock mode flags */ | 775 | /* clock mode flags */ |
| @@ -835,10 +837,12 @@ struct radeon_pm { | |||
| 835 | int default_power_state_index; | 837 | int default_power_state_index; |
| 836 | u32 current_sclk; | 838 | u32 current_sclk; |
| 837 | u32 current_mclk; | 839 | u32 current_mclk; |
| 838 | u32 current_vddc; | 840 | u16 current_vddc; |
| 841 | u16 current_vddci; | ||
| 839 | u32 default_sclk; | 842 | u32 default_sclk; |
| 840 | u32 default_mclk; | 843 | u32 default_mclk; |
| 841 | u32 default_vddc; | 844 | u16 default_vddc; |
| 845 | u16 default_vddci; | ||
| 842 | struct radeon_i2c_chan *i2c_bus; | 846 | struct radeon_i2c_chan *i2c_bus; |
| 843 | /* selected pm method */ | 847 | /* selected pm method */ |
| 844 | enum radeon_pm_method pm_method; | 848 | enum radeon_pm_method pm_method; |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index eb888ee5f674..ca576191d058 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev) | |||
| 94 | rdev->mc_rreg = &rs600_mc_rreg; | 94 | rdev->mc_rreg = &rs600_mc_rreg; |
| 95 | rdev->mc_wreg = &rs600_mc_wreg; | 95 | rdev->mc_wreg = &rs600_mc_wreg; |
| 96 | } | 96 | } |
| 97 | if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) { | 97 | if (rdev->family >= CHIP_R600) { |
| 98 | rdev->pciep_rreg = &r600_pciep_rreg; | 98 | rdev->pciep_rreg = &r600_pciep_rreg; |
| 99 | rdev->pciep_wreg = &r600_pciep_wreg; | 99 | rdev->pciep_wreg = &r600_pciep_wreg; |
| 100 | } | 100 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 99768d9d91da..f5d12fb103fa 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -2176,24 +2176,27 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r | |||
| 2176 | } | 2176 | } |
| 2177 | } | 2177 | } |
| 2178 | 2178 | ||
| 2179 | static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev) | 2179 | static void radeon_atombios_get_default_voltages(struct radeon_device *rdev, |
| 2180 | u16 *vddc, u16 *vddci) | ||
| 2180 | { | 2181 | { |
| 2181 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 2182 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
| 2182 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); | 2183 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); |
| 2183 | u8 frev, crev; | 2184 | u8 frev, crev; |
| 2184 | u16 data_offset; | 2185 | u16 data_offset; |
| 2185 | union firmware_info *firmware_info; | 2186 | union firmware_info *firmware_info; |
| 2186 | u16 vddc = 0; | 2187 | |
| 2188 | *vddc = 0; | ||
| 2189 | *vddci = 0; | ||
| 2187 | 2190 | ||
| 2188 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | 2191 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
| 2189 | &frev, &crev, &data_offset)) { | 2192 | &frev, &crev, &data_offset)) { |
| 2190 | firmware_info = | 2193 | firmware_info = |
| 2191 | (union firmware_info *)(mode_info->atom_context->bios + | 2194 | (union firmware_info *)(mode_info->atom_context->bios + |
| 2192 | data_offset); | 2195 | data_offset); |
| 2193 | vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); | 2196 | *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); |
| 2197 | if ((frev == 2) && (crev >= 2)) | ||
| 2198 | *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage); | ||
| 2194 | } | 2199 | } |
| 2195 | |||
| 2196 | return vddc; | ||
| 2197 | } | 2200 | } |
| 2198 | 2201 | ||
| 2199 | static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev, | 2202 | static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev, |
| @@ -2203,7 +2206,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde | |||
| 2203 | int j; | 2206 | int j; |
| 2204 | u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); | 2207 | u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); |
| 2205 | u32 misc2 = le16_to_cpu(non_clock_info->usClassification); | 2208 | u32 misc2 = le16_to_cpu(non_clock_info->usClassification); |
| 2206 | u16 vddc = radeon_atombios_get_default_vddc(rdev); | 2209 | u16 vddc, vddci; |
| 2210 | |||
| 2211 | radeon_atombios_get_default_voltages(rdev, &vddc, &vddci); | ||
| 2207 | 2212 | ||
| 2208 | rdev->pm.power_state[state_index].misc = misc; | 2213 | rdev->pm.power_state[state_index].misc = misc; |
| 2209 | rdev->pm.power_state[state_index].misc2 = misc2; | 2214 | rdev->pm.power_state[state_index].misc2 = misc2; |
| @@ -2244,6 +2249,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde | |||
| 2244 | rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; | 2249 | rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; |
| 2245 | rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; | 2250 | rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; |
| 2246 | rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage; | 2251 | rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage; |
| 2252 | rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci; | ||
| 2247 | } else { | 2253 | } else { |
| 2248 | /* patch the table values with the default sclk/mclk from firmware info */ | 2254 | /* patch the table values with the default sclk/mclk from firmware info */ |
| 2249 | for (j = 0; j < mode_index; j++) { | 2255 | for (j = 0; j < mode_index; j++) { |
| @@ -2286,6 +2292,8 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, | |||
| 2286 | VOLTAGE_SW; | 2292 | VOLTAGE_SW; |
| 2287 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | 2293 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = |
| 2288 | le16_to_cpu(clock_info->evergreen.usVDDC); | 2294 | le16_to_cpu(clock_info->evergreen.usVDDC); |
| 2295 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci = | ||
| 2296 | le16_to_cpu(clock_info->evergreen.usVDDCI); | ||
| 2289 | } else { | 2297 | } else { |
| 2290 | sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); | 2298 | sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); |
| 2291 | sclk |= clock_info->r600.ucEngineClockHigh << 16; | 2299 | sclk |= clock_info->r600.ucEngineClockHigh << 16; |
| @@ -2577,25 +2585,25 @@ union set_voltage { | |||
| 2577 | struct _SET_VOLTAGE_PARAMETERS_V2 v2; | 2585 | struct _SET_VOLTAGE_PARAMETERS_V2 v2; |
| 2578 | }; | 2586 | }; |
| 2579 | 2587 | ||
| 2580 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level) | 2588 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type) |
| 2581 | { | 2589 | { |
| 2582 | union set_voltage args; | 2590 | union set_voltage args; |
| 2583 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); | 2591 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); |
| 2584 | u8 frev, crev, volt_index = level; | 2592 | u8 frev, crev, volt_index = voltage_level; |
| 2585 | 2593 | ||
| 2586 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | 2594 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
| 2587 | return; | 2595 | return; |
| 2588 | 2596 | ||
| 2589 | switch (crev) { | 2597 | switch (crev) { |
| 2590 | case 1: | 2598 | case 1: |
| 2591 | args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; | 2599 | args.v1.ucVoltageType = voltage_type; |
| 2592 | args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; | 2600 | args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; |
| 2593 | args.v1.ucVoltageIndex = volt_index; | 2601 | args.v1.ucVoltageIndex = volt_index; |
| 2594 | break; | 2602 | break; |
| 2595 | case 2: | 2603 | case 2: |
| 2596 | args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; | 2604 | args.v2.ucVoltageType = voltage_type; |
| 2597 | args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; | 2605 | args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; |
| 2598 | args.v2.usVoltageLevel = cpu_to_le16(level); | 2606 | args.v2.usVoltageLevel = cpu_to_le16(voltage_level); |
| 2599 | break; | 2607 | break; |
| 2600 | default: | 2608 | default: |
| 2601 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | 2609 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 9e59868d354e..bbcd1dd7bac0 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
| @@ -79,7 +79,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) | |||
| 79 | scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; | 79 | scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; |
| 80 | else | 80 | else |
| 81 | scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; | 81 | scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; |
| 82 | seq = rdev->wb.wb[scratch_index/4]; | 82 | seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]); |
| 83 | } else | 83 | } else |
| 84 | seq = RREG32(rdev->fence_drv.scratch_reg); | 84 | seq = RREG32(rdev->fence_drv.scratch_reg); |
| 85 | if (seq != rdev->fence_drv.last_seq) { | 85 | if (seq != rdev->fence_drv.last_seq) { |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index f0534ef2f331..8a955bbdb608 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
| @@ -285,4 +285,6 @@ void radeon_gart_fini(struct radeon_device *rdev) | |||
| 285 | rdev->gart.pages = NULL; | 285 | rdev->gart.pages = NULL; |
| 286 | rdev->gart.pages_addr = NULL; | 286 | rdev->gart.pages_addr = NULL; |
| 287 | rdev->gart.ttm_alloced = NULL; | 287 | rdev->gart.ttm_alloced = NULL; |
| 288 | |||
| 289 | radeon_dummy_page_fini(rdev); | ||
| 288 | } | 290 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index ded2a45bc95c..ccbabf734a61 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
| @@ -1062,7 +1062,7 @@ void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, | |||
| 1062 | *val = in_buf[0]; | 1062 | *val = in_buf[0]; |
| 1063 | DRM_DEBUG("val = 0x%02x\n", *val); | 1063 | DRM_DEBUG("val = 0x%02x\n", *val); |
| 1064 | } else { | 1064 | } else { |
| 1065 | DRM_ERROR("i2c 0x%02x 0x%02x read failed\n", | 1065 | DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n", |
| 1066 | addr, *val); | 1066 | addr, *val); |
| 1067 | } | 1067 | } |
| 1068 | } | 1068 | } |
| @@ -1084,7 +1084,7 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus, | |||
| 1084 | out_buf[1] = val; | 1084 | out_buf[1] = val; |
| 1085 | 1085 | ||
| 1086 | if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) | 1086 | if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) |
| 1087 | DRM_ERROR("i2c 0x%02x 0x%02x write failed\n", | 1087 | DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n", |
| 1088 | addr, val); | 1088 | addr, val); |
| 1089 | } | 1089 | } |
| 1090 | 1090 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 5b54268ed6b2..2f46e0c8df53 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
| @@ -269,7 +269,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { | |||
| 269 | .disable = radeon_legacy_encoder_disable, | 269 | .disable = radeon_legacy_encoder_disable, |
| 270 | }; | 270 | }; |
| 271 | 271 | ||
| 272 | #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE | 272 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) |
| 273 | 273 | ||
| 274 | #define MAX_RADEON_LEVEL 0xFF | 274 | #define MAX_RADEON_LEVEL 0xFF |
| 275 | 275 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 08de669e025a..86eda1ea94df 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include "drmP.h" | 23 | #include "drmP.h" |
| 24 | #include "radeon.h" | 24 | #include "radeon.h" |
| 25 | #include "avivod.h" | 25 | #include "avivod.h" |
| 26 | #include "atom.h" | ||
| 26 | #ifdef CONFIG_ACPI | 27 | #ifdef CONFIG_ACPI |
| 27 | #include <linux/acpi.h> | 28 | #include <linux/acpi.h> |
| 28 | #endif | 29 | #endif |
| @@ -535,7 +536,11 @@ void radeon_pm_resume(struct radeon_device *rdev) | |||
| 535 | /* set up the default clocks if the MC ucode is loaded */ | 536 | /* set up the default clocks if the MC ucode is loaded */ |
| 536 | if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { | 537 | if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { |
| 537 | if (rdev->pm.default_vddc) | 538 | if (rdev->pm.default_vddc) |
| 538 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc); | 539 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, |
| 540 | SET_VOLTAGE_TYPE_ASIC_VDDC); | ||
| 541 | if (rdev->pm.default_vddci) | ||
| 542 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, | ||
| 543 | SET_VOLTAGE_TYPE_ASIC_VDDCI); | ||
| 539 | if (rdev->pm.default_sclk) | 544 | if (rdev->pm.default_sclk) |
| 540 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | 545 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); |
| 541 | if (rdev->pm.default_mclk) | 546 | if (rdev->pm.default_mclk) |
| @@ -548,6 +553,7 @@ void radeon_pm_resume(struct radeon_device *rdev) | |||
| 548 | rdev->pm.current_sclk = rdev->pm.default_sclk; | 553 | rdev->pm.current_sclk = rdev->pm.default_sclk; |
| 549 | rdev->pm.current_mclk = rdev->pm.default_mclk; | 554 | rdev->pm.current_mclk = rdev->pm.default_mclk; |
| 550 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; | 555 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; |
| 556 | rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; | ||
| 551 | if (rdev->pm.pm_method == PM_METHOD_DYNPM | 557 | if (rdev->pm.pm_method == PM_METHOD_DYNPM |
| 552 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { | 558 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { |
| 553 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | 559 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
| @@ -585,7 +591,8 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
| 585 | /* set up the default clocks if the MC ucode is loaded */ | 591 | /* set up the default clocks if the MC ucode is loaded */ |
| 586 | if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { | 592 | if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { |
| 587 | if (rdev->pm.default_vddc) | 593 | if (rdev->pm.default_vddc) |
| 588 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc); | 594 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, |
| 595 | SET_VOLTAGE_TYPE_ASIC_VDDC); | ||
| 589 | if (rdev->pm.default_sclk) | 596 | if (rdev->pm.default_sclk) |
| 590 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | 597 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); |
| 591 | if (rdev->pm.default_mclk) | 598 | if (rdev->pm.default_mclk) |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index bbc9cd823334..c6776e48fdde 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
| @@ -248,7 +248,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) | |||
| 248 | void radeon_ring_free_size(struct radeon_device *rdev) | 248 | void radeon_ring_free_size(struct radeon_device *rdev) |
| 249 | { | 249 | { |
| 250 | if (rdev->wb.enabled) | 250 | if (rdev->wb.enabled) |
| 251 | rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]; | 251 | rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]); |
| 252 | else { | 252 | else { |
| 253 | if (rdev->family >= CHIP_R600) | 253 | if (rdev->family >= CHIP_R600) |
| 254 | rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); | 254 | rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 876cebc4b8ba..6e3b11e5abbe 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -114,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev) | |||
| 114 | udelay(voltage->delay); | 114 | udelay(voltage->delay); |
| 115 | } | 115 | } |
| 116 | } else if (voltage->type == VOLTAGE_VDDC) | 116 | } else if (voltage->type == VOLTAGE_VDDC) |
| 117 | radeon_atom_set_voltage(rdev, voltage->vddc_id); | 117 | radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC); |
| 118 | 118 | ||
| 119 | dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); | 119 | dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); |
| 120 | dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); | 120 | dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index b974ac7df8df..ef8a5babe9f7 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -106,7 +106,7 @@ void rv770_pm_misc(struct radeon_device *rdev) | |||
| 106 | 106 | ||
| 107 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | 107 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { |
| 108 | if (voltage->voltage != rdev->pm.current_vddc) { | 108 | if (voltage->voltage != rdev->pm.current_vddc) { |
| 109 | radeon_atom_set_voltage(rdev, voltage->voltage); | 109 | radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); |
| 110 | rdev->pm.current_vddc = voltage->voltage; | 110 | rdev->pm.current_vddc = voltage->voltage; |
| 111 | DRM_DEBUG("Setting: v: %d\n", voltage->voltage); | 111 | DRM_DEBUG("Setting: v: %d\n", voltage->voltage); |
| 112 | } | 112 | } |
| @@ -1255,9 +1255,6 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1255 | { | 1255 | { |
| 1256 | int r; | 1256 | int r; |
| 1257 | 1257 | ||
| 1258 | r = radeon_dummy_page_init(rdev); | ||
| 1259 | if (r) | ||
| 1260 | return r; | ||
| 1261 | /* This doesn't do much */ | 1258 | /* This doesn't do much */ |
| 1262 | r = radeon_gem_init(rdev); | 1259 | r = radeon_gem_init(rdev); |
| 1263 | if (r) | 1260 | if (r) |
| @@ -1372,7 +1369,6 @@ void rv770_fini(struct radeon_device *rdev) | |||
| 1372 | radeon_atombios_fini(rdev); | 1369 | radeon_atombios_fini(rdev); |
| 1373 | kfree(rdev->bios); | 1370 | kfree(rdev->bios); |
| 1374 | rdev->bios = NULL; | 1371 | rdev->bios = NULL; |
| 1375 | radeon_dummy_page_fini(rdev); | ||
| 1376 | } | 1372 | } |
| 1377 | 1373 | ||
| 1378 | static void rv770_pcie_gen2_enable(struct radeon_device *rdev) | 1374 | static void rv770_pcie_gen2_enable(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 737a2a2e46a5..9d9d92945f8c 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
| @@ -683,22 +683,14 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
| 683 | gfp_flags |= GFP_HIGHUSER; | 683 | gfp_flags |= GFP_HIGHUSER; |
| 684 | 684 | ||
| 685 | for (r = 0; r < count; ++r) { | 685 | for (r = 0; r < count; ++r) { |
| 686 | if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) { | 686 | p = alloc_page(gfp_flags); |
| 687 | void *addr; | ||
| 688 | addr = dma_alloc_coherent(NULL, PAGE_SIZE, | ||
| 689 | &dma_address[r], | ||
| 690 | gfp_flags); | ||
| 691 | if (addr == NULL) | ||
| 692 | return -ENOMEM; | ||
| 693 | p = virt_to_page(addr); | ||
| 694 | } else | ||
| 695 | p = alloc_page(gfp_flags); | ||
| 696 | if (!p) { | 687 | if (!p) { |
| 697 | 688 | ||
| 698 | printk(KERN_ERR TTM_PFX | 689 | printk(KERN_ERR TTM_PFX |
| 699 | "Unable to allocate page."); | 690 | "Unable to allocate page."); |
| 700 | return -ENOMEM; | 691 | return -ENOMEM; |
| 701 | } | 692 | } |
| 693 | |||
| 702 | list_add(&p->lru, pages); | 694 | list_add(&p->lru, pages); |
| 703 | } | 695 | } |
| 704 | return 0; | 696 | return 0; |
| @@ -746,24 +738,12 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, | |||
| 746 | unsigned long irq_flags; | 738 | unsigned long irq_flags; |
| 747 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 739 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
| 748 | struct page *p, *tmp; | 740 | struct page *p, *tmp; |
| 749 | unsigned r; | ||
| 750 | 741 | ||
| 751 | if (pool == NULL) { | 742 | if (pool == NULL) { |
| 752 | /* No pool for this memory type so free the pages */ | 743 | /* No pool for this memory type so free the pages */ |
| 753 | 744 | ||
| 754 | r = page_count-1; | ||
| 755 | list_for_each_entry_safe(p, tmp, pages, lru) { | 745 | list_for_each_entry_safe(p, tmp, pages, lru) { |
| 756 | if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) { | 746 | __free_page(p); |
| 757 | void *addr = page_address(p); | ||
| 758 | WARN_ON(!addr || !dma_address[r]); | ||
| 759 | if (addr) | ||
| 760 | dma_free_coherent(NULL, PAGE_SIZE, | ||
| 761 | addr, | ||
| 762 | dma_address[r]); | ||
| 763 | dma_address[r] = 0; | ||
| 764 | } else | ||
| 765 | __free_page(p); | ||
| 766 | r--; | ||
| 767 | } | 747 | } |
| 768 | /* Make the pages list empty */ | 748 | /* Make the pages list empty */ |
| 769 | INIT_LIST_HEAD(pages); | 749 | INIT_LIST_HEAD(pages); |
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig index 70e60a4bb678..419917955bf6 100644 --- a/drivers/gpu/stub/Kconfig +++ b/drivers/gpu/stub/Kconfig | |||
| @@ -5,6 +5,7 @@ config STUB_POULSBO | |||
| 5 | # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled | 5 | # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled |
| 6 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick | 6 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick |
| 7 | select BACKLIGHT_CLASS_DEVICE if ACPI | 7 | select BACKLIGHT_CLASS_DEVICE if ACPI |
| 8 | select VIDEO_OUTPUT_CONTROL if ACPI | ||
| 8 | select INPUT if ACPI | 9 | select INPUT if ACPI |
| 9 | select ACPI_VIDEO if ACPI | 10 | select ACPI_VIDEO if ACPI |
| 10 | select THERMAL if ACPI | 11 | select THERMAL if ACPI |
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 38319a69bd0a..d6d58684712b 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c | |||
| @@ -232,9 +232,17 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) | |||
| 232 | * Sanity check for the adapter hardware - check the reaction of | 232 | * Sanity check for the adapter hardware - check the reaction of |
| 233 | * the bus lines only if it seems to be idle. | 233 | * the bus lines only if it seems to be idle. |
| 234 | */ | 234 | */ |
| 235 | static int test_bus(struct i2c_algo_bit_data *adap, char *name) | 235 | static int test_bus(struct i2c_adapter *i2c_adap) |
| 236 | { | 236 | { |
| 237 | int scl, sda; | 237 | struct i2c_algo_bit_data *adap = i2c_adap->algo_data; |
| 238 | const char *name = i2c_adap->name; | ||
| 239 | int scl, sda, ret; | ||
| 240 | |||
| 241 | if (adap->pre_xfer) { | ||
| 242 | ret = adap->pre_xfer(i2c_adap); | ||
| 243 | if (ret < 0) | ||
| 244 | return -ENODEV; | ||
| 245 | } | ||
| 238 | 246 | ||
| 239 | if (adap->getscl == NULL) | 247 | if (adap->getscl == NULL) |
| 240 | pr_info("%s: Testing SDA only, SCL is not readable\n", name); | 248 | pr_info("%s: Testing SDA only, SCL is not readable\n", name); |
| @@ -297,11 +305,19 @@ static int test_bus(struct i2c_algo_bit_data *adap, char *name) | |||
| 297 | "while pulling SCL high!\n", name); | 305 | "while pulling SCL high!\n", name); |
| 298 | goto bailout; | 306 | goto bailout; |
| 299 | } | 307 | } |
| 308 | |||
| 309 | if (adap->post_xfer) | ||
| 310 | adap->post_xfer(i2c_adap); | ||
| 311 | |||
| 300 | pr_info("%s: Test OK\n", name); | 312 | pr_info("%s: Test OK\n", name); |
| 301 | return 0; | 313 | return 0; |
| 302 | bailout: | 314 | bailout: |
| 303 | sdahi(adap); | 315 | sdahi(adap); |
| 304 | sclhi(adap); | 316 | sclhi(adap); |
| 317 | |||
| 318 | if (adap->post_xfer) | ||
| 319 | adap->post_xfer(i2c_adap); | ||
| 320 | |||
| 305 | return -ENODEV; | 321 | return -ENODEV; |
| 306 | } | 322 | } |
| 307 | 323 | ||
| @@ -607,7 +623,7 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap, | |||
| 607 | int ret; | 623 | int ret; |
| 608 | 624 | ||
| 609 | if (bit_test) { | 625 | if (bit_test) { |
| 610 | ret = test_bus(bit_adap, adap->name); | 626 | ret = test_bus(adap); |
| 611 | if (ret < 0) | 627 | if (ret < 0) |
| 612 | return -ENODEV; | 628 | return -ENODEV; |
| 613 | } | 629 | } |
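Note: the test_bus() rework above relies on the optional pre_xfer/post_xfer hooks that struct i2c_algo_bit_data already provides, so adapters whose bus lines need setup before they can be probed are handled. A minimal sketch (not from the patch) of an adapter filling in those hooks; the routing comments are illustrative only:

	#include <linux/i2c.h>
	#include <linux/i2c-algo-bit.h>

	/* Sketch only: with this change, bit_test=1 now runs pre_xfer()
	 * before sampling SDA/SCL and post_xfer() afterwards. */
	static int my_pre_xfer(struct i2c_adapter *adap)
	{
		/* e.g. flip a mux/power gate so SDA/SCL are actually driven */
		return 0;	/* 0 = success, negative errno aborts the test */
	}

	static void my_post_xfer(struct i2c_adapter *adap)
	{
		/* undo whatever my_pre_xfer() set up */
	}

	static struct i2c_algo_bit_data my_bit_data = {
		.pre_xfer  = my_pre_xfer,
		.post_xfer = my_post_xfer,
		/* .setsda/.setscl/.getsda/.getscl/.udelay/.timeout as usual */
	};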
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 70c30e6bce0b..9a58994ff7ea 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
| @@ -797,7 +797,8 @@ static int i2c_do_add_adapter(struct i2c_driver *driver, | |||
| 797 | 797 | ||
| 798 | /* Let legacy drivers scan this bus for matching devices */ | 798 | /* Let legacy drivers scan this bus for matching devices */ |
| 799 | if (driver->attach_adapter) { | 799 | if (driver->attach_adapter) { |
| 800 | dev_warn(&adap->dev, "attach_adapter method is deprecated\n"); | 800 | dev_warn(&adap->dev, "%s: attach_adapter method is deprecated\n", |
| 801 | driver->driver.name); | ||
| 801 | dev_warn(&adap->dev, "Please use another way to instantiate " | 802 | dev_warn(&adap->dev, "Please use another way to instantiate " |
| 802 | "your i2c_client\n"); | 803 | "your i2c_client\n"); |
| 803 | /* We ignore the return code; if it fails, too bad */ | 804 | /* We ignore the return code; if it fails, too bad */ |
| @@ -984,7 +985,8 @@ static int i2c_do_del_adapter(struct i2c_driver *driver, | |||
| 984 | 985 | ||
| 985 | if (!driver->detach_adapter) | 986 | if (!driver->detach_adapter) |
| 986 | return 0; | 987 | return 0; |
| 987 | dev_warn(&adapter->dev, "detach_adapter method is deprecated\n"); | 988 | dev_warn(&adapter->dev, "%s: detach_adapter method is deprecated\n", |
| 989 | driver->driver.name); | ||
| 988 | res = driver->detach_adapter(adapter); | 990 | res = driver->detach_adapter(adapter); |
| 989 | if (res) | 991 | if (res) |
| 990 | dev_err(&adapter->dev, "detach_adapter failed (%d) " | 992 | dev_err(&adapter->dev, "detach_adapter failed (%d) " |
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 7f42d3a454d2..88d8e4cb419a 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c | |||
| @@ -39,13 +39,13 @@ struct evdev { | |||
| 39 | }; | 39 | }; |
| 40 | 40 | ||
| 41 | struct evdev_client { | 41 | struct evdev_client { |
| 42 | int head; | 42 | unsigned int head; |
| 43 | int tail; | 43 | unsigned int tail; |
| 44 | spinlock_t buffer_lock; /* protects access to buffer, head and tail */ | 44 | spinlock_t buffer_lock; /* protects access to buffer, head and tail */ |
| 45 | struct fasync_struct *fasync; | 45 | struct fasync_struct *fasync; |
| 46 | struct evdev *evdev; | 46 | struct evdev *evdev; |
| 47 | struct list_head node; | 47 | struct list_head node; |
| 48 | int bufsize; | 48 | unsigned int bufsize; |
| 49 | struct input_event buffer[]; | 49 | struct input_event buffer[]; |
| 50 | }; | 50 | }; |
| 51 | 51 | ||
| @@ -55,16 +55,25 @@ static DEFINE_MUTEX(evdev_table_mutex); | |||
| 55 | static void evdev_pass_event(struct evdev_client *client, | 55 | static void evdev_pass_event(struct evdev_client *client, |
| 56 | struct input_event *event) | 56 | struct input_event *event) |
| 57 | { | 57 | { |
| 58 | /* | 58 | /* Interrupts are disabled, just acquire the lock. */ |
| 59 | * Interrupts are disabled, just acquire the lock. | ||
| 60 | * Make sure we don't leave with the client buffer | ||
| 61 | * "empty" by having client->head == client->tail. | ||
| 62 | */ | ||
| 63 | spin_lock(&client->buffer_lock); | 59 | spin_lock(&client->buffer_lock); |
| 64 | do { | 60 | |
| 65 | client->buffer[client->head++] = *event; | 61 | client->buffer[client->head++] = *event; |
| 66 | client->head &= client->bufsize - 1; | 62 | client->head &= client->bufsize - 1; |
| 67 | } while (client->head == client->tail); | 63 | |
| 64 | if (unlikely(client->head == client->tail)) { | ||
| 65 | /* | ||
| 66 | * This effectively "drops" all unconsumed events, leaving | ||
| 67 | * EV_SYN/SYN_DROPPED plus the newest event in the queue. | ||
| 68 | */ | ||
| 69 | client->tail = (client->head - 2) & (client->bufsize - 1); | ||
| 70 | |||
| 71 | client->buffer[client->tail].time = event->time; | ||
| 72 | client->buffer[client->tail].type = EV_SYN; | ||
| 73 | client->buffer[client->tail].code = SYN_DROPPED; | ||
| 74 | client->buffer[client->tail].value = 0; | ||
| 75 | } | ||
| 76 | |||
| 68 | spin_unlock(&client->buffer_lock); | 77 | spin_unlock(&client->buffer_lock); |
| 69 | 78 | ||
| 70 | if (event->type == EV_SYN) | 79 | if (event->type == EV_SYN) |
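For reference, a minimal userspace sketch (not part of the patch) of how a client is expected to react to the EV_SYN/SYN_DROPPED marker queued above: discard everything up to and including the next SYN_REPORT, then re-read state with the EVIOCG* ioctls. The device node is hypothetical:

	#include <fcntl.h>
	#include <unistd.h>
	#include <linux/input.h>

	int main(void)
	{
		struct input_event ev;
		int dropped = 0;
		int fd = open("/dev/input/event0", O_RDONLY);	/* hypothetical node */

		while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
			if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
				dropped = 1;	/* queue overran: our view of the device is stale */
				continue;
			}
			if (dropped) {
				/* skip the truncated packet; once it ends, refresh
				 * state via EVIOCGKEY/EVIOCGABS/... before resuming */
				if (ev.type == EV_SYN && ev.code == SYN_REPORT)
					dropped = 0;
				continue;
			}
			/* process ev normally */
		}
		return 0;
	}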
diff --git a/drivers/input/input.c b/drivers/input/input.c index d6e8bd8a851c..ebbceedc92f4 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
| @@ -1746,6 +1746,42 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int | |||
| 1746 | } | 1746 | } |
| 1747 | EXPORT_SYMBOL(input_set_capability); | 1747 | EXPORT_SYMBOL(input_set_capability); |
| 1748 | 1748 | ||
| 1749 | static unsigned int input_estimate_events_per_packet(struct input_dev *dev) | ||
| 1750 | { | ||
| 1751 | int mt_slots; | ||
| 1752 | int i; | ||
| 1753 | unsigned int events; | ||
| 1754 | |||
| 1755 | if (dev->mtsize) { | ||
| 1756 | mt_slots = dev->mtsize; | ||
| 1757 | } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { | ||
| 1758 | mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - | ||
| 1758 | dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1; | ||
| 1759 | mt_slots = clamp(mt_slots, 2, 32); | ||
| 1761 | } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { | ||
| 1762 | mt_slots = 2; | ||
| 1763 | } else { | ||
| 1764 | mt_slots = 0; | ||
| 1765 | } | ||
| 1766 | |||
| 1767 | events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */ | ||
| 1768 | |||
| 1769 | for (i = 0; i < ABS_CNT; i++) { | ||
| 1770 | if (test_bit(i, dev->absbit)) { | ||
| 1771 | if (input_is_mt_axis(i)) | ||
| 1772 | events += mt_slots; | ||
| 1773 | else | ||
| 1774 | events++; | ||
| 1775 | } | ||
| 1776 | } | ||
| 1777 | |||
| 1778 | for (i = 0; i < REL_CNT; i++) | ||
| 1779 | if (test_bit(i, dev->relbit)) | ||
| 1780 | events++; | ||
| 1781 | |||
| 1782 | return events; | ||
| 1783 | } | ||
| 1784 | |||
| 1749 | #define INPUT_CLEANSE_BITMASK(dev, type, bits) \ | 1785 | #define INPUT_CLEANSE_BITMASK(dev, type, bits) \ |
| 1750 | do { \ | 1786 | do { \ |
| 1751 | if (!test_bit(EV_##type, dev->evbit)) \ | 1787 | if (!test_bit(EV_##type, dev->evbit)) \ |
| @@ -1793,6 +1829,10 @@ int input_register_device(struct input_dev *dev) | |||
| 1793 | /* Make sure that bitmasks not mentioned in dev->evbit are clean. */ | 1829 | /* Make sure that bitmasks not mentioned in dev->evbit are clean. */ |
| 1794 | input_cleanse_bitmasks(dev); | 1830 | input_cleanse_bitmasks(dev); |
| 1795 | 1831 | ||
| 1832 | if (!dev->hint_events_per_packet) | ||
| 1833 | dev->hint_events_per_packet = | ||
| 1834 | input_estimate_events_per_packet(dev); | ||
| 1835 | |||
| 1796 | /* | 1836 | /* |
| 1797 | * If delay and period are pre-set by the driver, then autorepeating | 1837 | * If delay and period are pre-set by the driver, then autorepeating |
| 1798 | * is handled by the driver itself and we don't do it in input.c. | 1838 | * is handled by the driver itself and we don't do it in input.c. |
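The estimate added above is only a fallback for drivers that leave hint_events_per_packet unset. A hedged sketch of a driver overriding it (the 10-slot device, the value 60 and the function name are made up):

	#include <linux/input.h>
	#include <linux/input/mt.h>

	/* Sketch only: a hypothetical 10-finger touchscreen.  Without a hint,
	 * the new estimate yields roughly mt_slots + 1 plus one event per
	 * advertised axis; a driver that knows its real packet size wins. */
	static int my_ts_setup(struct input_dev *dev)
	{
		int err = input_mt_init_slots(dev, 10);	/* dev->mtsize = 10 */
		if (err)
			return err;

		/* ABS_MT_POSITION_X/Y etc. declared via input_set_abs_params() ... */

		input_set_events_per_packet(dev, 60);	/* explicit hint, skips the estimate */
		return 0;
	}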
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c index 09bef79d9da1..a26922cf0e84 100644 --- a/drivers/input/keyboard/twl4030_keypad.c +++ b/drivers/input/keyboard/twl4030_keypad.c | |||
| @@ -332,18 +332,20 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp) | |||
| 332 | static int __devinit twl4030_kp_probe(struct platform_device *pdev) | 332 | static int __devinit twl4030_kp_probe(struct platform_device *pdev) |
| 333 | { | 333 | { |
| 334 | struct twl4030_keypad_data *pdata = pdev->dev.platform_data; | 334 | struct twl4030_keypad_data *pdata = pdev->dev.platform_data; |
| 335 | const struct matrix_keymap_data *keymap_data = pdata->keymap_data; | 335 | const struct matrix_keymap_data *keymap_data; |
| 336 | struct twl4030_keypad *kp; | 336 | struct twl4030_keypad *kp; |
| 337 | struct input_dev *input; | 337 | struct input_dev *input; |
| 338 | u8 reg; | 338 | u8 reg; |
| 339 | int error; | 339 | int error; |
| 340 | 340 | ||
| 341 | if (!pdata || !pdata->rows || !pdata->cols || | 341 | if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data || |
| 342 | pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) { | 342 | pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) { |
| 343 | dev_err(&pdev->dev, "Invalid platform_data\n"); | 343 | dev_err(&pdev->dev, "Invalid platform_data\n"); |
| 344 | return -EINVAL; | 344 | return -EINVAL; |
| 345 | } | 345 | } |
| 346 | 346 | ||
| 347 | keymap_data = pdata->keymap_data; | ||
| 348 | |||
| 347 | kp = kzalloc(sizeof(*kp), GFP_KERNEL); | 349 | kp = kzalloc(sizeof(*kp), GFP_KERNEL); |
| 348 | input = input_allocate_device(); | 350 | input = input_allocate_device(); |
| 349 | if (!kp || !input) { | 351 | if (!kp || !input) { |
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index 7077f9bf5ead..62bae99424e6 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c | |||
| @@ -303,7 +303,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, | |||
| 303 | enum xenbus_state backend_state) | 303 | enum xenbus_state backend_state) |
| 304 | { | 304 | { |
| 305 | struct xenkbd_info *info = dev_get_drvdata(&dev->dev); | 305 | struct xenkbd_info *info = dev_get_drvdata(&dev->dev); |
| 306 | int val; | 306 | int ret, val; |
| 307 | 307 | ||
| 308 | switch (backend_state) { | 308 | switch (backend_state) { |
| 309 | case XenbusStateInitialising: | 309 | case XenbusStateInitialising: |
| @@ -316,6 +316,17 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, | |||
| 316 | 316 | ||
| 317 | case XenbusStateInitWait: | 317 | case XenbusStateInitWait: |
| 318 | InitWait: | 318 | InitWait: |
| 319 | ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend, | ||
| 320 | "feature-abs-pointer", "%d", &val); | ||
| 321 | if (ret < 0) | ||
| 322 | val = 0; | ||
| 323 | if (val) { | ||
| 324 | ret = xenbus_printf(XBT_NIL, info->xbdev->nodename, | ||
| 325 | "request-abs-pointer", "1"); | ||
| 326 | if (ret) | ||
| 327 | pr_warning("xenkbd: can't request abs-pointer"); | ||
| 328 | } | ||
| 329 | |||
| 319 | xenbus_switch_state(dev, XenbusStateConnected); | 330 | xenbus_switch_state(dev, XenbusStateConnected); |
| 320 | break; | 331 | break; |
| 321 | 332 | ||
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c index efa06882de00..45f93d0f5592 100644 --- a/drivers/input/touchscreen/h3600_ts_input.c +++ b/drivers/input/touchscreen/h3600_ts_input.c | |||
| @@ -399,31 +399,34 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv) | |||
| 399 | IRQF_SHARED | IRQF_DISABLED, "h3600_action", &ts->dev)) { | 399 | IRQF_SHARED | IRQF_DISABLED, "h3600_action", &ts->dev)) { |
| 400 | printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n"); | 400 | printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n"); |
| 401 | err = -EBUSY; | 401 | err = -EBUSY; |
| 402 | goto fail2; | 402 | goto fail1; |
| 403 | } | 403 | } |
| 404 | 404 | ||
| 405 | if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler, | 405 | if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler, |
| 406 | IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", &ts->dev)) { | 406 | IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", &ts->dev)) { |
| 407 | printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n"); | 407 | printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n"); |
| 408 | err = -EBUSY; | 408 | err = -EBUSY; |
| 409 | goto fail3; | 409 | goto fail2; |
| 410 | } | 410 | } |
| 411 | 411 | ||
| 412 | serio_set_drvdata(serio, ts); | 412 | serio_set_drvdata(serio, ts); |
| 413 | 413 | ||
| 414 | err = serio_open(serio, drv); | 414 | err = serio_open(serio, drv); |
| 415 | if (err) | 415 | if (err) |
| 416 | return err; | 416 | goto fail3; |
| 417 | 417 | ||
| 418 | //h3600_flite_control(1, 25); /* default brightness */ | 418 | //h3600_flite_control(1, 25); /* default brightness */ |
| 419 | input_register_device(ts->dev); | 419 | err = input_register_device(ts->dev); |
| 420 | if (err) | ||
| 421 | goto fail4; | ||
| 420 | 422 | ||
| 421 | return 0; | 423 | return 0; |
| 422 | 424 | ||
| 423 | fail3: free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev); | 425 | fail4: serio_close(serio); |
| 426 | fail3: serio_set_drvdata(serio, NULL); | ||
| 427 | free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev); | ||
| 424 | fail2: free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev); | 428 | fail2: free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev); |
| 425 | fail1: serio_set_drvdata(serio, NULL); | 429 | fail1: input_free_device(input_dev); |
| 426 | input_free_device(input_dev); | ||
| 427 | kfree(ts); | 430 | kfree(ts); |
| 428 | return err; | 431 | return err; |
| 429 | } | 432 | } |
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c index 3790816643be..8497f56f8e46 100644 --- a/drivers/leds/leds-regulator.c +++ b/drivers/leds/leds-regulator.c | |||
| @@ -178,6 +178,10 @@ static int __devinit regulator_led_probe(struct platform_device *pdev) | |||
| 178 | led->cdev.flags |= LED_CORE_SUSPENDRESUME; | 178 | led->cdev.flags |= LED_CORE_SUSPENDRESUME; |
| 179 | led->vcc = vcc; | 179 | led->vcc = vcc; |
| 180 | 180 | ||
| 181 | /* handle an already enabled regulator correctly */ | ||
| 182 | if (regulator_is_enabled(led->vcc)) | ||
| 183 | led->enabled = 1; | ||
| 184 | |||
| 181 | mutex_init(&led->mutex); | 185 | mutex_init(&led->mutex); |
| 182 | INIT_WORK(&led->work, led_work); | 186 | INIT_WORK(&led->work, led_work); |
| 183 | 187 | ||
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5ef136cdba91..e5d8904fc8f6 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
| @@ -390,13 +390,6 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits) | |||
| 390 | return md_raid5_congested(&rs->md, bits); | 390 | return md_raid5_congested(&rs->md, bits); |
| 391 | } | 391 | } |
| 392 | 392 | ||
| 393 | static void raid_unplug(struct dm_target_callbacks *cb) | ||
| 394 | { | ||
| 395 | struct raid_set *rs = container_of(cb, struct raid_set, callbacks); | ||
| 396 | |||
| 397 | md_raid5_kick_device(rs->md.private); | ||
| 398 | } | ||
| 399 | |||
| 400 | /* | 393 | /* |
| 401 | * Construct a RAID4/5/6 mapping: | 394 | * Construct a RAID4/5/6 mapping: |
| 402 | * Args: | 395 | * Args: |
| @@ -487,7 +480,6 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
| 487 | } | 480 | } |
| 488 | 481 | ||
| 489 | rs->callbacks.congested_fn = raid_is_congested; | 482 | rs->callbacks.congested_fn = raid_is_congested; |
| 490 | rs->callbacks.unplug_fn = raid_unplug; | ||
| 491 | dm_table_add_target_callbacks(ti->table, &rs->callbacks); | 483 | dm_table_add_target_callbacks(ti->table, &rs->callbacks); |
| 492 | 484 | ||
| 493 | return 0; | 485 | return 0; |
diff --git a/drivers/md/md.c b/drivers/md/md.c index b12b3776c0c0..6e853c61d87e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -447,48 +447,59 @@ EXPORT_SYMBOL(md_flush_request); | |||
| 447 | 447 | ||
| 448 | /* Support for plugging. | 448 | /* Support for plugging. |
| 449 | * This mirrors the plugging support in request_queue, but does not | 449 | * This mirrors the plugging support in request_queue, but does not |
| 450 | * require having a whole queue | 450 | * require having a whole queue or request structures. |
| 451 | * We allocate an md_plug_cb for each md device and each thread it gets | ||
| 452 | * plugged on. This links to the private plug_handle structure in the | ||
| 453 | * personality data where we keep a count of the number of outstanding | ||
| 454 | * plugs so other code can see if a plug is active. | ||
| 451 | */ | 455 | */ |
| 452 | static void plugger_work(struct work_struct *work) | 456 | struct md_plug_cb { |
| 453 | { | 457 | struct blk_plug_cb cb; |
| 454 | struct plug_handle *plug = | 458 | mddev_t *mddev; |
| 455 | container_of(work, struct plug_handle, unplug_work); | 459 | }; |
| 456 | plug->unplug_fn(plug); | ||
| 457 | } | ||
| 458 | static void plugger_timeout(unsigned long data) | ||
| 459 | { | ||
| 460 | struct plug_handle *plug = (void *)data; | ||
| 461 | kblockd_schedule_work(NULL, &plug->unplug_work); | ||
| 462 | } | ||
| 463 | void plugger_init(struct plug_handle *plug, | ||
| 464 | void (*unplug_fn)(struct plug_handle *)) | ||
| 465 | { | ||
| 466 | plug->unplug_flag = 0; | ||
| 467 | plug->unplug_fn = unplug_fn; | ||
| 468 | init_timer(&plug->unplug_timer); | ||
| 469 | plug->unplug_timer.function = plugger_timeout; | ||
| 470 | plug->unplug_timer.data = (unsigned long)plug; | ||
| 471 | INIT_WORK(&plug->unplug_work, plugger_work); | ||
| 472 | } | ||
| 473 | EXPORT_SYMBOL_GPL(plugger_init); | ||
| 474 | 460 | ||
| 475 | void plugger_set_plug(struct plug_handle *plug) | 461 | static void plugger_unplug(struct blk_plug_cb *cb) |
| 476 | { | 462 | { |
| 477 | if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag)) | 463 | struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb); |
| 478 | mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1); | 464 | if (atomic_dec_and_test(&mdcb->mddev->plug_cnt)) |
| 465 | md_wakeup_thread(mdcb->mddev->thread); | ||
| 466 | kfree(mdcb); | ||
| 479 | } | 467 | } |
| 480 | EXPORT_SYMBOL_GPL(plugger_set_plug); | ||
| 481 | 468 | ||
| 482 | int plugger_remove_plug(struct plug_handle *plug) | 469 | /* Check that an unplug wakeup will come shortly. |
| 470 | * If not, wakeup the md thread immediately | ||
| 471 | */ | ||
| 472 | int mddev_check_plugged(mddev_t *mddev) | ||
| 483 | { | 473 | { |
| 484 | if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) { | 474 | struct blk_plug *plug = current->plug; |
| 485 | del_timer(&plug->unplug_timer); | 475 | struct md_plug_cb *mdcb; |
| 486 | return 1; | 476 | |
| 487 | } else | 477 | if (!plug) |
| 478 | return 0; | ||
| 479 | |||
| 480 | list_for_each_entry(mdcb, &plug->cb_list, cb.list) { | ||
| 481 | if (mdcb->cb.callback == plugger_unplug && | ||
| 482 | mdcb->mddev == mddev) { | ||
| 483 | /* Already on the list, move to top */ | ||
| 484 | if (mdcb != list_first_entry(&plug->cb_list, | ||
| 485 | struct md_plug_cb, | ||
| 486 | cb.list)) | ||
| 487 | list_move(&mdcb->cb.list, &plug->cb_list); | ||
| 488 | return 1; | ||
| 489 | } | ||
| 490 | } | ||
| 491 | /* Not currently on the callback list */ | ||
| 492 | mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC); | ||
| 493 | if (!mdcb) | ||
| 488 | return 0; | 494 | return 0; |
| 489 | } | ||
| 490 | EXPORT_SYMBOL_GPL(plugger_remove_plug); | ||
| 491 | 495 | ||
| 496 | mdcb->mddev = mddev; | ||
| 497 | mdcb->cb.callback = plugger_unplug; | ||
| 498 | atomic_inc(&mddev->plug_cnt); | ||
| 499 | list_add(&mdcb->cb.list, &plug->cb_list); | ||
| 500 | return 1; | ||
| 501 | } | ||
| 502 | EXPORT_SYMBOL_GPL(mddev_check_plugged); | ||
| 492 | 503 | ||
| 493 | static inline mddev_t *mddev_get(mddev_t *mddev) | 504 | static inline mddev_t *mddev_get(mddev_t *mddev) |
| 494 | { | 505 | { |
| @@ -538,6 +549,7 @@ void mddev_init(mddev_t *mddev) | |||
| 538 | atomic_set(&mddev->active, 1); | 549 | atomic_set(&mddev->active, 1); |
| 539 | atomic_set(&mddev->openers, 0); | 550 | atomic_set(&mddev->openers, 0); |
| 540 | atomic_set(&mddev->active_io, 0); | 551 | atomic_set(&mddev->active_io, 0); |
| 552 | atomic_set(&mddev->plug_cnt, 0); | ||
| 541 | spin_lock_init(&mddev->write_lock); | 553 | spin_lock_init(&mddev->write_lock); |
| 542 | atomic_set(&mddev->flush_pending, 0); | 554 | atomic_set(&mddev->flush_pending, 0); |
| 543 | init_waitqueue_head(&mddev->sb_wait); | 555 | init_waitqueue_head(&mddev->sb_wait); |
| @@ -4723,7 +4735,6 @@ static void md_clean(mddev_t *mddev) | |||
| 4723 | mddev->bitmap_info.chunksize = 0; | 4735 | mddev->bitmap_info.chunksize = 0; |
| 4724 | mddev->bitmap_info.daemon_sleep = 0; | 4736 | mddev->bitmap_info.daemon_sleep = 0; |
| 4725 | mddev->bitmap_info.max_write_behind = 0; | 4737 | mddev->bitmap_info.max_write_behind = 0; |
| 4726 | mddev->plug = NULL; | ||
| 4727 | } | 4738 | } |
| 4728 | 4739 | ||
| 4729 | static void __md_stop_writes(mddev_t *mddev) | 4740 | static void __md_stop_writes(mddev_t *mddev) |
| @@ -6688,12 +6699,6 @@ int md_allow_write(mddev_t *mddev) | |||
| 6688 | } | 6699 | } |
| 6689 | EXPORT_SYMBOL_GPL(md_allow_write); | 6700 | EXPORT_SYMBOL_GPL(md_allow_write); |
| 6690 | 6701 | ||
| 6691 | void md_unplug(mddev_t *mddev) | ||
| 6692 | { | ||
| 6693 | if (mddev->plug) | ||
| 6694 | mddev->plug->unplug_fn(mddev->plug); | ||
| 6695 | } | ||
| 6696 | |||
| 6697 | #define SYNC_MARKS 10 | 6702 | #define SYNC_MARKS 10 |
| 6698 | #define SYNC_MARK_STEP (3*HZ) | 6703 | #define SYNC_MARK_STEP (3*HZ) |
| 6699 | void md_do_sync(mddev_t *mddev) | 6704 | void md_do_sync(mddev_t *mddev) |
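A minimal sketch (not from the patch) of how a personality uses the new helper; the function name is made up, and the calls simply mirror the raid1/raid10 hunks further down:

	/* Sketch only: submit_write_sketch() stands in for a personality's
	 * make_request path. */
	static void submit_write_sketch(mddev_t *mddev, struct bio *bio)
	{
		/* Registers plugger_unplug() on current->plug (if any) and bumps
		 * mddev->plug_cnt; returns 0 when the task is not plugged. */
		int plugged = mddev_check_plugged(mddev);

		/* ... add bio to the personality's pending list ... */

		if (!plugged)
			md_wakeup_thread(mddev->thread);
		/* else: blk_finish_plug() in the submitter runs plugger_unplug(),
		 * drops plug_cnt to zero and wakes the md thread for us. */
	}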
diff --git a/drivers/md/md.h b/drivers/md/md.h index 52b407369e13..0b1fd3f1d85b 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
| @@ -29,26 +29,6 @@ | |||
| 29 | typedef struct mddev_s mddev_t; | 29 | typedef struct mddev_s mddev_t; |
| 30 | typedef struct mdk_rdev_s mdk_rdev_t; | 30 | typedef struct mdk_rdev_s mdk_rdev_t; |
| 31 | 31 | ||
| 32 | /* generic plugging support - like that provided with request_queue, | ||
| 33 | * but does not require a request_queue | ||
| 34 | */ | ||
| 35 | struct plug_handle { | ||
| 36 | void (*unplug_fn)(struct plug_handle *); | ||
| 37 | struct timer_list unplug_timer; | ||
| 38 | struct work_struct unplug_work; | ||
| 39 | unsigned long unplug_flag; | ||
| 40 | }; | ||
| 41 | #define PLUGGED_FLAG 1 | ||
| 42 | void plugger_init(struct plug_handle *plug, | ||
| 43 | void (*unplug_fn)(struct plug_handle *)); | ||
| 44 | void plugger_set_plug(struct plug_handle *plug); | ||
| 45 | int plugger_remove_plug(struct plug_handle *plug); | ||
| 46 | static inline void plugger_flush(struct plug_handle *plug) | ||
| 47 | { | ||
| 48 | del_timer_sync(&plug->unplug_timer); | ||
| 49 | cancel_work_sync(&plug->unplug_work); | ||
| 50 | } | ||
| 51 | |||
| 52 | /* | 32 | /* |
| 53 | * MD's 'extended' device | 33 | * MD's 'extended' device |
| 54 | */ | 34 | */ |
| @@ -199,6 +179,9 @@ struct mddev_s | |||
| 199 | int delta_disks, new_level, new_layout; | 179 | int delta_disks, new_level, new_layout; |
| 200 | int new_chunk_sectors; | 180 | int new_chunk_sectors; |
| 201 | 181 | ||
| 182 | atomic_t plug_cnt; /* If device is expecting | ||
| 183 | * more bios soon. | ||
| 184 | */ | ||
| 202 | struct mdk_thread_s *thread; /* management thread */ | 185 | struct mdk_thread_s *thread; /* management thread */ |
| 203 | struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ | 186 | struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ |
| 204 | sector_t curr_resync; /* last block scheduled */ | 187 | sector_t curr_resync; /* last block scheduled */ |
| @@ -336,7 +319,6 @@ struct mddev_s | |||
| 336 | struct list_head all_mddevs; | 319 | struct list_head all_mddevs; |
| 337 | 320 | ||
| 338 | struct attribute_group *to_remove; | 321 | struct attribute_group *to_remove; |
| 339 | struct plug_handle *plug; /* if used by personality */ | ||
| 340 | 322 | ||
| 341 | struct bio_set *bio_set; | 323 | struct bio_set *bio_set; |
| 342 | 324 | ||
| @@ -516,7 +498,6 @@ extern int md_integrity_register(mddev_t *mddev); | |||
| 516 | extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev); | 498 | extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev); |
| 517 | extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); | 499 | extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); |
| 518 | extern void restore_bitmap_write_access(struct file *file); | 500 | extern void restore_bitmap_write_access(struct file *file); |
| 519 | extern void md_unplug(mddev_t *mddev); | ||
| 520 | 501 | ||
| 521 | extern void mddev_init(mddev_t *mddev); | 502 | extern void mddev_init(mddev_t *mddev); |
| 522 | extern int md_run(mddev_t *mddev); | 503 | extern int md_run(mddev_t *mddev); |
| @@ -530,4 +511,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, | |||
| 530 | mddev_t *mddev); | 511 | mddev_t *mddev); |
| 531 | extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, | 512 | extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, |
| 532 | mddev_t *mddev); | 513 | mddev_t *mddev); |
| 514 | extern int mddev_check_plugged(mddev_t *mddev); | ||
| 533 | #endif /* _MD_MD_H */ | 515 | #endif /* _MD_MD_H */ |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c2a21ae56d97..2b7a7ff401dc 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -565,12 +565,6 @@ static void flush_pending_writes(conf_t *conf) | |||
| 565 | spin_unlock_irq(&conf->device_lock); | 565 | spin_unlock_irq(&conf->device_lock); |
| 566 | } | 566 | } |
| 567 | 567 | ||
| 568 | static void md_kick_device(mddev_t *mddev) | ||
| 569 | { | ||
| 570 | blk_flush_plug(current); | ||
| 571 | md_wakeup_thread(mddev->thread); | ||
| 572 | } | ||
| 573 | |||
| 574 | /* Barriers.... | 568 | /* Barriers.... |
| 575 | * Sometimes we need to suspend IO while we do something else, | 569 | * Sometimes we need to suspend IO while we do something else, |
| 576 | * either some resync/recovery, or reconfigure the array. | 570 | * either some resync/recovery, or reconfigure the array. |
| @@ -600,7 +594,7 @@ static void raise_barrier(conf_t *conf) | |||
| 600 | 594 | ||
| 601 | /* Wait until no block IO is waiting */ | 595 | /* Wait until no block IO is waiting */ |
| 602 | wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, | 596 | wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, |
| 603 | conf->resync_lock, md_kick_device(conf->mddev)); | 597 | conf->resync_lock, ); |
| 604 | 598 | ||
| 605 | /* block any new IO from starting */ | 599 | /* block any new IO from starting */ |
| 606 | conf->barrier++; | 600 | conf->barrier++; |
| @@ -608,7 +602,7 @@ static void raise_barrier(conf_t *conf) | |||
| 608 | /* Now wait for all pending IO to complete */ | 602 | /* Now wait for all pending IO to complete */ |
| 609 | wait_event_lock_irq(conf->wait_barrier, | 603 | wait_event_lock_irq(conf->wait_barrier, |
| 610 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, | 604 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, |
| 611 | conf->resync_lock, md_kick_device(conf->mddev)); | 605 | conf->resync_lock, ); |
| 612 | 606 | ||
| 613 | spin_unlock_irq(&conf->resync_lock); | 607 | spin_unlock_irq(&conf->resync_lock); |
| 614 | } | 608 | } |
| @@ -630,7 +624,7 @@ static void wait_barrier(conf_t *conf) | |||
| 630 | conf->nr_waiting++; | 624 | conf->nr_waiting++; |
| 631 | wait_event_lock_irq(conf->wait_barrier, !conf->barrier, | 625 | wait_event_lock_irq(conf->wait_barrier, !conf->barrier, |
| 632 | conf->resync_lock, | 626 | conf->resync_lock, |
| 633 | md_kick_device(conf->mddev)); | 627 | ); |
| 634 | conf->nr_waiting--; | 628 | conf->nr_waiting--; |
| 635 | } | 629 | } |
| 636 | conf->nr_pending++; | 630 | conf->nr_pending++; |
| @@ -666,8 +660,7 @@ static void freeze_array(conf_t *conf) | |||
| 666 | wait_event_lock_irq(conf->wait_barrier, | 660 | wait_event_lock_irq(conf->wait_barrier, |
| 667 | conf->nr_pending == conf->nr_queued+1, | 661 | conf->nr_pending == conf->nr_queued+1, |
| 668 | conf->resync_lock, | 662 | conf->resync_lock, |
| 669 | ({ flush_pending_writes(conf); | 663 | flush_pending_writes(conf)); |
| 670 | md_kick_device(conf->mddev); })); | ||
| 671 | spin_unlock_irq(&conf->resync_lock); | 664 | spin_unlock_irq(&conf->resync_lock); |
| 672 | } | 665 | } |
| 673 | static void unfreeze_array(conf_t *conf) | 666 | static void unfreeze_array(conf_t *conf) |
| @@ -729,6 +722,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
| 729 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 722 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
| 730 | const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); | 723 | const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); |
| 731 | mdk_rdev_t *blocked_rdev; | 724 | mdk_rdev_t *blocked_rdev; |
| 725 | int plugged; | ||
| 732 | 726 | ||
| 733 | /* | 727 | /* |
| 734 | * Register the new request and wait if the reconstruction | 728 | * Register the new request and wait if the reconstruction |
| @@ -820,6 +814,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
| 820 | * inc refcount on their rdev. Record them by setting | 814 | * inc refcount on their rdev. Record them by setting |
| 821 | * bios[x] to bio | 815 | * bios[x] to bio |
| 822 | */ | 816 | */ |
| 817 | plugged = mddev_check_plugged(mddev); | ||
| 818 | |||
| 823 | disks = conf->raid_disks; | 819 | disks = conf->raid_disks; |
| 824 | retry_write: | 820 | retry_write: |
| 825 | blocked_rdev = NULL; | 821 | blocked_rdev = NULL; |
| @@ -925,7 +921,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
| 925 | /* In case raid1d snuck in to freeze_array */ | 921 | /* In case raid1d snuck in to freeze_array */ |
| 926 | wake_up(&conf->wait_barrier); | 922 | wake_up(&conf->wait_barrier); |
| 927 | 923 | ||
| 928 | if (do_sync || !bitmap) | 924 | if (do_sync || !bitmap || !plugged) |
| 929 | md_wakeup_thread(mddev->thread); | 925 | md_wakeup_thread(mddev->thread); |
| 930 | 926 | ||
| 931 | return 0; | 927 | return 0; |
| @@ -1516,13 +1512,16 @@ static void raid1d(mddev_t *mddev) | |||
| 1516 | conf_t *conf = mddev->private; | 1512 | conf_t *conf = mddev->private; |
| 1517 | struct list_head *head = &conf->retry_list; | 1513 | struct list_head *head = &conf->retry_list; |
| 1518 | mdk_rdev_t *rdev; | 1514 | mdk_rdev_t *rdev; |
| 1515 | struct blk_plug plug; | ||
| 1519 | 1516 | ||
| 1520 | md_check_recovery(mddev); | 1517 | md_check_recovery(mddev); |
| 1521 | 1518 | ||
| 1519 | blk_start_plug(&plug); | ||
| 1522 | for (;;) { | 1520 | for (;;) { |
| 1523 | char b[BDEVNAME_SIZE]; | 1521 | char b[BDEVNAME_SIZE]; |
| 1524 | 1522 | ||
| 1525 | flush_pending_writes(conf); | 1523 | if (atomic_read(&mddev->plug_cnt) == 0) |
| 1524 | flush_pending_writes(conf); | ||
| 1526 | 1525 | ||
| 1527 | spin_lock_irqsave(&conf->device_lock, flags); | 1526 | spin_lock_irqsave(&conf->device_lock, flags); |
| 1528 | if (list_empty(head)) { | 1527 | if (list_empty(head)) { |
| @@ -1593,6 +1592,7 @@ static void raid1d(mddev_t *mddev) | |||
| 1593 | } | 1592 | } |
| 1594 | cond_resched(); | 1593 | cond_resched(); |
| 1595 | } | 1594 | } |
| 1595 | blk_finish_plug(&plug); | ||
| 1596 | } | 1596 | } |
| 1597 | 1597 | ||
| 1598 | 1598 | ||
| @@ -2039,7 +2039,6 @@ static int stop(mddev_t *mddev) | |||
| 2039 | 2039 | ||
| 2040 | md_unregister_thread(mddev->thread); | 2040 | md_unregister_thread(mddev->thread); |
| 2041 | mddev->thread = NULL; | 2041 | mddev->thread = NULL; |
| 2042 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | ||
| 2043 | if (conf->r1bio_pool) | 2042 | if (conf->r1bio_pool) |
| 2044 | mempool_destroy(conf->r1bio_pool); | 2043 | mempool_destroy(conf->r1bio_pool); |
| 2045 | kfree(conf->mirrors); | 2044 | kfree(conf->mirrors); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 2da83d566592..8e9462626ec5 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -634,12 +634,6 @@ static void flush_pending_writes(conf_t *conf) | |||
| 634 | spin_unlock_irq(&conf->device_lock); | 634 | spin_unlock_irq(&conf->device_lock); |
| 635 | } | 635 | } |
| 636 | 636 | ||
| 637 | static void md_kick_device(mddev_t *mddev) | ||
| 638 | { | ||
| 639 | blk_flush_plug(current); | ||
| 640 | md_wakeup_thread(mddev->thread); | ||
| 641 | } | ||
| 642 | |||
| 643 | /* Barriers.... | 637 | /* Barriers.... |
| 644 | * Sometimes we need to suspend IO while we do something else, | 638 | * Sometimes we need to suspend IO while we do something else, |
| 645 | * either some resync/recovery, or reconfigure the array. | 639 | * either some resync/recovery, or reconfigure the array. |
| @@ -669,15 +663,15 @@ static void raise_barrier(conf_t *conf, int force) | |||
| 669 | 663 | ||
| 670 | /* Wait until no block IO is waiting (unless 'force') */ | 664 | /* Wait until no block IO is waiting (unless 'force') */ |
| 671 | wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, | 665 | wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, |
| 672 | conf->resync_lock, md_kick_device(conf->mddev)); | 666 | conf->resync_lock, ); |
| 673 | 667 | ||
| 674 | /* block any new IO from starting */ | 668 | /* block any new IO from starting */ |
| 675 | conf->barrier++; | 669 | conf->barrier++; |
| 676 | 670 | ||
| 677 | /* No wait for all pending IO to complete */ | 671 | /* Now wait for all pending IO to complete */ |
| 678 | wait_event_lock_irq(conf->wait_barrier, | 672 | wait_event_lock_irq(conf->wait_barrier, |
| 679 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, | 673 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, |
| 680 | conf->resync_lock, md_kick_device(conf->mddev)); | 674 | conf->resync_lock, ); |
| 681 | 675 | ||
| 682 | spin_unlock_irq(&conf->resync_lock); | 676 | spin_unlock_irq(&conf->resync_lock); |
| 683 | } | 677 | } |
| @@ -698,7 +692,7 @@ static void wait_barrier(conf_t *conf) | |||
| 698 | conf->nr_waiting++; | 692 | conf->nr_waiting++; |
| 699 | wait_event_lock_irq(conf->wait_barrier, !conf->barrier, | 693 | wait_event_lock_irq(conf->wait_barrier, !conf->barrier, |
| 700 | conf->resync_lock, | 694 | conf->resync_lock, |
| 701 | md_kick_device(conf->mddev)); | 695 | ); |
| 702 | conf->nr_waiting--; | 696 | conf->nr_waiting--; |
| 703 | } | 697 | } |
| 704 | conf->nr_pending++; | 698 | conf->nr_pending++; |
| @@ -734,8 +728,8 @@ static void freeze_array(conf_t *conf) | |||
| 734 | wait_event_lock_irq(conf->wait_barrier, | 728 | wait_event_lock_irq(conf->wait_barrier, |
| 735 | conf->nr_pending == conf->nr_queued+1, | 729 | conf->nr_pending == conf->nr_queued+1, |
| 736 | conf->resync_lock, | 730 | conf->resync_lock, |
| 737 | ({ flush_pending_writes(conf); | 731 | flush_pending_writes(conf)); |
| 738 | md_kick_device(conf->mddev); })); | 732 | |
| 739 | spin_unlock_irq(&conf->resync_lock); | 733 | spin_unlock_irq(&conf->resync_lock); |
| 740 | } | 734 | } |
| 741 | 735 | ||
| @@ -762,6 +756,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
| 762 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); | 756 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); |
| 763 | unsigned long flags; | 757 | unsigned long flags; |
| 764 | mdk_rdev_t *blocked_rdev; | 758 | mdk_rdev_t *blocked_rdev; |
| 759 | int plugged; | ||
| 765 | 760 | ||
| 766 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 761 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
| 767 | md_flush_request(mddev, bio); | 762 | md_flush_request(mddev, bio); |
| @@ -870,6 +865,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
| 870 | * inc refcount on their rdev. Record them by setting | 865 | * inc refcount on their rdev. Record them by setting |
| 871 | * bios[x] to bio | 866 | * bios[x] to bio |
| 872 | */ | 867 | */ |
| 868 | plugged = mddev_check_plugged(mddev); | ||
| 869 | |||
| 873 | raid10_find_phys(conf, r10_bio); | 870 | raid10_find_phys(conf, r10_bio); |
| 874 | retry_write: | 871 | retry_write: |
| 875 | blocked_rdev = NULL; | 872 | blocked_rdev = NULL; |
| @@ -946,9 +943,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
| 946 | /* In case raid10d snuck in to freeze_array */ | 943 | /* In case raid10d snuck in to freeze_array */ |
| 947 | wake_up(&conf->wait_barrier); | 944 | wake_up(&conf->wait_barrier); |
| 948 | 945 | ||
| 949 | if (do_sync || !mddev->bitmap) | 946 | if (do_sync || !mddev->bitmap || !plugged) |
| 950 | md_wakeup_thread(mddev->thread); | 947 | md_wakeup_thread(mddev->thread); |
| 951 | |||
| 952 | return 0; | 948 | return 0; |
| 953 | } | 949 | } |
| 954 | 950 | ||
| @@ -1640,9 +1636,11 @@ static void raid10d(mddev_t *mddev) | |||
| 1640 | conf_t *conf = mddev->private; | 1636 | conf_t *conf = mddev->private; |
| 1641 | struct list_head *head = &conf->retry_list; | 1637 | struct list_head *head = &conf->retry_list; |
| 1642 | mdk_rdev_t *rdev; | 1638 | mdk_rdev_t *rdev; |
| 1639 | struct blk_plug plug; | ||
| 1643 | 1640 | ||
| 1644 | md_check_recovery(mddev); | 1641 | md_check_recovery(mddev); |
| 1645 | 1642 | ||
| 1643 | blk_start_plug(&plug); | ||
| 1646 | for (;;) { | 1644 | for (;;) { |
| 1647 | char b[BDEVNAME_SIZE]; | 1645 | char b[BDEVNAME_SIZE]; |
| 1648 | 1646 | ||
| @@ -1716,6 +1714,7 @@ static void raid10d(mddev_t *mddev) | |||
| 1716 | } | 1714 | } |
| 1717 | cond_resched(); | 1715 | cond_resched(); |
| 1718 | } | 1716 | } |
| 1717 | blk_finish_plug(&plug); | ||
| 1719 | } | 1718 | } |
| 1720 | 1719 | ||
| 1721 | 1720 | ||
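
The raid10 hunks above drop the driver-private md_kick_device() unplug helper in favour of the block layer's per-task plugging: raid10d brackets its retry loop with blk_start_plug()/blk_finish_plug(), and make_request() records mddev_check_plugged() so the md thread is only woken explicitly when no plug is active. A minimal sketch of that plugging pattern follows; submit_batch() and its bio list are hypothetical stand-ins for the driver's real submission path, not code from this patch.

	/*
	 * Sketch only: bios submitted between blk_start_plug() and
	 * blk_finish_plug() are batched on the current task and flushed
	 * when the plug is finished or the task sleeps.
	 */
	#include <linux/blkdev.h>
	#include <linux/bio.h>

	static void submit_batch(struct bio *list)
	{
		struct blk_plug plug;

		blk_start_plug(&plug);
		while (list) {
			struct bio *bio = list;

			list = bio->bi_next;
			bio->bi_next = NULL;
			generic_make_request(bio);	/* held back until unplug */
		}
		blk_finish_plug(&plug);
	}
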
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e867ee42b152..f301e6ae220c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -27,12 +27,12 @@ | |||
| 27 | * | 27 | * |
| 28 | * We group bitmap updates into batches. Each batch has a number. | 28 | * We group bitmap updates into batches. Each batch has a number. |
| 29 | * We may write out several batches at once, but that isn't very important. | 29 | * We may write out several batches at once, but that isn't very important. |
| 30 | * conf->bm_write is the number of the last batch successfully written. | 30 | * conf->seq_write is the number of the last batch successfully written. |
| 31 | * conf->bm_flush is the number of the last batch that was closed to | 31 | * conf->seq_flush is the number of the last batch that was closed to |
| 32 | * new additions. | 32 | * new additions. |
| 33 | * When we discover that we will need to write to any block in a stripe | 33 | * When we discover that we will need to write to any block in a stripe |
| 34 | * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq | 34 | * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq |
| 35 | * the number of the batch it will be in. This is bm_flush+1. | 35 | * the number of the batch it will be in. This is seq_flush+1. |
| 36 | * When we are ready to do a write, if that batch hasn't been written yet, | 36 | * When we are ready to do a write, if that batch hasn't been written yet, |
| 37 | * we plug the array and queue the stripe for later. | 37 | * we plug the array and queue the stripe for later. |
| 38 | * When an unplug happens, we increment bm_flush, thus closing the current | 38 | * When an unplug happens, we increment bm_flush, thus closing the current |
| @@ -199,14 +199,12 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) | |||
| 199 | BUG_ON(!list_empty(&sh->lru)); | 199 | BUG_ON(!list_empty(&sh->lru)); |
| 200 | BUG_ON(atomic_read(&conf->active_stripes)==0); | 200 | BUG_ON(atomic_read(&conf->active_stripes)==0); |
| 201 | if (test_bit(STRIPE_HANDLE, &sh->state)) { | 201 | if (test_bit(STRIPE_HANDLE, &sh->state)) { |
| 202 | if (test_bit(STRIPE_DELAYED, &sh->state)) { | 202 | if (test_bit(STRIPE_DELAYED, &sh->state)) |
| 203 | list_add_tail(&sh->lru, &conf->delayed_list); | 203 | list_add_tail(&sh->lru, &conf->delayed_list); |
| 204 | plugger_set_plug(&conf->plug); | 204 | else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && |
| 205 | } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && | 205 | sh->bm_seq - conf->seq_write > 0) |
| 206 | sh->bm_seq - conf->seq_write > 0) { | ||
| 207 | list_add_tail(&sh->lru, &conf->bitmap_list); | 206 | list_add_tail(&sh->lru, &conf->bitmap_list); |
| 208 | plugger_set_plug(&conf->plug); | 207 | else { |
| 209 | } else { | ||
| 210 | clear_bit(STRIPE_BIT_DELAY, &sh->state); | 208 | clear_bit(STRIPE_BIT_DELAY, &sh->state); |
| 211 | list_add_tail(&sh->lru, &conf->handle_list); | 209 | list_add_tail(&sh->lru, &conf->handle_list); |
| 212 | } | 210 | } |
| @@ -461,7 +459,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector, | |||
| 461 | < (conf->max_nr_stripes *3/4) | 459 | < (conf->max_nr_stripes *3/4) |
| 462 | || !conf->inactive_blocked), | 460 | || !conf->inactive_blocked), |
| 463 | conf->device_lock, | 461 | conf->device_lock, |
| 464 | md_raid5_kick_device(conf)); | 462 | ); |
| 465 | conf->inactive_blocked = 0; | 463 | conf->inactive_blocked = 0; |
| 466 | } else | 464 | } else |
| 467 | init_stripe(sh, sector, previous); | 465 | init_stripe(sh, sector, previous); |
| @@ -1470,7 +1468,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) | |||
| 1470 | wait_event_lock_irq(conf->wait_for_stripe, | 1468 | wait_event_lock_irq(conf->wait_for_stripe, |
| 1471 | !list_empty(&conf->inactive_list), | 1469 | !list_empty(&conf->inactive_list), |
| 1472 | conf->device_lock, | 1470 | conf->device_lock, |
| 1473 | blk_flush_plug(current)); | 1471 | ); |
| 1474 | osh = get_free_stripe(conf); | 1472 | osh = get_free_stripe(conf); |
| 1475 | spin_unlock_irq(&conf->device_lock); | 1473 | spin_unlock_irq(&conf->device_lock); |
| 1476 | atomic_set(&nsh->count, 1); | 1474 | atomic_set(&nsh->count, 1); |
| @@ -3623,8 +3621,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf) | |||
| 3623 | atomic_inc(&conf->preread_active_stripes); | 3621 | atomic_inc(&conf->preread_active_stripes); |
| 3624 | list_add_tail(&sh->lru, &conf->hold_list); | 3622 | list_add_tail(&sh->lru, &conf->hold_list); |
| 3625 | } | 3623 | } |
| 3626 | } else | 3624 | } |
| 3627 | plugger_set_plug(&conf->plug); | ||
| 3628 | } | 3625 | } |
| 3629 | 3626 | ||
| 3630 | static void activate_bit_delay(raid5_conf_t *conf) | 3627 | static void activate_bit_delay(raid5_conf_t *conf) |
| @@ -3641,21 +3638,6 @@ static void activate_bit_delay(raid5_conf_t *conf) | |||
| 3641 | } | 3638 | } |
| 3642 | } | 3639 | } |
| 3643 | 3640 | ||
| 3644 | void md_raid5_kick_device(raid5_conf_t *conf) | ||
| 3645 | { | ||
| 3646 | blk_flush_plug(current); | ||
| 3647 | raid5_activate_delayed(conf); | ||
| 3648 | md_wakeup_thread(conf->mddev->thread); | ||
| 3649 | } | ||
| 3650 | EXPORT_SYMBOL_GPL(md_raid5_kick_device); | ||
| 3651 | |||
| 3652 | static void raid5_unplug(struct plug_handle *plug) | ||
| 3653 | { | ||
| 3654 | raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug); | ||
| 3655 | |||
| 3656 | md_raid5_kick_device(conf); | ||
| 3657 | } | ||
| 3658 | |||
| 3659 | int md_raid5_congested(mddev_t *mddev, int bits) | 3641 | int md_raid5_congested(mddev_t *mddev, int bits) |
| 3660 | { | 3642 | { |
| 3661 | raid5_conf_t *conf = mddev->private; | 3643 | raid5_conf_t *conf = mddev->private; |
| @@ -3945,6 +3927,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) | |||
| 3945 | struct stripe_head *sh; | 3927 | struct stripe_head *sh; |
| 3946 | const int rw = bio_data_dir(bi); | 3928 | const int rw = bio_data_dir(bi); |
| 3947 | int remaining; | 3929 | int remaining; |
| 3930 | int plugged; | ||
| 3948 | 3931 | ||
| 3949 | if (unlikely(bi->bi_rw & REQ_FLUSH)) { | 3932 | if (unlikely(bi->bi_rw & REQ_FLUSH)) { |
| 3950 | md_flush_request(mddev, bi); | 3933 | md_flush_request(mddev, bi); |
| @@ -3963,6 +3946,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) | |||
| 3963 | bi->bi_next = NULL; | 3946 | bi->bi_next = NULL; |
| 3964 | bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ | 3947 | bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ |
| 3965 | 3948 | ||
| 3949 | plugged = mddev_check_plugged(mddev); | ||
| 3966 | for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { | 3950 | for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { |
| 3967 | DEFINE_WAIT(w); | 3951 | DEFINE_WAIT(w); |
| 3968 | int disks, data_disks; | 3952 | int disks, data_disks; |
| @@ -4057,7 +4041,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) | |||
| 4057 | * add failed due to overlap. Flush everything | 4041 | * add failed due to overlap. Flush everything |
| 4058 | * and wait a while | 4042 | * and wait a while |
| 4059 | */ | 4043 | */ |
| 4060 | md_raid5_kick_device(conf); | 4044 | md_wakeup_thread(mddev->thread); |
| 4061 | release_stripe(sh); | 4045 | release_stripe(sh); |
| 4062 | schedule(); | 4046 | schedule(); |
| 4063 | goto retry; | 4047 | goto retry; |
| @@ -4077,6 +4061,9 @@ static int make_request(mddev_t *mddev, struct bio * bi) | |||
| 4077 | } | 4061 | } |
| 4078 | 4062 | ||
| 4079 | } | 4063 | } |
| 4064 | if (!plugged) | ||
| 4065 | md_wakeup_thread(mddev->thread); | ||
| 4066 | |||
| 4080 | spin_lock_irq(&conf->device_lock); | 4067 | spin_lock_irq(&conf->device_lock); |
| 4081 | remaining = raid5_dec_bi_phys_segments(bi); | 4068 | remaining = raid5_dec_bi_phys_segments(bi); |
| 4082 | spin_unlock_irq(&conf->device_lock); | 4069 | spin_unlock_irq(&conf->device_lock); |
| @@ -4478,24 +4465,30 @@ static void raid5d(mddev_t *mddev) | |||
| 4478 | struct stripe_head *sh; | 4465 | struct stripe_head *sh; |
| 4479 | raid5_conf_t *conf = mddev->private; | 4466 | raid5_conf_t *conf = mddev->private; |
| 4480 | int handled; | 4467 | int handled; |
| 4468 | struct blk_plug plug; | ||
| 4481 | 4469 | ||
| 4482 | pr_debug("+++ raid5d active\n"); | 4470 | pr_debug("+++ raid5d active\n"); |
| 4483 | 4471 | ||
| 4484 | md_check_recovery(mddev); | 4472 | md_check_recovery(mddev); |
| 4485 | 4473 | ||
| 4474 | blk_start_plug(&plug); | ||
| 4486 | handled = 0; | 4475 | handled = 0; |
| 4487 | spin_lock_irq(&conf->device_lock); | 4476 | spin_lock_irq(&conf->device_lock); |
| 4488 | while (1) { | 4477 | while (1) { |
| 4489 | struct bio *bio; | 4478 | struct bio *bio; |
| 4490 | 4479 | ||
| 4491 | if (conf->seq_flush != conf->seq_write) { | 4480 | if (atomic_read(&mddev->plug_cnt) == 0 && |
| 4492 | int seq = conf->seq_flush; | 4481 | !list_empty(&conf->bitmap_list)) { |
| 4482 | /* Now is a good time to flush some bitmap updates */ | ||
| 4483 | conf->seq_flush++; | ||
| 4493 | spin_unlock_irq(&conf->device_lock); | 4484 | spin_unlock_irq(&conf->device_lock); |
| 4494 | bitmap_unplug(mddev->bitmap); | 4485 | bitmap_unplug(mddev->bitmap); |
| 4495 | spin_lock_irq(&conf->device_lock); | 4486 | spin_lock_irq(&conf->device_lock); |
| 4496 | conf->seq_write = seq; | 4487 | conf->seq_write = conf->seq_flush; |
| 4497 | activate_bit_delay(conf); | 4488 | activate_bit_delay(conf); |
| 4498 | } | 4489 | } |
| 4490 | if (atomic_read(&mddev->plug_cnt) == 0) | ||
| 4491 | raid5_activate_delayed(conf); | ||
| 4499 | 4492 | ||
| 4500 | while ((bio = remove_bio_from_retry(conf))) { | 4493 | while ((bio = remove_bio_from_retry(conf))) { |
| 4501 | int ok; | 4494 | int ok; |
| @@ -4525,6 +4518,7 @@ static void raid5d(mddev_t *mddev) | |||
| 4525 | spin_unlock_irq(&conf->device_lock); | 4518 | spin_unlock_irq(&conf->device_lock); |
| 4526 | 4519 | ||
| 4527 | async_tx_issue_pending_all(); | 4520 | async_tx_issue_pending_all(); |
| 4521 | blk_finish_plug(&plug); | ||
| 4528 | 4522 | ||
| 4529 | pr_debug("--- raid5d inactive\n"); | 4523 | pr_debug("--- raid5d inactive\n"); |
| 4530 | } | 4524 | } |
| @@ -5141,8 +5135,6 @@ static int run(mddev_t *mddev) | |||
| 5141 | mdname(mddev)); | 5135 | mdname(mddev)); |
| 5142 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); | 5136 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); |
| 5143 | 5137 | ||
| 5144 | plugger_init(&conf->plug, raid5_unplug); | ||
| 5145 | mddev->plug = &conf->plug; | ||
| 5146 | if (mddev->queue) { | 5138 | if (mddev->queue) { |
| 5147 | int chunk_size; | 5139 | int chunk_size; |
| 5148 | /* read-ahead size must cover two whole stripes, which | 5140 | /* read-ahead size must cover two whole stripes, which |
| @@ -5192,7 +5184,6 @@ static int stop(mddev_t *mddev) | |||
| 5192 | mddev->thread = NULL; | 5184 | mddev->thread = NULL; |
| 5193 | if (mddev->queue) | 5185 | if (mddev->queue) |
| 5194 | mddev->queue->backing_dev_info.congested_fn = NULL; | 5186 | mddev->queue->backing_dev_info.congested_fn = NULL; |
| 5195 | plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/ | ||
| 5196 | free_conf(conf); | 5187 | free_conf(conf); |
| 5197 | mddev->private = NULL; | 5188 | mddev->private = NULL; |
| 5198 | mddev->to_remove = &raid5_attrs_group; | 5189 | mddev->to_remove = &raid5_attrs_group; |
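
The same conversion runs through raid5.c: raid5d now defers bitmap flushing and delayed-stripe activation until mddev->plug_cnt drops to zero, and the wait_event_lock_irq() callers pass an empty fourth argument. That parameter of the md-private macro is an arbitrary statement executed after the lock has been dropped and before the caller sleeps, and it used to kick the old plugging machinery. The sketch below is an illustrative approximation, not the exact definition in drivers/md/md.h, but it shows why leaving the command empty is harmless.

	/*
	 * Illustrative approximation of the md-private macro; 'cmd' runs
	 * with 'lock' released, so passing nothing simply sleeps without
	 * extra work.
	 */
	#include <linux/sched.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	#define example_wait_event_lock_irq(wq, condition, lock, cmd)		\
	do {									\
		DEFINE_WAIT(__wait);						\
		for (;;) {							\
			prepare_to_wait(&(wq), &__wait, TASK_UNINTERRUPTIBLE);	\
			if (condition)						\
				break;						\
			spin_unlock_irq(&(lock));				\
			cmd;							\
			schedule();						\
			spin_lock_irq(&(lock));					\
		}								\
		finish_wait(&(wq), &__wait);					\
	} while (0)
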
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 8d563a4f022a..3ca77a2613ba 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
| @@ -400,8 +400,6 @@ struct raid5_private_data { | |||
| 400 | * Cleared when a sync completes. | 400 | * Cleared when a sync completes. |
| 401 | */ | 401 | */ |
| 402 | 402 | ||
| 403 | struct plug_handle plug; | ||
| 404 | |||
| 405 | /* per cpu variables */ | 403 | /* per cpu variables */ |
| 406 | struct raid5_percpu { | 404 | struct raid5_percpu { |
| 407 | struct page *spare_page; /* Used when checking P/Q in raid6 */ | 405 | struct page *spare_page; /* Used when checking P/Q in raid6 */ |
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index d01574d98870..f4c8c844b913 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c | |||
| @@ -55,6 +55,19 @@ int mfd_cell_disable(struct platform_device *pdev) | |||
| 55 | } | 55 | } |
| 56 | EXPORT_SYMBOL(mfd_cell_disable); | 56 | EXPORT_SYMBOL(mfd_cell_disable); |
| 57 | 57 | ||
| 58 | static int mfd_platform_add_cell(struct platform_device *pdev, | ||
| 59 | const struct mfd_cell *cell) | ||
| 60 | { | ||
| 61 | if (!cell) | ||
| 62 | return 0; | ||
| 63 | |||
| 64 | pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL); | ||
| 65 | if (!pdev->mfd_cell) | ||
| 66 | return -ENOMEM; | ||
| 67 | |||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | |||
| 58 | static int mfd_add_device(struct device *parent, int id, | 71 | static int mfd_add_device(struct device *parent, int id, |
| 59 | const struct mfd_cell *cell, | 72 | const struct mfd_cell *cell, |
| 60 | struct resource *mem_base, | 73 | struct resource *mem_base, |
| @@ -75,7 +88,7 @@ static int mfd_add_device(struct device *parent, int id, | |||
| 75 | 88 | ||
| 76 | pdev->dev.parent = parent; | 89 | pdev->dev.parent = parent; |
| 77 | 90 | ||
| 78 | ret = platform_device_add_data(pdev, cell, sizeof(*cell)); | 91 | ret = mfd_platform_add_cell(pdev, cell); |
| 79 | if (ret) | 92 | if (ret) |
| 80 | goto fail_res; | 93 | goto fail_res; |
| 81 | 94 | ||
| @@ -123,7 +136,6 @@ static int mfd_add_device(struct device *parent, int id, | |||
| 123 | 136 | ||
| 124 | return 0; | 137 | return 0; |
| 125 | 138 | ||
| 126 | /* platform_device_del(pdev); */ | ||
| 127 | fail_res: | 139 | fail_res: |
| 128 | kfree(res); | 140 | kfree(res); |
| 129 | fail_device: | 141 | fail_device: |
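
mfd-core now duplicates the cell descriptor with kmemdup() and hangs the private copy off pdev->mfd_cell instead of copying it into the device's platform data, leaving platform_data free for cell-specific payloads. A sketch of how a sub-device driver can reach its cell after this change; example_cell_probe() is hypothetical and error handling is omitted, while mfd_get_cell() is assumed to be the accessor provided by <linux/mfd/core.h>.

	#include <linux/mfd/core.h>
	#include <linux/platform_device.h>

	static int example_cell_probe(struct platform_device *pdev)
	{
		/* the descriptor kmemdup()'d by mfd_platform_add_cell() above */
		const struct mfd_cell *cell = mfd_get_cell(pdev);

		dev_info(&pdev->dev, "bound to MFD cell %s\n", cell->name);
		return 0;
	}
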
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index 20e4e9395b61..ecafa4ba238b 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c | |||
| @@ -348,15 +348,15 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep) | |||
| 348 | 348 | ||
| 349 | static int gru_irq_count[GRU_CHIPLETS_PER_BLADE]; | 349 | static int gru_irq_count[GRU_CHIPLETS_PER_BLADE]; |
| 350 | 350 | ||
| 351 | static void gru_noop(unsigned int irq) | 351 | static void gru_noop(struct irq_data *d) |
| 352 | { | 352 | { |
| 353 | } | 353 | } |
| 354 | 354 | ||
| 355 | static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = { | 355 | static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = { |
| 356 | [0 ... GRU_CHIPLETS_PER_BLADE - 1] { | 356 | [0 ... GRU_CHIPLETS_PER_BLADE - 1] { |
| 357 | .mask = gru_noop, | 357 | .irq_mask = gru_noop, |
| 358 | .unmask = gru_noop, | 358 | .irq_unmask = gru_noop, |
| 359 | .ack = gru_noop | 359 | .irq_ack = gru_noop |
| 360 | } | 360 | } |
| 361 | }; | 361 | }; |
| 362 | 362 | ||
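
The GRU's placeholder chip is updated for the generic-IRQ rework: chip methods now take a struct irq_data * and use the irq_*-prefixed member names rather than the old unsigned-int callbacks. Roughly the same shape with illustrative names, for reference:

	#include <linux/irq.h>

	static void example_irq_noop(struct irq_data *d)
	{
		/* d->irq and irq_data_get_irq_chip_data(d) are available if needed */
	}

	static struct irq_chip example_chip = {
		.name		= "example",
		.irq_mask	= example_irq_noop,
		.irq_unmask	= example_irq_noop,
		.irq_ack	= example_irq_noop,
	};
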
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index d86ea8b01137..135df164a4c1 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
| @@ -781,7 +781,7 @@ static int pci_pm_resume(struct device *dev) | |||
| 781 | 781 | ||
| 782 | #endif /* !CONFIG_SUSPEND */ | 782 | #endif /* !CONFIG_SUSPEND */ |
| 783 | 783 | ||
| 784 | #ifdef CONFIG_HIBERNATION | 784 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 785 | 785 | ||
| 786 | static int pci_pm_freeze(struct device *dev) | 786 | static int pci_pm_freeze(struct device *dev) |
| 787 | { | 787 | { |
| @@ -970,7 +970,7 @@ static int pci_pm_restore(struct device *dev) | |||
| 970 | return error; | 970 | return error; |
| 971 | } | 971 | } |
| 972 | 972 | ||
| 973 | #else /* !CONFIG_HIBERNATION */ | 973 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 974 | 974 | ||
| 975 | #define pci_pm_freeze NULL | 975 | #define pci_pm_freeze NULL |
| 976 | #define pci_pm_freeze_noirq NULL | 976 | #define pci_pm_freeze_noirq NULL |
| @@ -981,7 +981,7 @@ static int pci_pm_restore(struct device *dev) | |||
| 981 | #define pci_pm_restore NULL | 981 | #define pci_pm_restore NULL |
| 982 | #define pci_pm_restore_noirq NULL | 982 | #define pci_pm_restore_noirq NULL |
| 983 | 983 | ||
| 984 | #endif /* !CONFIG_HIBERNATION */ | 984 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 985 | 985 | ||
| 986 | #ifdef CONFIG_PM_RUNTIME | 986 | #ifdef CONFIG_PM_RUNTIME |
| 987 | 987 | ||
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 2ee442c2a5db..0485e394712a 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -187,7 +187,8 @@ config MSI_LAPTOP | |||
| 187 | depends on ACPI | 187 | depends on ACPI |
| 188 | depends on BACKLIGHT_CLASS_DEVICE | 188 | depends on BACKLIGHT_CLASS_DEVICE |
| 189 | depends on RFKILL | 189 | depends on RFKILL |
| 190 | depends on SERIO_I8042 | 190 | depends on INPUT && SERIO_I8042 |
| 191 | select INPUT_SPARSEKMAP | ||
| 191 | ---help--- | 192 | ---help--- |
| 192 | This is a driver for laptops built by MSI (MICRO-STAR | 193 | This is a driver for laptops built by MSI (MICRO-STAR |
| 193 | INTERNATIONAL): | 194 | INTERNATIONAL): |
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 5ea6c3477d17..ac4e7f83ce6c 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c | |||
| @@ -89,7 +89,7 @@ MODULE_LICENSE("GPL"); | |||
| 89 | #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" | 89 | #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" |
| 90 | 90 | ||
| 91 | MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); | 91 | MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); |
| 92 | MODULE_ALIAS("wmi:6AF4F258-B401-42Fd-BE91-3D4AC2D7C0D3"); | 92 | MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"); |
| 93 | MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); | 93 | MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); |
| 94 | 94 | ||
| 95 | enum acer_wmi_event_ids { | 95 | enum acer_wmi_event_ids { |
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index efc776cb0c66..832a3fd7c1c8 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c | |||
| @@ -201,8 +201,8 @@ static int asus_wmi_input_init(struct asus_wmi *asus) | |||
| 201 | if (!asus->inputdev) | 201 | if (!asus->inputdev) |
| 202 | return -ENOMEM; | 202 | return -ENOMEM; |
| 203 | 203 | ||
| 204 | asus->inputdev->name = asus->driver->input_phys; | 204 | asus->inputdev->name = asus->driver->input_name; |
| 205 | asus->inputdev->phys = asus->driver->input_name; | 205 | asus->inputdev->phys = asus->driver->input_phys; |
| 206 | asus->inputdev->id.bustype = BUS_HOST; | 206 | asus->inputdev->id.bustype = BUS_HOST; |
| 207 | asus->inputdev->dev.parent = &asus->platform_device->dev; | 207 | asus->inputdev->dev.parent = &asus->platform_device->dev; |
| 208 | 208 | ||
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 0ddc434fb93b..649dcadd8ea3 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c | |||
| @@ -67,9 +67,11 @@ static const struct key_entry eeepc_wmi_keymap[] = { | |||
| 67 | { KE_KEY, 0x82, { KEY_CAMERA } }, | 67 | { KE_KEY, 0x82, { KEY_CAMERA } }, |
| 68 | { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } }, | 68 | { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } }, |
| 69 | { KE_KEY, 0x88, { KEY_WLAN } }, | 69 | { KE_KEY, 0x88, { KEY_WLAN } }, |
| 70 | { KE_KEY, 0xbd, { KEY_CAMERA } }, | ||
| 70 | { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, | 71 | { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, |
| 71 | { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ | 72 | { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ |
| 72 | { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ | 73 | { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ |
| 74 | { KE_KEY, 0xe8, { KEY_SCREENLOCK } }, | ||
| 73 | { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } }, | 75 | { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } }, |
| 74 | { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, | 76 | { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, |
| 75 | { KE_KEY, 0xec, { KEY_CAMERA_UP } }, | 77 | { KE_KEY, 0xec, { KEY_CAMERA_UP } }, |
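
The two new entries map the camera-toggle (0xbd) and screen-lock (0xe8) scancodes reported by the EeePC WMI firmware onto standard key codes. For reference, a minimal sparse-keymap table and report call look roughly like the sketch below; the input-device and sparse_keymap_setup() initialisation is the driver's usual one and is omitted, and example_report() is hypothetical.

	#include <linux/input.h>
	#include <linux/input/sparse-keymap.h>

	static const struct key_entry example_keymap[] = {
		{ KE_KEY, 0xbd, { KEY_CAMERA } },
		{ KE_KEY, 0xe8, { KEY_SCREENLOCK } },
		{ KE_END, 0 }
	};

	/* in the notify handler, once the scancode has been extracted: */
	static void example_report(struct input_dev *inputdev, unsigned int code)
	{
		if (!sparse_keymap_report_event(inputdev, code, 1, true))
			pr_info("unknown key 0x%x pressed\n", code);
	}
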
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c index d653104b59cb..464bb3fc4d88 100644 --- a/drivers/platform/x86/intel_pmic_gpio.c +++ b/drivers/platform/x86/intel_pmic_gpio.c | |||
| @@ -74,6 +74,19 @@ struct pmic_gpio { | |||
| 74 | u32 trigger_type; | 74 | u32 trigger_type; |
| 75 | }; | 75 | }; |
| 76 | 76 | ||
| 77 | static void pmic_program_irqtype(int gpio, int type) | ||
| 78 | { | ||
| 79 | if (type & IRQ_TYPE_EDGE_RISING) | ||
| 80 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20); | ||
| 81 | else | ||
| 82 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20); | ||
| 83 | |||
| 84 | if (type & IRQ_TYPE_EDGE_FALLING) | ||
| 85 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10); | ||
| 86 | else | ||
| 87 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10); | ||
| 88 | }; | ||
| 89 | |||
| 77 | static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | 90 | static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) |
| 78 | { | 91 | { |
| 79 | if (offset > 8) { | 92 | if (offset > 8) { |
| @@ -166,16 +179,38 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) | |||
| 166 | return pg->irq_base + offset; | 179 | return pg->irq_base + offset; |
| 167 | } | 180 | } |
| 168 | 181 | ||
| 182 | static void pmic_bus_lock(struct irq_data *data) | ||
| 183 | { | ||
| 184 | struct pmic_gpio *pg = irq_data_get_irq_chip_data(data); | ||
| 185 | |||
| 186 | mutex_lock(&pg->buslock); | ||
| 187 | } | ||
| 188 | |||
| 189 | static void pmic_bus_sync_unlock(struct irq_data *data) | ||
| 190 | { | ||
| 191 | struct pmic_gpio *pg = irq_data_get_irq_chip_data(data); | ||
| 192 | |||
| 193 | if (pg->update_type) { | ||
| 194 | unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE; | ||
| 195 | |||
| 196 | pmic_program_irqtype(gpio, pg->trigger_type); | ||
| 197 | pg->update_type = 0; | ||
| 198 | } | ||
| 199 | mutex_unlock(&pg->buslock); | ||
| 200 | } | ||
| 201 | |||
| 169 | /* the gpiointr register is read-clear, so just do nothing. */ | 202 | /* the gpiointr register is read-clear, so just do nothing. */ |
| 170 | static void pmic_irq_unmask(struct irq_data *data) { } | 203 | static void pmic_irq_unmask(struct irq_data *data) { } |
| 171 | 204 | ||
| 172 | static void pmic_irq_mask(struct irq_data *data) { } | 205 | static void pmic_irq_mask(struct irq_data *data) { } |
| 173 | 206 | ||
| 174 | static struct irq_chip pmic_irqchip = { | 207 | static struct irq_chip pmic_irqchip = { |
| 175 | .name = "PMIC-GPIO", | 208 | .name = "PMIC-GPIO", |
| 176 | .irq_mask = pmic_irq_mask, | 209 | .irq_mask = pmic_irq_mask, |
| 177 | .irq_unmask = pmic_irq_unmask, | 210 | .irq_unmask = pmic_irq_unmask, |
| 178 | .irq_set_type = pmic_irq_type, | 211 | .irq_set_type = pmic_irq_type, |
| 212 | .irq_bus_lock = pmic_bus_lock, | ||
| 213 | .irq_bus_sync_unlock = pmic_bus_sync_unlock, | ||
| 179 | }; | 214 | }; |
| 180 | 215 | ||
| 181 | static irqreturn_t pmic_irq_handler(int irq, void *data) | 216 | static irqreturn_t pmic_irq_handler(int irq, void *data) |
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index de434c6dc2d6..d347116d150e 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c | |||
| @@ -571,6 +571,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { | |||
| 571 | .callback = dmi_check_cb, | 571 | .callback = dmi_check_cb, |
| 572 | }, | 572 | }, |
| 573 | { | 573 | { |
| 574 | .ident = "R410 Plus", | ||
| 575 | .matches = { | ||
| 576 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 577 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 578 | DMI_MATCH(DMI_PRODUCT_NAME, "R410P"), | ||
| 579 | DMI_MATCH(DMI_BOARD_NAME, "R460"), | ||
| 580 | }, | ||
| 581 | .callback = dmi_check_cb, | ||
| 582 | }, | ||
| 583 | { | ||
| 574 | .ident = "R518", | 584 | .ident = "R518", |
| 575 | .matches = { | 585 | .matches = { |
| 576 | DMI_MATCH(DMI_SYS_VENDOR, | 586 | DMI_MATCH(DMI_SYS_VENDOR, |
| @@ -591,12 +601,12 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { | |||
| 591 | .callback = dmi_check_cb, | 601 | .callback = dmi_check_cb, |
| 592 | }, | 602 | }, |
| 593 | { | 603 | { |
| 594 | .ident = "N150/N210/N220", | 604 | .ident = "N150/N210/N220/N230", |
| 595 | .matches = { | 605 | .matches = { |
| 596 | DMI_MATCH(DMI_SYS_VENDOR, | 606 | DMI_MATCH(DMI_SYS_VENDOR, |
| 597 | "SAMSUNG ELECTRONICS CO., LTD."), | 607 | "SAMSUNG ELECTRONICS CO., LTD."), |
| 598 | DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"), | 608 | DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"), |
| 599 | DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"), | 609 | DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"), |
| 600 | }, | 610 | }, |
| 601 | .callback = dmi_check_cb, | 611 | .callback = dmi_check_cb, |
| 602 | }, | 612 | }, |
| @@ -771,6 +781,7 @@ static int __init samsung_init(void) | |||
| 771 | 781 | ||
| 772 | /* create a backlight device to talk to this one */ | 782 | /* create a backlight device to talk to this one */ |
| 773 | memset(&props, 0, sizeof(struct backlight_properties)); | 783 | memset(&props, 0, sizeof(struct backlight_properties)); |
| 784 | props.type = BACKLIGHT_PLATFORM; | ||
| 774 | props.max_brightness = sabi_config->max_brightness; | 785 | props.max_brightness = sabi_config->max_brightness; |
| 775 | backlight_device = backlight_device_register("samsung", &sdev->dev, | 786 | backlight_device = backlight_device_register("samsung", &sdev->dev, |
| 776 | NULL, &backlight_ops, | 787 | NULL, &backlight_ops, |
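
Besides the new R410 Plus and N230 DMI matches, the backlight registration now fills in props.type: with the backlight class carrying a type field, platform-level drivers are expected to declare BACKLIGHT_PLATFORM before registering. A sketch of that pattern; register_platform_backlight() is a hypothetical wrapper, not part of the driver.

	#include <linux/backlight.h>
	#include <linux/string.h>

	static struct backlight_device *
	register_platform_backlight(struct device *parent,
				    const struct backlight_ops *ops,
				    int max_brightness)
	{
		struct backlight_properties props;

		memset(&props, 0, sizeof(props));
		props.type = BACKLIGHT_PLATFORM;	/* firmware/platform control */
		props.max_brightness = max_brightness;

		return backlight_device_register("samsung", parent, NULL,
						 ops, &props);
	}
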
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index e642f5f29504..8f709aec4da0 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
| @@ -138,6 +138,8 @@ MODULE_PARM_DESC(kbd_backlight_timeout, | |||
| 138 | "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " | 138 | "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " |
| 139 | "(default: 0)"); | 139 | "(default: 0)"); |
| 140 | 140 | ||
| 141 | static void sony_nc_kbd_backlight_resume(void); | ||
| 142 | |||
| 141 | enum sony_nc_rfkill { | 143 | enum sony_nc_rfkill { |
| 142 | SONY_WIFI, | 144 | SONY_WIFI, |
| 143 | SONY_BLUETOOTH, | 145 | SONY_BLUETOOTH, |
| @@ -771,11 +773,6 @@ static int sony_nc_handles_setup(struct platform_device *pd) | |||
| 771 | if (!handles) | 773 | if (!handles) |
| 772 | return -ENOMEM; | 774 | return -ENOMEM; |
| 773 | 775 | ||
| 774 | sysfs_attr_init(&handles->devattr.attr); | ||
| 775 | handles->devattr.attr.name = "handles"; | ||
| 776 | handles->devattr.attr.mode = S_IRUGO; | ||
| 777 | handles->devattr.show = sony_nc_handles_show; | ||
| 778 | |||
| 779 | for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { | 776 | for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { |
| 780 | if (!acpi_callsetfunc(sony_nc_acpi_handle, | 777 | if (!acpi_callsetfunc(sony_nc_acpi_handle, |
| 781 | "SN00", i + 0x20, &result)) { | 778 | "SN00", i + 0x20, &result)) { |
| @@ -785,11 +782,18 @@ static int sony_nc_handles_setup(struct platform_device *pd) | |||
| 785 | } | 782 | } |
| 786 | } | 783 | } |
| 787 | 784 | ||
| 788 | /* allow reading capabilities via sysfs */ | 785 | if (debug) { |
| 789 | if (device_create_file(&pd->dev, &handles->devattr)) { | 786 | sysfs_attr_init(&handles->devattr.attr); |
| 790 | kfree(handles); | 787 | handles->devattr.attr.name = "handles"; |
| 791 | handles = NULL; | 788 | handles->devattr.attr.mode = S_IRUGO; |
| 792 | return -1; | 789 | handles->devattr.show = sony_nc_handles_show; |
| 790 | |||
| 791 | /* allow reading capabilities via sysfs */ | ||
| 792 | if (device_create_file(&pd->dev, &handles->devattr)) { | ||
| 793 | kfree(handles); | ||
| 794 | handles = NULL; | ||
| 795 | return -1; | ||
| 796 | } | ||
| 793 | } | 797 | } |
| 794 | 798 | ||
| 795 | return 0; | 799 | return 0; |
| @@ -798,7 +802,8 @@ static int sony_nc_handles_setup(struct platform_device *pd) | |||
| 798 | static int sony_nc_handles_cleanup(struct platform_device *pd) | 802 | static int sony_nc_handles_cleanup(struct platform_device *pd) |
| 799 | { | 803 | { |
| 800 | if (handles) { | 804 | if (handles) { |
| 801 | device_remove_file(&pd->dev, &handles->devattr); | 805 | if (debug) |
| 806 | device_remove_file(&pd->dev, &handles->devattr); | ||
| 802 | kfree(handles); | 807 | kfree(handles); |
| 803 | handles = NULL; | 808 | handles = NULL; |
| 804 | } | 809 | } |
| @@ -808,6 +813,11 @@ static int sony_nc_handles_cleanup(struct platform_device *pd) | |||
| 808 | static int sony_find_snc_handle(int handle) | 813 | static int sony_find_snc_handle(int handle) |
| 809 | { | 814 | { |
| 810 | int i; | 815 | int i; |
| 816 | |||
| 817 | /* not initialized yet, return early */ | ||
| 818 | if (!handles) | ||
| 819 | return -1; | ||
| 820 | |||
| 811 | for (i = 0; i < 0x10; i++) { | 821 | for (i = 0; i < 0x10; i++) { |
| 812 | if (handles->cap[i] == handle) { | 822 | if (handles->cap[i] == handle) { |
| 813 | dprintk("found handle 0x%.4x (offset: 0x%.2x)\n", | 823 | dprintk("found handle 0x%.4x (offset: 0x%.2x)\n", |
| @@ -1168,6 +1178,9 @@ static int sony_nc_resume(struct acpi_device *device) | |||
| 1168 | /* re-read rfkill state */ | 1178 | /* re-read rfkill state */ |
| 1169 | sony_nc_rfkill_update(); | 1179 | sony_nc_rfkill_update(); |
| 1170 | 1180 | ||
| 1181 | /* restore kbd backlight states */ | ||
| 1182 | sony_nc_kbd_backlight_resume(); | ||
| 1183 | |||
| 1171 | return 0; | 1184 | return 0; |
| 1172 | } | 1185 | } |
| 1173 | 1186 | ||
| @@ -1355,6 +1368,7 @@ out_no_enum: | |||
| 1355 | #define KBDBL_HANDLER 0x137 | 1368 | #define KBDBL_HANDLER 0x137 |
| 1356 | #define KBDBL_PRESENT 0xB00 | 1369 | #define KBDBL_PRESENT 0xB00 |
| 1357 | #define SET_MODE 0xC00 | 1370 | #define SET_MODE 0xC00 |
| 1371 | #define SET_STATE 0xD00 | ||
| 1358 | #define SET_TIMEOUT 0xE00 | 1372 | #define SET_TIMEOUT 0xE00 |
| 1359 | 1373 | ||
| 1360 | struct kbd_backlight { | 1374 | struct kbd_backlight { |
| @@ -1377,6 +1391,10 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value) | |||
| 1377 | (value << 0x10) | SET_MODE, &result)) | 1391 | (value << 0x10) | SET_MODE, &result)) |
| 1378 | return -EIO; | 1392 | return -EIO; |
| 1379 | 1393 | ||
| 1394 | /* Try to turn the light on/off immediately */ | ||
| 1395 | sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE, | ||
| 1396 | &result); | ||
| 1397 | |||
| 1380 | kbdbl_handle->mode = value; | 1398 | kbdbl_handle->mode = value; |
| 1381 | 1399 | ||
| 1382 | return 0; | 1400 | return 0; |
| @@ -1458,7 +1476,7 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd) | |||
| 1458 | { | 1476 | { |
| 1459 | int result; | 1477 | int result; |
| 1460 | 1478 | ||
| 1461 | if (sony_call_snc_handle(0x137, KBDBL_PRESENT, &result)) | 1479 | if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result)) |
| 1462 | return 0; | 1480 | return 0; |
| 1463 | if (!(result & 0x02)) | 1481 | if (!(result & 0x02)) |
| 1464 | return 0; | 1482 | return 0; |
| @@ -1501,13 +1519,36 @@ outkzalloc: | |||
| 1501 | static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd) | 1519 | static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd) |
| 1502 | { | 1520 | { |
| 1503 | if (kbdbl_handle) { | 1521 | if (kbdbl_handle) { |
| 1522 | int result; | ||
| 1523 | |||
| 1504 | device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); | 1524 | device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); |
| 1505 | device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr); | 1525 | device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr); |
| 1526 | |||
| 1527 | /* restore the default hw behaviour */ | ||
| 1528 | sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result); | ||
| 1529 | sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result); | ||
| 1530 | |||
| 1506 | kfree(kbdbl_handle); | 1531 | kfree(kbdbl_handle); |
| 1507 | } | 1532 | } |
| 1508 | return 0; | 1533 | return 0; |
| 1509 | } | 1534 | } |
| 1510 | 1535 | ||
| 1536 | static void sony_nc_kbd_backlight_resume(void) | ||
| 1537 | { | ||
| 1538 | int ignore = 0; | ||
| 1539 | |||
| 1540 | if (!kbdbl_handle) | ||
| 1541 | return; | ||
| 1542 | |||
| 1543 | if (kbdbl_handle->mode == 0) | ||
| 1544 | sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore); | ||
| 1545 | |||
| 1546 | if (kbdbl_handle->timeout != 0) | ||
| 1547 | sony_call_snc_handle(KBDBL_HANDLER, | ||
| 1548 | (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT, | ||
| 1549 | &ignore); | ||
| 1550 | } | ||
| 1551 | |||
| 1511 | static void sony_nc_backlight_setup(void) | 1552 | static void sony_nc_backlight_setup(void) |
| 1512 | { | 1553 | { |
| 1513 | acpi_handle unused; | 1554 | acpi_handle unused; |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index a08561f5349e..efb3b6b9bcdb 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
| @@ -8618,8 +8618,7 @@ static bool __pure __init tpacpi_is_valid_fw_id(const char* const s, | |||
| 8618 | tpacpi_is_fw_digit(s[1]) && | 8618 | tpacpi_is_fw_digit(s[1]) && |
| 8619 | s[2] == t && s[3] == 'T' && | 8619 | s[2] == t && s[3] == 'T' && |
| 8620 | tpacpi_is_fw_digit(s[4]) && | 8620 | tpacpi_is_fw_digit(s[4]) && |
| 8621 | tpacpi_is_fw_digit(s[5]) && | 8621 | tpacpi_is_fw_digit(s[5]); |
| 8622 | s[6] == 'W' && s[7] == 'W'; | ||
| 8623 | } | 8622 | } |
| 8624 | 8623 | ||
| 8625 | /* returns 0 - probe ok, or < 0 - probe error. | 8624 | /* returns 0 - probe ok, or < 0 - probe error. |
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index c29719cacbca..86c9a091a2ff 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c | |||
| @@ -1171,16 +1171,17 @@ static int rio_hdid_setup(char *str) | |||
| 1171 | 1171 | ||
| 1172 | __setup("riohdid=", rio_hdid_setup); | 1172 | __setup("riohdid=", rio_hdid_setup); |
| 1173 | 1173 | ||
| 1174 | void rio_register_mport(struct rio_mport *port) | 1174 | int rio_register_mport(struct rio_mport *port) |
| 1175 | { | 1175 | { |
| 1176 | if (next_portid >= RIO_MAX_MPORTS) { | 1176 | if (next_portid >= RIO_MAX_MPORTS) { |
| 1177 | pr_err("RIO: reached specified max number of mports\n"); | 1177 | pr_err("RIO: reached specified max number of mports\n"); |
| 1178 | return; | 1178 | return 1; |
| 1179 | } | 1179 | } |
| 1180 | 1180 | ||
| 1181 | port->id = next_portid++; | 1181 | port->id = next_portid++; |
| 1182 | port->host_deviceid = rio_get_hdid(port->id); | 1182 | port->host_deviceid = rio_get_hdid(port->id); |
| 1183 | list_add_tail(&port->node, &rio_mports); | 1183 | list_add_tail(&port->node, &rio_mports); |
| 1184 | return 0; | ||
| 1184 | } | 1185 | } |
| 1185 | 1186 | ||
| 1186 | EXPORT_SYMBOL_GPL(rio_local_get_device_id); | 1187 | EXPORT_SYMBOL_GPL(rio_local_get_device_id); |
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index 095016a9dec1..ac2701b22e71 100644 --- a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c | |||
| @@ -418,3 +418,4 @@ DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init); | |||
| 418 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init); | 418 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init); |
| 419 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init); | 419 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init); |
| 420 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init); | 420 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init); |
| 421 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1432, idtg2_switch_init); | ||
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 09b4437b3e61..39013867cbd6 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c | |||
| @@ -171,7 +171,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev, | |||
| 171 | err = __rtc_read_alarm(rtc, &alrm); | 171 | err = __rtc_read_alarm(rtc, &alrm); |
| 172 | 172 | ||
| 173 | if (!err && !rtc_valid_tm(&alrm.time)) | 173 | if (!err && !rtc_valid_tm(&alrm.time)) |
| 174 | rtc_set_alarm(rtc, &alrm); | 174 | rtc_initialize_alarm(rtc, &alrm); |
| 175 | 175 | ||
| 176 | strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); | 176 | strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); |
| 177 | dev_set_name(&rtc->dev, "rtc%d", id); | 177 | dev_set_name(&rtc->dev, "rtc%d", id); |
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 23719f0acbf6..ef6316acec43 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c | |||
| @@ -375,6 +375,32 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | |||
| 375 | } | 375 | } |
| 376 | EXPORT_SYMBOL_GPL(rtc_set_alarm); | 376 | EXPORT_SYMBOL_GPL(rtc_set_alarm); |
| 377 | 377 | ||
| 378 | /* Called once per device from rtc_device_register */ | ||
| 379 | int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | ||
| 380 | { | ||
| 381 | int err; | ||
| 382 | |||
| 383 | err = rtc_valid_tm(&alarm->time); | ||
| 384 | if (err != 0) | ||
| 385 | return err; | ||
| 386 | |||
| 387 | err = mutex_lock_interruptible(&rtc->ops_lock); | ||
| 388 | if (err) | ||
| 389 | return err; | ||
| 390 | |||
| 391 | rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); | ||
| 392 | rtc->aie_timer.period = ktime_set(0, 0); | ||
| 393 | if (alarm->enabled) { | ||
| 394 | rtc->aie_timer.enabled = 1; | ||
| 395 | timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node); | ||
| 396 | } | ||
| 397 | mutex_unlock(&rtc->ops_lock); | ||
| 398 | return err; | ||
| 399 | } | ||
| 400 | EXPORT_SYMBOL_GPL(rtc_initialize_alarm); | ||
| 401 | |||
| 402 | |||
| 403 | |||
| 378 | int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) | 404 | int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) |
| 379 | { | 405 | { |
| 380 | int err = mutex_lock_interruptible(&rtc->ops_lock); | 406 | int err = mutex_lock_interruptible(&rtc->ops_lock); |
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index a0fc4cf42abf..90d866272c8e 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c | |||
| @@ -250,6 +250,8 @@ static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | |||
| 250 | bfin_rtc_int_set_alarm(rtc); | 250 | bfin_rtc_int_set_alarm(rtc); |
| 251 | else | 251 | else |
| 252 | bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); | 252 | bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); |
| 253 | |||
| 254 | return 0; | ||
| 253 | } | 255 | } |
| 254 | 256 | ||
| 255 | static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) | 257 | static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) |
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c index c42006469559..c5ac03793e79 100644 --- a/drivers/rtc/rtc-mc13xxx.c +++ b/drivers/rtc/rtc-mc13xxx.c | |||
| @@ -401,6 +401,7 @@ const struct platform_device_id mc13xxx_rtc_idtable[] = { | |||
| 401 | }, { | 401 | }, { |
| 402 | .name = "mc13892-rtc", | 402 | .name = "mc13892-rtc", |
| 403 | }, | 403 | }, |
| 404 | { } | ||
| 404 | }; | 405 | }; |
| 405 | 406 | ||
| 406 | static struct platform_driver mc13xxx_rtc_driver = { | 407 | static struct platform_driver mc13xxx_rtc_driver = { |
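
The one-line fix adds the missing sentinel: platform_device_id tables are walked until an entry with an empty name, so without a zeroed terminator the bus match loop runs off the end of the array. For reference, a correctly terminated table looks like this (example_rtc_idtable is illustrative):

	#include <linux/mod_devicetable.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static const struct platform_device_id example_rtc_idtable[] = {
		{ .name = "mc13783-rtc" },
		{ .name = "mc13892-rtc" },
		{ }	/* zeroed sentinel terminates the match loop */
	};
	MODULE_DEVICE_TABLE(platform, example_rtc_idtable);
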
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 714964913e5e..b3466c491cd3 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
| @@ -336,7 +336,6 @@ static void s3c_rtc_release(struct device *dev) | |||
| 336 | 336 | ||
| 337 | /* do not clear AIE here, it may be needed for wake */ | 337 | /* do not clear AIE here, it may be needed for wake */ |
| 338 | 338 | ||
| 339 | s3c_rtc_setpie(dev, 0); | ||
| 340 | free_irq(s3c_rtc_alarmno, rtc_dev); | 339 | free_irq(s3c_rtc_alarmno, rtc_dev); |
| 341 | free_irq(s3c_rtc_tickno, rtc_dev); | 340 | free_irq(s3c_rtc_tickno, rtc_dev); |
| 342 | } | 341 | } |
| @@ -408,7 +407,6 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev) | |||
| 408 | platform_set_drvdata(dev, NULL); | 407 | platform_set_drvdata(dev, NULL); |
| 409 | rtc_device_unregister(rtc); | 408 | rtc_device_unregister(rtc); |
| 410 | 409 | ||
| 411 | s3c_rtc_setpie(&dev->dev, 0); | ||
| 412 | s3c_rtc_setaie(&dev->dev, 0); | 410 | s3c_rtc_setaie(&dev->dev, 0); |
| 413 | 411 | ||
| 414 | clk_disable(rtc_clk); | 412 | clk_disable(rtc_clk); |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 6d5c7ff43f5b..ab55c2fa7ce2 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q) | |||
| 443 | &sdev->request_queue->queue_flags); | 443 | &sdev->request_queue->queue_flags); |
| 444 | if (flagset) | 444 | if (flagset) |
| 445 | queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); | 445 | queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); |
| 446 | __blk_run_queue(sdev->request_queue, false); | 446 | __blk_run_queue(sdev->request_queue); |
| 447 | if (flagset) | 447 | if (flagset) |
| 448 | queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); | 448 | queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); |
| 449 | spin_unlock(sdev->request_queue->queue_lock); | 449 | spin_unlock(sdev->request_queue->queue_lock); |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index fdf3fa639056..28c33506e4ad 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
| @@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport) | |||
| 3829 | !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); | 3829 | !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); |
| 3830 | if (flagset) | 3830 | if (flagset) |
| 3831 | queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); | 3831 | queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); |
| 3832 | __blk_run_queue(rport->rqst_q, false); | 3832 | __blk_run_queue(rport->rqst_q); |
| 3833 | if (flagset) | 3833 | if (flagset) |
| 3834 | queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); | 3834 | queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); |
| 3835 | spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); | 3835 | spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); |
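
Both SCSI call sites track the block-layer API change that dropped __blk_run_queue()'s boolean argument: the function now always dispatches synchronously in the caller's context with the queue lock held, and callers that relied on the old "force kblockd" form are expected to use the asynchronous helper (blk_run_queue_async()) introduced alongside this change. Since both callers here passed false, only the argument goes away. A sketch of the synchronous pattern; example_kick_queue() is hypothetical.

	#include <linux/blkdev.h>
	#include <linux/spinlock.h>

	/* run a request_queue from a context that takes its queue_lock */
	static void example_kick_queue(struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_run_queue(q);		/* was __blk_run_queue(q, false) */
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
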
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index dca4a0bb6ca9..e3786f161bc3 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig | |||
| @@ -131,8 +131,6 @@ source "drivers/staging/wlags49_h2/Kconfig" | |||
| 131 | 131 | ||
| 132 | source "drivers/staging/wlags49_h25/Kconfig" | 132 | source "drivers/staging/wlags49_h25/Kconfig" |
| 133 | 133 | ||
| 134 | source "drivers/staging/samsung-laptop/Kconfig" | ||
| 135 | |||
| 136 | source "drivers/staging/sm7xx/Kconfig" | 134 | source "drivers/staging/sm7xx/Kconfig" |
| 137 | 135 | ||
| 138 | source "drivers/staging/dt3155v4l/Kconfig" | 136 | source "drivers/staging/dt3155v4l/Kconfig" |
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index eb93012b6f59..f0d5c5315612 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile | |||
| @@ -48,7 +48,6 @@ obj-$(CONFIG_XVMALLOC) += zram/ | |||
| 48 | obj-$(CONFIG_ZCACHE) += zcache/ | 48 | obj-$(CONFIG_ZCACHE) += zcache/ |
| 49 | obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ | 49 | obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ |
| 50 | obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/ | 50 | obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/ |
| 51 | obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop/ | ||
| 52 | obj-$(CONFIG_FB_SM7XX) += sm7xx/ | 51 | obj-$(CONFIG_FB_SM7XX) += sm7xx/ |
| 53 | obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/ | 52 | obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/ |
| 54 | obj-$(CONFIG_CRYSTALHD) += crystalhd/ | 53 | obj-$(CONFIG_CRYSTALHD) += crystalhd/ |
diff --git a/drivers/staging/samsung-laptop/Kconfig b/drivers/staging/samsung-laptop/Kconfig deleted file mode 100644 index f27c60864c26..000000000000 --- a/drivers/staging/samsung-laptop/Kconfig +++ /dev/null | |||
| @@ -1,10 +0,0 @@ | |||
| 1 | config SAMSUNG_LAPTOP | ||
| 2 | tristate "Samsung Laptop driver" | ||
| 3 | default n | ||
| 4 | depends on RFKILL && BACKLIGHT_CLASS_DEVICE && X86 | ||
| 5 | help | ||
| 6 | This module implements a driver for the N128 Samsung Laptop | ||
| 7 | providing control over the Wireless LED and the LCD backlight | ||
| 8 | |||
| 9 | To compile this driver as a module, choose | ||
| 10 | M here: the module will be called samsung-laptop. | ||
diff --git a/drivers/staging/samsung-laptop/Makefile b/drivers/staging/samsung-laptop/Makefile deleted file mode 100644 index 3c6f42045211..000000000000 --- a/drivers/staging/samsung-laptop/Makefile +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o | ||
diff --git a/drivers/staging/samsung-laptop/TODO b/drivers/staging/samsung-laptop/TODO deleted file mode 100644 index f7a6d589916e..000000000000 --- a/drivers/staging/samsung-laptop/TODO +++ /dev/null | |||
| @@ -1,5 +0,0 @@ | |||
| 1 | TODO: | ||
| 2 | - review from other developers | ||
| 3 | - figure out ACPI video issues | ||
| 4 | |||
| 5 | Please send patches to Greg Kroah-Hartman <gregkh@suse.de> | ||
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c deleted file mode 100644 index 25294462b8b6..000000000000 --- a/drivers/staging/samsung-laptop/samsung-laptop.c +++ /dev/null | |||
| @@ -1,843 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Samsung Laptop driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de) | ||
| 5 | * Copyright (C) 2009,2011 Novell Inc. | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify it | ||
| 8 | * under the terms of the GNU General Public License version 2 as published by | ||
| 9 | * the Free Software Foundation. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 13 | |||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/init.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/delay.h> | ||
| 18 | #include <linux/pci.h> | ||
| 19 | #include <linux/backlight.h> | ||
| 20 | #include <linux/fb.h> | ||
| 21 | #include <linux/dmi.h> | ||
| 22 | #include <linux/platform_device.h> | ||
| 23 | #include <linux/rfkill.h> | ||
| 24 | |||
| 25 | /* | ||
| 26 | * This driver is needed because a number of Samsung laptops do not hook | ||
| 27 | * their control settings through ACPI. So we have to poke around in the | ||
| 28 | * BIOS to do things like brightness values, and "special" key controls. | ||
| 29 | */ | ||
| 30 | |||
| 31 | /* | ||
| 32 | * We have 0 - 8 as valid brightness levels. The specs say that level 0 should | ||
| 33 | * be reserved by the BIOS (which really doesn't make much sense), we tell | ||
| 34 | * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8 | ||
| 35 | */ | ||
| 36 | #define MAX_BRIGHT 0x07 | ||
| 37 | |||
| 38 | |||
| 39 | #define SABI_IFACE_MAIN 0x00 | ||
| 40 | #define SABI_IFACE_SUB 0x02 | ||
| 41 | #define SABI_IFACE_COMPLETE 0x04 | ||
| 42 | #define SABI_IFACE_DATA 0x05 | ||
| 43 | |||
| 44 | /* Structure to get data back to the calling function */ | ||
| 45 | struct sabi_retval { | ||
| 46 | u8 retval[20]; | ||
| 47 | }; | ||
| 48 | |||
| 49 | struct sabi_header_offsets { | ||
| 50 | u8 port; | ||
| 51 | u8 re_mem; | ||
| 52 | u8 iface_func; | ||
| 53 | u8 en_mem; | ||
| 54 | u8 data_offset; | ||
| 55 | u8 data_segment; | ||
| 56 | }; | ||
| 57 | |||
| 58 | struct sabi_commands { | ||
| 59 | /* | ||
| 60 | * Brightness is 0 - 8, as described above. | ||
| 61 | * Value 0 is for the BIOS to use | ||
| 62 | */ | ||
| 63 | u8 get_brightness; | ||
| 64 | u8 set_brightness; | ||
| 65 | |||
| 66 | /* | ||
| 67 | * first byte: | ||
| 68 | * 0x00 - wireless is off | ||
| 69 | * 0x01 - wireless is on | ||
| 70 | * second byte: | ||
| 71 | * 0x02 - 3G is off | ||
| 72 | * 0x03 - 3G is on | ||
| 73 | * TODO, verify 3G is correct, that doesn't seem right... | ||
| 74 | */ | ||
| 75 | u8 get_wireless_button; | ||
| 76 | u8 set_wireless_button; | ||
| 77 | |||
| 78 | /* 0 is off, 1 is on */ | ||
| 79 | u8 get_backlight; | ||
| 80 | u8 set_backlight; | ||
| 81 | |||
| 82 | /* | ||
| 83 | * 0x80 or 0x00 - no action | ||
| 84 | * 0x81 - recovery key pressed | ||
| 85 | */ | ||
| 86 | u8 get_recovery_mode; | ||
| 87 | u8 set_recovery_mode; | ||
| 88 | |||
| 89 | /* | ||
| 90 | * on seclinux: 0 is low, 1 is high, | ||
| 91 | * on swsmi: 0 is normal, 1 is silent, 2 is turbo | ||
| 92 | */ | ||
| 93 | u8 get_performance_level; | ||
| 94 | u8 set_performance_level; | ||
| 95 | |||
| 96 | /* | ||
| 97 | * Tell the BIOS that Linux is running on this machine. | ||
| 98 | * 81 is on, 80 is off | ||
| 99 | */ | ||
| 100 | u8 set_linux; | ||
| 101 | }; | ||
| 102 | |||
| 103 | struct sabi_performance_level { | ||
| 104 | const char *name; | ||
| 105 | u8 value; | ||
| 106 | }; | ||
| 107 | |||
| 108 | struct sabi_config { | ||
| 109 | const char *test_string; | ||
| 110 | u16 main_function; | ||
| 111 | const struct sabi_header_offsets header_offsets; | ||
| 112 | const struct sabi_commands commands; | ||
| 113 | const struct sabi_performance_level performance_levels[4]; | ||
| 114 | u8 min_brightness; | ||
| 115 | u8 max_brightness; | ||
| 116 | }; | ||
| 117 | |||
| 118 | static const struct sabi_config sabi_configs[] = { | ||
| 119 | { | ||
| 120 | .test_string = "SECLINUX", | ||
| 121 | |||
| 122 | .main_function = 0x4c49, | ||
| 123 | |||
| 124 | .header_offsets = { | ||
| 125 | .port = 0x00, | ||
| 126 | .re_mem = 0x02, | ||
| 127 | .iface_func = 0x03, | ||
| 128 | .en_mem = 0x04, | ||
| 129 | .data_offset = 0x05, | ||
| 130 | .data_segment = 0x07, | ||
| 131 | }, | ||
| 132 | |||
| 133 | .commands = { | ||
| 134 | .get_brightness = 0x00, | ||
| 135 | .set_brightness = 0x01, | ||
| 136 | |||
| 137 | .get_wireless_button = 0x02, | ||
| 138 | .set_wireless_button = 0x03, | ||
| 139 | |||
| 140 | .get_backlight = 0x04, | ||
| 141 | .set_backlight = 0x05, | ||
| 142 | |||
| 143 | .get_recovery_mode = 0x06, | ||
| 144 | .set_recovery_mode = 0x07, | ||
| 145 | |||
| 146 | .get_performance_level = 0x08, | ||
| 147 | .set_performance_level = 0x09, | ||
| 148 | |||
| 149 | .set_linux = 0x0a, | ||
| 150 | }, | ||
| 151 | |||
| 152 | .performance_levels = { | ||
| 153 | { | ||
| 154 | .name = "silent", | ||
| 155 | .value = 0, | ||
| 156 | }, | ||
| 157 | { | ||
| 158 | .name = "normal", | ||
| 159 | .value = 1, | ||
| 160 | }, | ||
| 161 | { }, | ||
| 162 | }, | ||
| 163 | .min_brightness = 1, | ||
| 164 | .max_brightness = 8, | ||
| 165 | }, | ||
| 166 | { | ||
| 167 | .test_string = "SwSmi@", | ||
| 168 | |||
| 169 | .main_function = 0x5843, | ||
| 170 | |||
| 171 | .header_offsets = { | ||
| 172 | .port = 0x00, | ||
| 173 | .re_mem = 0x04, | ||
| 174 | .iface_func = 0x02, | ||
| 175 | .en_mem = 0x03, | ||
| 176 | .data_offset = 0x05, | ||
| 177 | .data_segment = 0x07, | ||
| 178 | }, | ||
| 179 | |||
| 180 | .commands = { | ||
| 181 | .get_brightness = 0x10, | ||
| 182 | .set_brightness = 0x11, | ||
| 183 | |||
| 184 | .get_wireless_button = 0x12, | ||
| 185 | .set_wireless_button = 0x13, | ||
| 186 | |||
| 187 | .get_backlight = 0x2d, | ||
| 188 | .set_backlight = 0x2e, | ||
| 189 | |||
| 190 | .get_recovery_mode = 0xff, | ||
| 191 | .set_recovery_mode = 0xff, | ||
| 192 | |||
| 193 | .get_performance_level = 0x31, | ||
| 194 | .set_performance_level = 0x32, | ||
| 195 | |||
| 196 | .set_linux = 0xff, | ||
| 197 | }, | ||
| 198 | |||
| 199 | .performance_levels = { | ||
| 200 | { | ||
| 201 | .name = "normal", | ||
| 202 | .value = 0, | ||
| 203 | }, | ||
| 204 | { | ||
| 205 | .name = "silent", | ||
| 206 | .value = 1, | ||
| 207 | }, | ||
| 208 | { | ||
| 209 | .name = "overclock", | ||
| 210 | .value = 2, | ||
| 211 | }, | ||
| 212 | { }, | ||
| 213 | }, | ||
| 214 | .min_brightness = 0, | ||
| 215 | .max_brightness = 8, | ||
| 216 | }, | ||
| 217 | { }, | ||
| 218 | }; | ||
| 219 | |||
| 220 | static const struct sabi_config *sabi_config; | ||
| 221 | |||
| 222 | static void __iomem *sabi; | ||
| 223 | static void __iomem *sabi_iface; | ||
| 224 | static void __iomem *f0000_segment; | ||
| 225 | static struct backlight_device *backlight_device; | ||
| 226 | static struct mutex sabi_mutex; | ||
| 227 | static struct platform_device *sdev; | ||
| 228 | static struct rfkill *rfk; | ||
| 229 | |||
| 230 | static int force; | ||
| 231 | module_param(force, bool, 0); | ||
| 232 | MODULE_PARM_DESC(force, | ||
| 233 | "Disable the DMI check and forces the driver to be loaded"); | ||
| 234 | |||
| 235 | static int debug; | ||
| 236 | module_param(debug, bool, S_IRUGO | S_IWUSR); | ||
| 237 | MODULE_PARM_DESC(debug, "Debug enabled or not"); | ||
| 238 | |||
| 239 | static int sabi_get_command(u8 command, struct sabi_retval *sretval) | ||
| 240 | { | ||
| 241 | int retval = 0; | ||
| 242 | u16 port = readw(sabi + sabi_config->header_offsets.port); | ||
| 243 | u8 complete, iface_data; | ||
| 244 | |||
| 245 | mutex_lock(&sabi_mutex); | ||
| 246 | |||
| 247 | /* enable memory to be able to write to it */ | ||
| 248 | outb(readb(sabi + sabi_config->header_offsets.en_mem), port); | ||
| 249 | |||
| 250 | /* write out the command */ | ||
| 251 | writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); | ||
| 252 | writew(command, sabi_iface + SABI_IFACE_SUB); | ||
| 253 | writeb(0, sabi_iface + SABI_IFACE_COMPLETE); | ||
| 254 | outb(readb(sabi + sabi_config->header_offsets.iface_func), port); | ||
| 255 | |||
| 256 | /* write protect memory to make it safe */ | ||
| 257 | outb(readb(sabi + sabi_config->header_offsets.re_mem), port); | ||
| 258 | |||
| 259 | /* see if the command actually succeeded */ | ||
| 260 | complete = readb(sabi_iface + SABI_IFACE_COMPLETE); | ||
| 261 | iface_data = readb(sabi_iface + SABI_IFACE_DATA); | ||
| 262 | if (complete != 0xaa || iface_data == 0xff) { | ||
| 263 | pr_warn("SABI get command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n", | ||
| 264 | command, complete, iface_data); | ||
| 265 | retval = -EINVAL; | ||
| 266 | goto exit; | ||
| 267 | } | ||
| 268 | /* | ||
| 269 | * Save off the data into a structure so the caller use it. | ||
| 270 | * Right now we only want the first 4 bytes, | ||
| 271 | * There are commands that need more, but not for the ones we | ||
| 272 | * currently care about. | ||
| 273 | */ | ||
| 274 | sretval->retval[0] = readb(sabi_iface + SABI_IFACE_DATA); | ||
| 275 | sretval->retval[1] = readb(sabi_iface + SABI_IFACE_DATA + 1); | ||
| 276 | sretval->retval[2] = readb(sabi_iface + SABI_IFACE_DATA + 2); | ||
| 277 | sretval->retval[3] = readb(sabi_iface + SABI_IFACE_DATA + 3); | ||
| 278 | |||
| 279 | exit: | ||
| 280 | mutex_unlock(&sabi_mutex); | ||
| 281 | return retval; | ||
| 282 | |||
| 283 | } | ||
| 284 | |||
| 285 | static int sabi_set_command(u8 command, u8 data) | ||
| 286 | { | ||
| 287 | int retval = 0; | ||
| 288 | u16 port = readw(sabi + sabi_config->header_offsets.port); | ||
| 289 | u8 complete, iface_data; | ||
| 290 | |||
| 291 | mutex_lock(&sabi_mutex); | ||
| 292 | |||
| 293 | /* enable memory to be able to write to it */ | ||
| 294 | outb(readb(sabi + sabi_config->header_offsets.en_mem), port); | ||
| 295 | |||
| 296 | /* write out the command */ | ||
| 297 | writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); | ||
| 298 | writew(command, sabi_iface + SABI_IFACE_SUB); | ||
| 299 | writeb(0, sabi_iface + SABI_IFACE_COMPLETE); | ||
| 300 | writeb(data, sabi_iface + SABI_IFACE_DATA); | ||
| 301 | outb(readb(sabi + sabi_config->header_offsets.iface_func), port); | ||
| 302 | |||
| 303 | /* write protect memory to make it safe */ | ||
| 304 | outb(readb(sabi + sabi_config->header_offsets.re_mem), port); | ||
| 305 | |||
| 306 | /* see if the command actually succeeded */ | ||
| 307 | complete = readb(sabi_iface + SABI_IFACE_COMPLETE); | ||
| 308 | iface_data = readb(sabi_iface + SABI_IFACE_DATA); | ||
| 309 | if (complete != 0xaa || iface_data == 0xff) { | ||
| 310 | pr_warn("SABI set command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n", | ||
| 311 | command, complete, iface_data); | ||
| 312 | retval = -EINVAL; | ||
| 313 | } | ||
| 314 | |||
| 315 | mutex_unlock(&sabi_mutex); | ||
| 316 | return retval; | ||
| 317 | } | ||
| 318 | |||
| 319 | static void test_backlight(void) | ||
| 320 | { | ||
| 321 | struct sabi_retval sretval; | ||
| 322 | |||
| 323 | sabi_get_command(sabi_config->commands.get_backlight, &sretval); | ||
| 324 | printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); | ||
| 325 | |||
| 326 | sabi_set_command(sabi_config->commands.set_backlight, 0); | ||
| 327 | printk(KERN_DEBUG "backlight should be off\n"); | ||
| 328 | |||
| 329 | sabi_get_command(sabi_config->commands.get_backlight, &sretval); | ||
| 330 | printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); | ||
| 331 | |||
| 332 | msleep(1000); | ||
| 333 | |||
| 334 | sabi_set_command(sabi_config->commands.set_backlight, 1); | ||
| 335 | printk(KERN_DEBUG "backlight should be on\n"); | ||
| 336 | |||
| 337 | sabi_get_command(sabi_config->commands.get_backlight, &sretval); | ||
| 338 | printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); | ||
| 339 | } | ||
| 340 | |||
| 341 | static void test_wireless(void) | ||
| 342 | { | ||
| 343 | struct sabi_retval sretval; | ||
| 344 | |||
| 345 | sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); | ||
| 346 | printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); | ||
| 347 | |||
| 348 | sabi_set_command(sabi_config->commands.set_wireless_button, 0); | ||
| 349 | printk(KERN_DEBUG "wireless led should be off\n"); | ||
| 350 | |||
| 351 | sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); | ||
| 352 | printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); | ||
| 353 | |||
| 354 | msleep(1000); | ||
| 355 | |||
| 356 | sabi_set_command(sabi_config->commands.set_wireless_button, 1); | ||
| 357 | printk(KERN_DEBUG "wireless led should be on\n"); | ||
| 358 | |||
| 359 | sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); | ||
| 360 | printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); | ||
| 361 | } | ||
| 362 | |||
| 363 | static u8 read_brightness(void) | ||
| 364 | { | ||
| 365 | struct sabi_retval sretval; | ||
| 366 | int user_brightness = 0; | ||
| 367 | int retval; | ||
| 368 | |||
| 369 | retval = sabi_get_command(sabi_config->commands.get_brightness, | ||
| 370 | &sretval); | ||
| 371 | if (!retval) { | ||
| 372 | user_brightness = sretval.retval[0]; | ||
| 373 | if (user_brightness != 0) | ||
| 374 | user_brightness -= sabi_config->min_brightness; | ||
| 375 | } | ||
| 376 | return user_brightness; | ||
| 377 | } | ||
| 378 | |||
| 379 | static void set_brightness(u8 user_brightness) | ||
| 380 | { | ||
| 381 | u8 user_level = user_brightness - sabi_config->min_brightness; | ||
| 382 | |||
| 383 | sabi_set_command(sabi_config->commands.set_brightness, user_level); | ||
| 384 | } | ||
| 385 | |||
| 386 | static int get_brightness(struct backlight_device *bd) | ||
| 387 | { | ||
| 388 | return (int)read_brightness(); | ||
| 389 | } | ||
| 390 | |||
| 391 | static int update_status(struct backlight_device *bd) | ||
| 392 | { | ||
| 393 | set_brightness(bd->props.brightness); | ||
| 394 | |||
| 395 | if (bd->props.power == FB_BLANK_UNBLANK) | ||
| 396 | sabi_set_command(sabi_config->commands.set_backlight, 1); | ||
| 397 | else | ||
| 398 | sabi_set_command(sabi_config->commands.set_backlight, 0); | ||
| 399 | return 0; | ||
| 400 | } | ||
| 401 | |||
| 402 | static const struct backlight_ops backlight_ops = { | ||
| 403 | .get_brightness = get_brightness, | ||
| 404 | .update_status = update_status, | ||
| 405 | }; | ||
| 406 | |||
| 407 | static int rfkill_set(void *data, bool blocked) | ||
| 408 | { | ||
| 409 | /* Do something with blocked...*/ | ||
| 410 | /* | ||
| 411 | * blocked == false is on | ||
| 412 | * blocked == true is off | ||
| 413 | */ | ||
| 414 | if (blocked) | ||
| 415 | sabi_set_command(sabi_config->commands.set_wireless_button, 0); | ||
| 416 | else | ||
| 417 | sabi_set_command(sabi_config->commands.set_wireless_button, 1); | ||
| 418 | |||
| 419 | return 0; | ||
| 420 | } | ||
| 421 | |||
| 422 | static struct rfkill_ops rfkill_ops = { | ||
| 423 | .set_block = rfkill_set, | ||
| 424 | }; | ||
| 425 | |||
| 426 | static int init_wireless(struct platform_device *sdev) | ||
| 427 | { | ||
| 428 | int retval; | ||
| 429 | |||
| 430 | rfk = rfkill_alloc("samsung-wifi", &sdev->dev, RFKILL_TYPE_WLAN, | ||
| 431 | &rfkill_ops, NULL); | ||
| 432 | if (!rfk) | ||
| 433 | return -ENOMEM; | ||
| 434 | |||
| 435 | retval = rfkill_register(rfk); | ||
| 436 | if (retval) { | ||
| 437 | rfkill_destroy(rfk); | ||
| 438 | return -ENODEV; | ||
| 439 | } | ||
| 440 | |||
| 441 | return 0; | ||
| 442 | } | ||
| 443 | |||
| 444 | static void destroy_wireless(void) | ||
| 445 | { | ||
| 446 | rfkill_unregister(rfk); | ||
| 447 | rfkill_destroy(rfk); | ||
| 448 | } | ||
| 449 | |||
| 450 | static ssize_t get_performance_level(struct device *dev, | ||
| 451 | struct device_attribute *attr, char *buf) | ||
| 452 | { | ||
| 453 | struct sabi_retval sretval; | ||
| 454 | int retval; | ||
| 455 | int i; | ||
| 456 | |||
| 457 | /* Read the state */ | ||
| 458 | retval = sabi_get_command(sabi_config->commands.get_performance_level, | ||
| 459 | &sretval); | ||
| 460 | if (retval) | ||
| 461 | return retval; | ||
| 462 | |||
| 463 | /* The logic is backwards, yeah, lots of fun... */ | ||
| 464 | for (i = 0; sabi_config->performance_levels[i].name; ++i) { | ||
| 465 | if (sretval.retval[0] == sabi_config->performance_levels[i].value) | ||
| 466 | return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name); | ||
| 467 | } | ||
| 468 | return sprintf(buf, "%s\n", "unknown"); | ||
| 469 | } | ||
| 470 | |||
| 471 | static ssize_t set_performance_level(struct device *dev, | ||
| 472 | struct device_attribute *attr, const char *buf, | ||
| 473 | size_t count) | ||
| 474 | { | ||
| 475 | if (count >= 1) { | ||
| 476 | int i; | ||
| 477 | for (i = 0; sabi_config->performance_levels[i].name; ++i) { | ||
| 478 | const struct sabi_performance_level *level = | ||
| 479 | &sabi_config->performance_levels[i]; | ||
| 480 | if (!strncasecmp(level->name, buf, strlen(level->name))) { | ||
| 481 | sabi_set_command(sabi_config->commands.set_performance_level, | ||
| 482 | level->value); | ||
| 483 | break; | ||
| 484 | } | ||
| 485 | } | ||
| 486 | if (!sabi_config->performance_levels[i].name) | ||
| 487 | return -EINVAL; | ||
| 488 | } | ||
| 489 | return count; | ||
| 490 | } | ||
| 491 | static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO, | ||
| 492 | get_performance_level, set_performance_level); | ||
| 493 | |||
| 494 | |||
| 495 | static int __init dmi_check_cb(const struct dmi_system_id *id) | ||
| 496 | { | ||
| 497 | pr_info("found laptop model '%s'\n", | ||
| 498 | id->ident); | ||
| 499 | return 0; | ||
| 500 | } | ||
| 501 | |||
| 502 | static struct dmi_system_id __initdata samsung_dmi_table[] = { | ||
| 503 | { | ||
| 504 | .ident = "N128", | ||
| 505 | .matches = { | ||
| 506 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 507 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 508 | DMI_MATCH(DMI_PRODUCT_NAME, "N128"), | ||
| 509 | DMI_MATCH(DMI_BOARD_NAME, "N128"), | ||
| 510 | }, | ||
| 511 | .callback = dmi_check_cb, | ||
| 512 | }, | ||
| 513 | { | ||
| 514 | .ident = "N130", | ||
| 515 | .matches = { | ||
| 516 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 517 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 518 | DMI_MATCH(DMI_PRODUCT_NAME, "N130"), | ||
| 519 | DMI_MATCH(DMI_BOARD_NAME, "N130"), | ||
| 520 | }, | ||
| 521 | .callback = dmi_check_cb, | ||
| 522 | }, | ||
| 523 | { | ||
| 524 | .ident = "X125", | ||
| 525 | .matches = { | ||
| 526 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 527 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 528 | DMI_MATCH(DMI_PRODUCT_NAME, "X125"), | ||
| 529 | DMI_MATCH(DMI_BOARD_NAME, "X125"), | ||
| 530 | }, | ||
| 531 | .callback = dmi_check_cb, | ||
| 532 | }, | ||
| 533 | { | ||
| 534 | .ident = "X120/X170", | ||
| 535 | .matches = { | ||
| 536 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 537 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 538 | DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"), | ||
| 539 | DMI_MATCH(DMI_BOARD_NAME, "X120/X170"), | ||
| 540 | }, | ||
| 541 | .callback = dmi_check_cb, | ||
| 542 | }, | ||
| 543 | { | ||
| 544 | .ident = "NC10", | ||
| 545 | .matches = { | ||
| 546 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 547 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 548 | DMI_MATCH(DMI_PRODUCT_NAME, "NC10"), | ||
| 549 | DMI_MATCH(DMI_BOARD_NAME, "NC10"), | ||
| 550 | }, | ||
| 551 | .callback = dmi_check_cb, | ||
| 552 | }, | ||
| 553 | { | ||
| 554 | .ident = "NP-Q45", | ||
| 555 | .matches = { | ||
| 556 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 557 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 558 | DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"), | ||
| 559 | DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"), | ||
| 560 | }, | ||
| 561 | .callback = dmi_check_cb, | ||
| 562 | }, | ||
| 563 | { | ||
| 564 | .ident = "X360", | ||
| 565 | .matches = { | ||
| 566 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 567 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 568 | DMI_MATCH(DMI_PRODUCT_NAME, "X360"), | ||
| 569 | DMI_MATCH(DMI_BOARD_NAME, "X360"), | ||
| 570 | }, | ||
| 571 | .callback = dmi_check_cb, | ||
| 572 | }, | ||
| 573 | { | ||
| 574 | .ident = "R410 Plus", | ||
| 575 | .matches = { | ||
| 576 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 577 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 578 | DMI_MATCH(DMI_PRODUCT_NAME, "R410P"), | ||
| 579 | DMI_MATCH(DMI_BOARD_NAME, "R460"), | ||
| 580 | }, | ||
| 581 | .callback = dmi_check_cb, | ||
| 582 | }, | ||
| 583 | { | ||
| 584 | .ident = "R518", | ||
| 585 | .matches = { | ||
| 586 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 587 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 588 | DMI_MATCH(DMI_PRODUCT_NAME, "R518"), | ||
| 589 | DMI_MATCH(DMI_BOARD_NAME, "R518"), | ||
| 590 | }, | ||
| 591 | .callback = dmi_check_cb, | ||
| 592 | }, | ||
| 593 | { | ||
| 594 | .ident = "R519/R719", | ||
| 595 | .matches = { | ||
| 596 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 597 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 598 | DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"), | ||
| 599 | DMI_MATCH(DMI_BOARD_NAME, "R519/R719"), | ||
| 600 | }, | ||
| 601 | .callback = dmi_check_cb, | ||
| 602 | }, | ||
| 603 | { | ||
| 604 | .ident = "N150/N210/N220/N230", | ||
| 605 | .matches = { | ||
| 606 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 607 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 608 | DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"), | ||
| 609 | DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"), | ||
| 610 | }, | ||
| 611 | .callback = dmi_check_cb, | ||
| 612 | }, | ||
| 613 | { | ||
| 614 | .ident = "N150P/N210P/N220P", | ||
| 615 | .matches = { | ||
| 616 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 617 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 618 | DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"), | ||
| 619 | DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"), | ||
| 620 | }, | ||
| 621 | .callback = dmi_check_cb, | ||
| 622 | }, | ||
| 623 | { | ||
| 624 | .ident = "R530/R730", | ||
| 625 | .matches = { | ||
| 626 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 627 | DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"), | ||
| 628 | DMI_MATCH(DMI_BOARD_NAME, "R530/R730"), | ||
| 629 | }, | ||
| 630 | .callback = dmi_check_cb, | ||
| 631 | }, | ||
| 632 | { | ||
| 633 | .ident = "NF110/NF210/NF310", | ||
| 634 | .matches = { | ||
| 635 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 636 | DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"), | ||
| 637 | DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"), | ||
| 638 | }, | ||
| 639 | .callback = dmi_check_cb, | ||
| 640 | }, | ||
| 641 | { | ||
| 642 | .ident = "N145P/N250P/N260P", | ||
| 643 | .matches = { | ||
| 644 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 645 | DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"), | ||
| 646 | DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"), | ||
| 647 | }, | ||
| 648 | .callback = dmi_check_cb, | ||
| 649 | }, | ||
| 650 | { | ||
| 651 | .ident = "R70/R71", | ||
| 652 | .matches = { | ||
| 653 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 654 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 655 | DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"), | ||
| 656 | DMI_MATCH(DMI_BOARD_NAME, "R70/R71"), | ||
| 657 | }, | ||
| 658 | .callback = dmi_check_cb, | ||
| 659 | }, | ||
| 660 | { | ||
| 661 | .ident = "P460", | ||
| 662 | .matches = { | ||
| 663 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 664 | DMI_MATCH(DMI_PRODUCT_NAME, "P460"), | ||
| 665 | DMI_MATCH(DMI_BOARD_NAME, "P460"), | ||
| 666 | }, | ||
| 667 | .callback = dmi_check_cb, | ||
| 668 | }, | ||
| 669 | { }, | ||
| 670 | }; | ||
| 671 | MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); | ||
| 672 | |||
| 673 | static int find_signature(void __iomem *memcheck, const char *testStr) | ||
| 674 | { | ||
| 675 | int i = 0; | ||
| 676 | int loca; | ||
| 677 | |||
| 678 | for (loca = 0; loca < 0xffff; loca++) { | ||
| 679 | char temp = readb(memcheck + loca); | ||
| 680 | |||
| 681 | if (temp == testStr[i]) { | ||
| 682 | if (i == strlen(testStr)-1) | ||
| 683 | break; | ||
| 684 | ++i; | ||
| 685 | } else { | ||
| 686 | i = 0; | ||
| 687 | } | ||
| 688 | } | ||
| 689 | return loca; | ||
| 690 | } | ||
| 691 | |||
| 692 | static int __init samsung_init(void) | ||
| 693 | { | ||
| 694 | struct backlight_properties props; | ||
| 695 | struct sabi_retval sretval; | ||
| 696 | unsigned int ifaceP; | ||
| 697 | int i; | ||
| 698 | int loca; | ||
| 699 | int retval; | ||
| 700 | |||
| 701 | mutex_init(&sabi_mutex); | ||
| 702 | |||
| 703 | if (!force && !dmi_check_system(samsung_dmi_table)) | ||
| 704 | return -ENODEV; | ||
| 705 | |||
| 706 | f0000_segment = ioremap_nocache(0xf0000, 0xffff); | ||
| 707 | if (!f0000_segment) { | ||
| 708 | pr_err("Can't map the segment at 0xf0000\n"); | ||
| 709 | return -EINVAL; | ||
| 710 | } | ||
| 711 | |||
| 712 | /* Try to find one of the signatures in memory to find the header */ | ||
| 713 | for (i = 0; sabi_configs[i].test_string != 0; ++i) { | ||
| 714 | sabi_config = &sabi_configs[i]; | ||
| 715 | loca = find_signature(f0000_segment, sabi_config->test_string); | ||
| 716 | if (loca != 0xffff) | ||
| 717 | break; | ||
| 718 | } | ||
| 719 | |||
| 720 | if (loca == 0xffff) { | ||
| 721 | pr_err("This computer does not support SABI\n"); | ||
| 722 | goto error_no_signature; | ||
| 723 | } | ||
| 724 | |||
| 725 | /* point to the SMI port number */ | ||
| 726 | loca += 1; | ||
| 727 | sabi = (f0000_segment + loca); | ||
| 728 | |||
| 729 | if (debug) { | ||
| 730 | printk(KERN_DEBUG "This computer supports SABI==%x\n", | ||
| 731 | loca + 0xf0000 - 6); | ||
| 732 | printk(KERN_DEBUG "SABI header:\n"); | ||
| 733 | printk(KERN_DEBUG " SMI Port Number = 0x%04x\n", | ||
| 734 | readw(sabi + sabi_config->header_offsets.port)); | ||
| 735 | printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n", | ||
| 736 | readb(sabi + sabi_config->header_offsets.iface_func)); | ||
| 737 | printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n", | ||
| 738 | readb(sabi + sabi_config->header_offsets.en_mem)); | ||
| 739 | printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n", | ||
| 740 | readb(sabi + sabi_config->header_offsets.re_mem)); | ||
| 741 | printk(KERN_DEBUG " SABI data offset = 0x%04x\n", | ||
| 742 | readw(sabi + sabi_config->header_offsets.data_offset)); | ||
| 743 | printk(KERN_DEBUG " SABI data segment = 0x%04x\n", | ||
| 744 | readw(sabi + sabi_config->header_offsets.data_segment)); | ||
| 745 | } | ||
| 746 | |||
| 747 | /* Get a pointer to the SABI Interface */ | ||
| 748 | ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4; | ||
| 749 | ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff; | ||
| 750 | sabi_iface = ioremap_nocache(ifaceP, 16); | ||
| 751 | if (!sabi_iface) { | ||
| 752 | pr_err("Can't remap %x\n", ifaceP); | ||
| 753 | goto exit; | ||
| 754 | } | ||
| 755 | if (debug) { | ||
| 756 | printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP); | ||
| 757 | printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface); | ||
| 758 | |||
| 759 | test_backlight(); | ||
| 760 | test_wireless(); | ||
| 761 | |||
| 762 | retval = sabi_get_command(sabi_config->commands.get_brightness, | ||
| 763 | &sretval); | ||
| 764 | printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]); | ||
| 765 | } | ||
| 766 | |||
| 767 | /* Turn on "Linux" mode in the BIOS */ | ||
| 768 | if (sabi_config->commands.set_linux != 0xff) { | ||
| 769 | retval = sabi_set_command(sabi_config->commands.set_linux, | ||
| 770 | 0x81); | ||
| 771 | if (retval) { | ||
| 772 | pr_warn("Linux mode was not set!\n"); | ||
| 773 | goto error_no_platform; | ||
| 774 | } | ||
| 775 | } | ||
| 776 | |||
| 777 | /* knock up a platform device to hang stuff off of */ | ||
| 778 | sdev = platform_device_register_simple("samsung", -1, NULL, 0); | ||
| 779 | if (IS_ERR(sdev)) | ||
| 780 | goto error_no_platform; | ||
| 781 | |||
| 782 | /* create a backlight device to talk to this one */ | ||
| 783 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
| 784 | props.type = BACKLIGHT_PLATFORM; | ||
| 785 | props.max_brightness = sabi_config->max_brightness; | ||
| 786 | backlight_device = backlight_device_register("samsung", &sdev->dev, | ||
| 787 | NULL, &backlight_ops, | ||
| 788 | &props); | ||
| 789 | if (IS_ERR(backlight_device)) | ||
| 790 | goto error_no_backlight; | ||
| 791 | |||
| 792 | backlight_device->props.brightness = read_brightness(); | ||
| 793 | backlight_device->props.power = FB_BLANK_UNBLANK; | ||
| 794 | backlight_update_status(backlight_device); | ||
| 795 | |||
| 796 | retval = init_wireless(sdev); | ||
| 797 | if (retval) | ||
| 798 | goto error_no_rfk; | ||
| 799 | |||
| 800 | retval = device_create_file(&sdev->dev, &dev_attr_performance_level); | ||
| 801 | if (retval) | ||
| 802 | goto error_file_create; | ||
| 803 | |||
| 804 | exit: | ||
| 805 | return 0; | ||
| 806 | |||
| 807 | error_file_create: | ||
| 808 | destroy_wireless(); | ||
| 809 | |||
| 810 | error_no_rfk: | ||
| 811 | backlight_device_unregister(backlight_device); | ||
| 812 | |||
| 813 | error_no_backlight: | ||
| 814 | platform_device_unregister(sdev); | ||
| 815 | |||
| 816 | error_no_platform: | ||
| 817 | iounmap(sabi_iface); | ||
| 818 | |||
| 819 | error_no_signature: | ||
| 820 | iounmap(f0000_segment); | ||
| 821 | return -EINVAL; | ||
| 822 | } | ||
| 823 | |||
| 824 | static void __exit samsung_exit(void) | ||
| 825 | { | ||
| 826 | /* Turn off "Linux" mode in the BIOS */ | ||
| 827 | if (sabi_config->commands.set_linux != 0xff) | ||
| 828 | sabi_set_command(sabi_config->commands.set_linux, 0x80); | ||
| 829 | |||
| 830 | device_remove_file(&sdev->dev, &dev_attr_performance_level); | ||
| 831 | backlight_device_unregister(backlight_device); | ||
| 832 | destroy_wireless(); | ||
| 833 | iounmap(sabi_iface); | ||
| 834 | iounmap(f0000_segment); | ||
| 835 | platform_device_unregister(sdev); | ||
| 836 | } | ||
| 837 | |||
| 838 | module_init(samsung_init); | ||
| 839 | module_exit(samsung_exit); | ||
| 840 | |||
| 841 | MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>"); | ||
| 842 | MODULE_DESCRIPTION("Samsung Backlight driver"); | ||
| 843 | MODULE_LICENSE("GPL"); | ||
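
Editor's note: for readers wanting to poke at the driver above, here is a minimal user-space sketch of how the performance_level attribute it registers could be exercised. The sysfs path is an assumption derived from the platform device name "samsung" created in samsung_init(); the patch itself does not spell it out.

/* Hypothetical user-space test for the performance_level attribute.
 * LEVEL_ATTR is an assumed path, not taken from the patch. */
#include <stdio.h>

#define LEVEL_ATTR "/sys/devices/platform/samsung/performance_level"

int main(void)
{
	char level[32];
	FILE *f = fopen(LEVEL_ATTR, "r");

	if (!f) {
		perror(LEVEL_ATTR);
		return 1;
	}
	if (fgets(level, sizeof(level), f))
		printf("current level: %s", level);
	fclose(f);

	/* Writing one of the names in sabi_config->performance_levels
	 * ("normal", "silent", "overclock") selects that level. */
	f = fopen(LEVEL_ATTR, "w");
	if (!f) {
		perror(LEVEL_ATTR);
		return 1;
	}
	fputs("silent\n", f);
	fclose(f);
	return 0;
}
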
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 41b6e51188e4..006489d82dc3 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig | |||
| @@ -66,6 +66,7 @@ config USB_ARCH_HAS_EHCI | |||
| 66 | default y if ARCH_VT8500 | 66 | default y if ARCH_VT8500 |
| 67 | default y if PLAT_SPEAR | 67 | default y if PLAT_SPEAR |
| 68 | default y if ARCH_MSM | 68 | default y if ARCH_MSM |
| 69 | default y if MICROBLAZE | ||
| 69 | default PCI | 70 | default PCI |
| 70 | 71 | ||
| 71 | # ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface. | 72 | # ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface. |
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index a3d2e2399655..96fdfb815f89 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c | |||
| @@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, | |||
| 221 | break; | 221 | break; |
| 222 | case USB_ENDPOINT_XFER_INT: | 222 | case USB_ENDPOINT_XFER_INT: |
| 223 | type = "Int."; | 223 | type = "Int."; |
| 224 | if (speed == USB_SPEED_HIGH) | 224 | if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER) |
| 225 | interval = 1 << (desc->bInterval - 1); | 225 | interval = 1 << (desc->bInterval - 1); |
| 226 | else | 226 | else |
| 227 | interval = desc->bInterval; | 227 | interval = desc->bInterval; |
| @@ -229,7 +229,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, | |||
| 229 | default: /* "can't happen" */ | 229 | default: /* "can't happen" */ |
| 230 | return start; | 230 | return start; |
| 231 | } | 231 | } |
| 232 | interval *= (speed == USB_SPEED_HIGH) ? 125 : 1000; | 232 | interval *= (speed == USB_SPEED_HIGH || |
| 233 | speed == USB_SPEED_SUPER) ? 125 : 1000; | ||
| 233 | if (interval % 1000) | 234 | if (interval % 1000) |
| 234 | unit = 'u'; | 235 | unit = 'u'; |
| 235 | else { | 236 | else { |
| @@ -542,8 +543,9 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, | |||
| 542 | if (level == 0) { | 543 | if (level == 0) { |
| 543 | int max; | 544 | int max; |
| 544 | 545 | ||
| 545 | /* high speed reserves 80%, full/low reserves 90% */ | 546 | /* super/high speed reserves 80%, full/low reserves 90% */ |
| 546 | if (usbdev->speed == USB_SPEED_HIGH) | 547 | if (usbdev->speed == USB_SPEED_HIGH || |
| 548 | usbdev->speed == USB_SPEED_SUPER) | ||
| 547 | max = 800; | 549 | max = 800; |
| 548 | else | 550 | else |
| 549 | max = FRAME_TIME_MAX_USECS_ALLOC; | 551 | max = FRAME_TIME_MAX_USECS_ALLOC; |
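
Editor's note: the devices.c hunks above extend the bInterval decoding to SuperSpeed endpoints. As a standalone check of the arithmetic (illustration only, not part of the patch):

/* How bInterval maps to a polling period in microseconds under the rules
 * used above: high/SuperSpeed interrupt endpoints use 2^(bInterval-1)
 * microframes of 125 us, full/low speed use bInterval frames of 1 ms. */
#include <stdio.h>

static unsigned int poll_period_us(int hs_or_ss, unsigned char bInterval)
{
	unsigned int interval;

	if (hs_or_ss)
		interval = 1u << (bInterval - 1);	/* microframes */
	else
		interval = bInterval;			/* frames */

	return interval * (hs_or_ss ? 125 : 1000);
}

int main(void)
{
	/* High-speed int endpoint, bInterval = 4: 2^3 = 8 microframes = 1000 us */
	printf("%u us\n", poll_period_us(1, 4));
	/* Full-speed int endpoint, bInterval = 10: 10 frames = 10000 us */
	printf("%u us\n", poll_period_us(0, 10));
	return 0;
}
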
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 8eed05d23838..77a7faec8d78 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
| @@ -1908,7 +1908,7 @@ void usb_free_streams(struct usb_interface *interface, | |||
| 1908 | 1908 | ||
| 1909 | /* Streams only apply to bulk endpoints. */ | 1909 | /* Streams only apply to bulk endpoints. */ |
| 1910 | for (i = 0; i < num_eps; i++) | 1910 | for (i = 0; i < num_eps; i++) |
| 1911 | if (!usb_endpoint_xfer_bulk(&eps[i]->desc)) | 1911 | if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc)) |
| 1912 | return; | 1912 | return; |
| 1913 | 1913 | ||
| 1914 | hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags); | 1914 | hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags); |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 8fb754916c67..93720bdc9efd 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
| @@ -2285,7 +2285,17 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) | |||
| 2285 | } | 2285 | } |
| 2286 | 2286 | ||
| 2287 | /* see 7.1.7.6 */ | 2287 | /* see 7.1.7.6 */ |
| 2288 | status = set_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND); | 2288 | /* Clear PORT_POWER if it's a USB3.0 device connected to USB 3.0 |
| 2289 | * external hub. | ||
| 2290 | * FIXME: this is a temporary workaround to make the system able | ||
| 2291 | * to suspend/resume. | ||
| 2292 | */ | ||
| 2293 | if ((hub->hdev->parent != NULL) && hub_is_superspeed(hub->hdev)) | ||
| 2294 | status = clear_port_feature(hub->hdev, port1, | ||
| 2295 | USB_PORT_FEAT_POWER); | ||
| 2296 | else | ||
| 2297 | status = set_port_feature(hub->hdev, port1, | ||
| 2298 | USB_PORT_FEAT_SUSPEND); | ||
| 2289 | if (status) { | 2299 | if (status) { |
| 2290 | dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", | 2300 | dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", |
| 2291 | port1, status); | 2301 | port1, status); |
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c index 9abecfddb27d..0111f8a9cf7f 100644 --- a/drivers/usb/gadget/f_audio.c +++ b/drivers/usb/gadget/f_audio.c | |||
| @@ -706,6 +706,7 @@ f_audio_unbind(struct usb_configuration *c, struct usb_function *f) | |||
| 706 | struct f_audio *audio = func_to_audio(f); | 706 | struct f_audio *audio = func_to_audio(f); |
| 707 | 707 | ||
| 708 | usb_free_descriptors(f->descriptors); | 708 | usb_free_descriptors(f->descriptors); |
| 709 | usb_free_descriptors(f->hs_descriptors); | ||
| 709 | kfree(audio); | 710 | kfree(audio); |
| 710 | } | 711 | } |
| 711 | 712 | ||
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c index 95dd4662d6a8..b3c304290150 100644 --- a/drivers/usb/gadget/f_eem.c +++ b/drivers/usb/gadget/f_eem.c | |||
| @@ -314,6 +314,9 @@ eem_unbind(struct usb_configuration *c, struct usb_function *f) | |||
| 314 | 314 | ||
| 315 | static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) | 315 | static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) |
| 316 | { | 316 | { |
| 317 | struct sk_buff *skb = (struct sk_buff *)req->context; | ||
| 318 | |||
| 319 | dev_kfree_skb_any(skb); | ||
| 317 | } | 320 | } |
| 318 | 321 | ||
| 319 | /* | 322 | /* |
| @@ -428,10 +431,11 @@ static int eem_unwrap(struct gether *port, | |||
| 428 | skb_trim(skb2, len); | 431 | skb_trim(skb2, len); |
| 429 | put_unaligned_le16(BIT(15) | BIT(11) | len, | 432 | put_unaligned_le16(BIT(15) | BIT(11) | len, |
| 430 | skb_push(skb2, 2)); | 433 | skb_push(skb2, 2)); |
| 431 | skb_copy_bits(skb, 0, req->buf, skb->len); | 434 | skb_copy_bits(skb2, 0, req->buf, skb2->len); |
| 432 | req->length = skb->len; | 435 | req->length = skb2->len; |
| 433 | req->complete = eem_cmd_complete; | 436 | req->complete = eem_cmd_complete; |
| 434 | req->zero = 1; | 437 | req->zero = 1; |
| 438 | req->context = skb2; | ||
| 435 | if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC)) | 439 | if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC)) |
| 436 | DBG(cdev, "echo response queue fail\n"); | 440 | DBG(cdev, "echo response queue fail\n"); |
| 437 | break; | 441 | break; |
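
Editor's note: the f_eem fix stops leaking the echo skb by remembering it in req->context and freeing it from the completion handler. A generic sketch of that ownership hand-off follows; the types and names are invented for illustration and are not the gadget API.

#include <stdlib.h>

struct fake_request {
	void *buf;
	void *context;				/* owner's cookie, handed back at completion */
	void (*complete)(struct fake_request *req);
};

static void echo_complete(struct fake_request *req)
{
	free(req->context);			/* safe: the transfer is finished */
}

static void queue_echo(struct fake_request *req, void *backing)
{
	req->context = backing;			/* remember what to release later */
	req->complete = echo_complete;
	/* ...hand req to the controller; echo_complete() runs when it is done */
}

int main(void)
{
	struct fake_request req = { 0 };

	queue_echo(&req, malloc(64));
	req.complete(&req);			/* simulate the controller completing */
	return 0;
}
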
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index aee7e3c53c38..36613b37c504 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c | |||
| @@ -1148,6 +1148,12 @@ static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame) | |||
| 1148 | static int txcomplete(struct qe_ep *ep, unsigned char restart) | 1148 | static int txcomplete(struct qe_ep *ep, unsigned char restart) |
| 1149 | { | 1149 | { |
| 1150 | if (ep->tx_req != NULL) { | 1150 | if (ep->tx_req != NULL) { |
| 1151 | struct qe_req *req = ep->tx_req; | ||
| 1152 | unsigned zlp = 0, last_len = 0; | ||
| 1153 | |||
| 1154 | last_len = min_t(unsigned, req->req.length - ep->sent, | ||
| 1155 | ep->ep.maxpacket); | ||
| 1156 | |||
| 1151 | if (!restart) { | 1157 | if (!restart) { |
| 1152 | int asent = ep->last; | 1158 | int asent = ep->last; |
| 1153 | ep->sent += asent; | 1159 | ep->sent += asent; |
| @@ -1156,9 +1162,18 @@ static int txcomplete(struct qe_ep *ep, unsigned char restart) | |||
| 1156 | ep->last = 0; | 1162 | ep->last = 0; |
| 1157 | } | 1163 | } |
| 1158 | 1164 | ||
| 1165 | /* zlp needed when req->req.zero is set */ | ||
| 1166 | if (req->req.zero) { | ||
| 1167 | if (last_len == 0 || | ||
| 1168 | (req->req.length % ep->ep.maxpacket) != 0) | ||
| 1169 | zlp = 0; | ||
| 1170 | else | ||
| 1171 | zlp = 1; | ||
| 1172 | } else | ||
| 1173 | zlp = 0; | ||
| 1174 | |||
| 1159 | /* a request already were transmitted completely */ | 1175 | /* a request already were transmitted completely */ |
| 1160 | if ((ep->tx_req->req.length - ep->sent) <= 0) { | 1176 | if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) { |
| 1161 | ep->tx_req->req.actual = (unsigned int)ep->sent; | ||
| 1162 | done(ep, ep->tx_req, 0); | 1177 | done(ep, ep->tx_req, 0); |
| 1163 | ep->tx_req = NULL; | 1178 | ep->tx_req = NULL; |
| 1164 | ep->last = 0; | 1179 | ep->last = 0; |
| @@ -1191,6 +1206,7 @@ static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame) | |||
| 1191 | buf = (u8 *)ep->tx_req->req.buf + ep->sent; | 1206 | buf = (u8 *)ep->tx_req->req.buf + ep->sent; |
| 1192 | if (buf && size) { | 1207 | if (buf && size) { |
| 1193 | ep->last = size; | 1208 | ep->last = size; |
| 1209 | ep->tx_req->req.actual += size; | ||
| 1194 | frame_set_data(frame, buf); | 1210 | frame_set_data(frame, buf); |
| 1195 | frame_set_length(frame, size); | 1211 | frame_set_length(frame, size); |
| 1196 | frame_set_status(frame, FRAME_OK); | 1212 | frame_set_status(frame, FRAME_OK); |
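
Editor's note: the fsl_qe_udc hunk adds zero-length-packet termination. The rule it implements boils down to the sketch below; this is a simplification for illustration, not the driver's exact bookkeeping around ep->sent and last_len.

#include <stdio.h>
#include <stdbool.h>

/* A ZLP is only needed when the gadget asked for one (req.zero) and the
 * transfer length is a non-zero multiple of the endpoint's maxpacket, so
 * the host would otherwise see no short packet marking the end. */
static bool needs_zlp(bool zero_flag, unsigned int length, unsigned int maxpacket)
{
	if (!zero_flag || length == 0)
		return false;
	return (length % maxpacket) == 0;
}

int main(void)
{
	printf("%d\n", needs_zlp(true, 512, 512));	/* 1: exact multiple */
	printf("%d\n", needs_zlp(true, 500, 512));	/* 0: short packet ends it */
	return 0;
}
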
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index 3ed73f49cf18..a01383f71f38 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c | |||
| @@ -386,8 +386,10 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
| 386 | 386 | ||
| 387 | /* halt any endpoint by doing a "wrong direction" i/o call */ | 387 | /* halt any endpoint by doing a "wrong direction" i/o call */ |
| 388 | if (usb_endpoint_dir_in(&data->desc)) { | 388 | if (usb_endpoint_dir_in(&data->desc)) { |
| 389 | if (usb_endpoint_xfer_isoc(&data->desc)) | 389 | if (usb_endpoint_xfer_isoc(&data->desc)) { |
| 390 | mutex_unlock(&data->lock); | ||
| 390 | return -EINVAL; | 391 | return -EINVAL; |
| 392 | } | ||
| 391 | DBG (data->dev, "%s halt\n", data->name); | 393 | DBG (data->dev, "%s halt\n", data->name); |
| 392 | spin_lock_irq (&data->dev->lock); | 394 | spin_lock_irq (&data->dev->lock); |
| 393 | if (likely (data->ep != NULL)) | 395 | if (likely (data->ep != NULL)) |
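
Editor's note: the inode.c change is a lock-balance fix; the isochronous early return used to leave data->lock held. The patch unlocks inline before returning; a common alternative, sketched here with pthreads standing in for the kernel mutex, funnels every exit through one unlock label.

#include <pthread.h>
#include <errno.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_read(int iso_in_endpoint)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (iso_in_endpoint) {
		ret = -EINVAL;		/* wrong-direction i/o is rejected... */
		goto out;		/* ...but the lock is still dropped */
	}
	/* the normal read path would run here */
out:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	return do_read(1) == -EINVAL ? 0 : 1;
}
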
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c index 3e4b35e50c24..68dbcc3e4cc2 100644 --- a/drivers/usb/gadget/pch_udc.c +++ b/drivers/usb/gadget/pch_udc.c | |||
| @@ -1608,7 +1608,7 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq, | |||
| 1608 | return -EINVAL; | 1608 | return -EINVAL; |
| 1609 | if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN)) | 1609 | if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN)) |
| 1610 | return -ESHUTDOWN; | 1610 | return -ESHUTDOWN; |
| 1611 | spin_lock_irqsave(&ep->dev->lock, iflags); | 1611 | spin_lock_irqsave(&dev->lock, iflags); |
| 1612 | /* map the buffer for dma */ | 1612 | /* map the buffer for dma */ |
| 1613 | if (usbreq->length && | 1613 | if (usbreq->length && |
| 1614 | ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) { | 1614 | ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) { |
| @@ -1625,8 +1625,10 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq, | |||
| 1625 | DMA_FROM_DEVICE); | 1625 | DMA_FROM_DEVICE); |
| 1626 | } else { | 1626 | } else { |
| 1627 | req->buf = kzalloc(usbreq->length, GFP_ATOMIC); | 1627 | req->buf = kzalloc(usbreq->length, GFP_ATOMIC); |
| 1628 | if (!req->buf) | 1628 | if (!req->buf) { |
| 1629 | return -ENOMEM; | 1629 | retval = -ENOMEM; |
| 1630 | goto probe_end; | ||
| 1631 | } | ||
| 1630 | if (ep->in) { | 1632 | if (ep->in) { |
| 1631 | memcpy(req->buf, usbreq->buf, usbreq->length); | 1633 | memcpy(req->buf, usbreq->buf, usbreq->length); |
| 1632 | req->dma = dma_map_single(&dev->pdev->dev, | 1634 | req->dma = dma_map_single(&dev->pdev->dev, |
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index 015118535f77..6dcc1f68fa60 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c | |||
| @@ -1083,7 +1083,9 @@ static void irq_device_state(struct r8a66597 *r8a66597) | |||
| 1083 | 1083 | ||
| 1084 | if (dvsq == DS_DFLT) { | 1084 | if (dvsq == DS_DFLT) { |
| 1085 | /* bus reset */ | 1085 | /* bus reset */ |
| 1086 | spin_unlock(&r8a66597->lock); | ||
| 1086 | r8a66597->driver->disconnect(&r8a66597->gadget); | 1087 | r8a66597->driver->disconnect(&r8a66597->gadget); |
| 1088 | spin_lock(&r8a66597->lock); | ||
| 1087 | r8a66597_update_usb_speed(r8a66597); | 1089 | r8a66597_update_usb_speed(r8a66597); |
| 1088 | } | 1090 | } |
| 1089 | if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG) | 1091 | if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG) |
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 98ded66e8d3f..42abd0f603bf 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
| @@ -1247,24 +1247,27 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 1247 | 1247 | ||
| 1248 | static void scan_async (struct ehci_hcd *ehci) | 1248 | static void scan_async (struct ehci_hcd *ehci) |
| 1249 | { | 1249 | { |
| 1250 | bool stopped; | ||
| 1250 | struct ehci_qh *qh; | 1251 | struct ehci_qh *qh; |
| 1251 | enum ehci_timer_action action = TIMER_IO_WATCHDOG; | 1252 | enum ehci_timer_action action = TIMER_IO_WATCHDOG; |
| 1252 | 1253 | ||
| 1253 | ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index); | 1254 | ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index); |
| 1254 | timer_action_done (ehci, TIMER_ASYNC_SHRINK); | 1255 | timer_action_done (ehci, TIMER_ASYNC_SHRINK); |
| 1255 | rescan: | 1256 | rescan: |
| 1257 | stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state); | ||
| 1256 | qh = ehci->async->qh_next.qh; | 1258 | qh = ehci->async->qh_next.qh; |
| 1257 | if (likely (qh != NULL)) { | 1259 | if (likely (qh != NULL)) { |
| 1258 | do { | 1260 | do { |
| 1259 | /* clean any finished work for this qh */ | 1261 | /* clean any finished work for this qh */ |
| 1260 | if (!list_empty (&qh->qtd_list) | 1262 | if (!list_empty(&qh->qtd_list) && (stopped || |
| 1261 | && qh->stamp != ehci->stamp) { | 1263 | qh->stamp != ehci->stamp)) { |
| 1262 | int temp; | 1264 | int temp; |
| 1263 | 1265 | ||
| 1264 | /* unlinks could happen here; completion | 1266 | /* unlinks could happen here; completion |
| 1265 | * reporting drops the lock. rescan using | 1267 | * reporting drops the lock. rescan using |
| 1266 | * the latest schedule, but don't rescan | 1268 | * the latest schedule, but don't rescan |
| 1267 | * qhs we already finished (no looping). | 1269 | * qhs we already finished (no looping) |
| 1270 | * unless the controller is stopped. | ||
| 1268 | */ | 1271 | */ |
| 1269 | qh = qh_get (qh); | 1272 | qh = qh_get (qh); |
| 1270 | qh->stamp = ehci->stamp; | 1273 | qh->stamp = ehci->stamp; |
| @@ -1285,9 +1288,9 @@ rescan: | |||
| 1285 | */ | 1288 | */ |
| 1286 | if (list_empty(&qh->qtd_list) | 1289 | if (list_empty(&qh->qtd_list) |
| 1287 | && qh->qh_state == QH_STATE_LINKED) { | 1290 | && qh->qh_state == QH_STATE_LINKED) { |
| 1288 | if (!ehci->reclaim | 1291 | if (!ehci->reclaim && (stopped || |
| 1289 | && ((ehci->stamp - qh->stamp) & 0x1fff) | 1292 | ((ehci->stamp - qh->stamp) & 0x1fff) |
| 1290 | >= (EHCI_SHRINK_FRAMES * 8)) | 1293 | >= EHCI_SHRINK_FRAMES * 8)) |
| 1291 | start_unlink_async(ehci, qh); | 1294 | start_unlink_async(ehci, qh); |
| 1292 | else | 1295 | else |
| 1293 | action = TIMER_ASYNC_SHRINK; | 1296 | action = TIMER_ASYNC_SHRINK; |
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index f50e84ac570a..795345ad45e6 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c | |||
| @@ -295,7 +295,7 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) | |||
| 295 | } | 295 | } |
| 296 | 296 | ||
| 297 | dev_err(hcd->self.controller, | 297 | dev_err(hcd->self.controller, |
| 298 | "%s: Can not allocate %lu bytes of memory\n" | 298 | "%s: Cannot allocate %zu bytes of memory\n" |
| 299 | "Current memory map:\n", | 299 | "Current memory map:\n", |
| 300 | __func__, qtd->length); | 300 | __func__, qtd->length); |
| 301 | for (i = 0; i < BLOCKS; i++) { | 301 | for (i = 0; i < BLOCKS; i++) { |
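
Editor's note: besides the wording fix, the isp1760 hunk swaps %lu for %zu, presumably because qtd->length is a size_t; %lu only happens to match on typical 64-bit builds. A trivial user-space demonstration of the matching specifier:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t len = 4096;

	/* %zu is the portable conversion for size_t on both 32- and 64-bit */
	printf("Cannot allocate %zu bytes of memory\n", len);
	return 0;
}
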
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c index 17a6043c1fa0..958d985f2951 100644 --- a/drivers/usb/host/ohci-au1xxx.c +++ b/drivers/usb/host/ohci-au1xxx.c | |||
| @@ -33,7 +33,7 @@ | |||
| 33 | 33 | ||
| 34 | #ifdef __LITTLE_ENDIAN | 34 | #ifdef __LITTLE_ENDIAN |
| 35 | #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C) | 35 | #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C) |
| 36 | #elif __BIG_ENDIAN | 36 | #elif defined(__BIG_ENDIAN) |
| 37 | #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \ | 37 | #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \ |
| 38 | USBH_ENABLE_BE) | 38 | USBH_ENABLE_BE) |
| 39 | #else | 39 | #else |
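
Editor's note: the ohci-au1xxx one-liner is about preprocessor semantics. "#elif __BIG_ENDIAN" arithmetically evaluates the macro (an undefined macro silently becomes 0, and -Wundef complains), whereas "#elif defined(__BIG_ENDIAN)" only tests whether it exists, matching the "#ifdef __LITTLE_ENDIAN" branch above it. A self-contained illustration with invented macro names:

#include <stdio.h>

#define CONFIG_MODE_B		/* defined, but deliberately has no value */

int main(void)
{
	/* A bare "#elif CONFIG_MODE_B" would not even compile here, because
	 * the macro expands to an empty #elif expression. */
#ifdef CONFIG_MODE_A
	puts("mode A");
#elif defined(CONFIG_MODE_B)	/* tests existence only; value not needed */
	puts("mode B");
#else
	puts("default mode");
#endif
	return 0;
}
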
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 1d586d4f7b56..9b166d70ae91 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
| @@ -84,65 +84,92 @@ int usb_amd_find_chipset_info(void) | |||
| 84 | { | 84 | { |
| 85 | u8 rev = 0; | 85 | u8 rev = 0; |
| 86 | unsigned long flags; | 86 | unsigned long flags; |
| 87 | struct amd_chipset_info info; | ||
| 88 | int ret; | ||
| 87 | 89 | ||
| 88 | spin_lock_irqsave(&amd_lock, flags); | 90 | spin_lock_irqsave(&amd_lock, flags); |
| 89 | 91 | ||
| 90 | amd_chipset.probe_count++; | ||
| 91 | /* probe only once */ | 92 | /* probe only once */ |
| 92 | if (amd_chipset.probe_count > 1) { | 93 | if (amd_chipset.probe_count > 0) { |
| 94 | amd_chipset.probe_count++; | ||
| 93 | spin_unlock_irqrestore(&amd_lock, flags); | 95 | spin_unlock_irqrestore(&amd_lock, flags); |
| 94 | return amd_chipset.probe_result; | 96 | return amd_chipset.probe_result; |
| 95 | } | 97 | } |
| 98 | memset(&info, 0, sizeof(info)); | ||
| 99 | spin_unlock_irqrestore(&amd_lock, flags); | ||
| 96 | 100 | ||
| 97 | amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL); | 101 | info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL); |
| 98 | if (amd_chipset.smbus_dev) { | 102 | if (info.smbus_dev) { |
| 99 | rev = amd_chipset.smbus_dev->revision; | 103 | rev = info.smbus_dev->revision; |
| 100 | if (rev >= 0x40) | 104 | if (rev >= 0x40) |
| 101 | amd_chipset.sb_type = 1; | 105 | info.sb_type = 1; |
| 102 | else if (rev >= 0x30 && rev <= 0x3b) | 106 | else if (rev >= 0x30 && rev <= 0x3b) |
| 103 | amd_chipset.sb_type = 3; | 107 | info.sb_type = 3; |
| 104 | } else { | 108 | } else { |
| 105 | amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, | 109 | info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, |
| 106 | 0x780b, NULL); | 110 | 0x780b, NULL); |
| 107 | if (!amd_chipset.smbus_dev) { | 111 | if (!info.smbus_dev) { |
| 108 | spin_unlock_irqrestore(&amd_lock, flags); | 112 | ret = 0; |
| 109 | return 0; | 113 | goto commit; |
| 110 | } | 114 | } |
| 111 | rev = amd_chipset.smbus_dev->revision; | 115 | |
| 116 | rev = info.smbus_dev->revision; | ||
| 112 | if (rev >= 0x11 && rev <= 0x18) | 117 | if (rev >= 0x11 && rev <= 0x18) |
| 113 | amd_chipset.sb_type = 2; | 118 | info.sb_type = 2; |
| 114 | } | 119 | } |
| 115 | 120 | ||
| 116 | if (amd_chipset.sb_type == 0) { | 121 | if (info.sb_type == 0) { |
| 117 | if (amd_chipset.smbus_dev) { | 122 | if (info.smbus_dev) { |
| 118 | pci_dev_put(amd_chipset.smbus_dev); | 123 | pci_dev_put(info.smbus_dev); |
| 119 | amd_chipset.smbus_dev = NULL; | 124 | info.smbus_dev = NULL; |
| 120 | } | 125 | } |
| 121 | spin_unlock_irqrestore(&amd_lock, flags); | 126 | ret = 0; |
| 122 | return 0; | 127 | goto commit; |
| 123 | } | 128 | } |
| 124 | 129 | ||
| 125 | amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL); | 130 | info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL); |
| 126 | if (amd_chipset.nb_dev) { | 131 | if (info.nb_dev) { |
| 127 | amd_chipset.nb_type = 1; | 132 | info.nb_type = 1; |
| 128 | } else { | 133 | } else { |
| 129 | amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, | 134 | info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL); |
| 130 | 0x1510, NULL); | 135 | if (info.nb_dev) { |
| 131 | if (amd_chipset.nb_dev) { | 136 | info.nb_type = 2; |
| 132 | amd_chipset.nb_type = 2; | 137 | } else { |
| 133 | } else { | 138 | info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, |
| 134 | amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, | 139 | 0x9600, NULL); |
| 135 | 0x9600, NULL); | 140 | if (info.nb_dev) |
| 136 | if (amd_chipset.nb_dev) | 141 | info.nb_type = 3; |
| 137 | amd_chipset.nb_type = 3; | ||
| 138 | } | 142 | } |
| 139 | } | 143 | } |
| 140 | 144 | ||
| 141 | amd_chipset.probe_result = 1; | 145 | ret = info.probe_result = 1; |
| 142 | printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); | 146 | printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); |
| 143 | 147 | ||
| 144 | spin_unlock_irqrestore(&amd_lock, flags); | 148 | commit: |
| 145 | return amd_chipset.probe_result; | 149 | |
| 150 | spin_lock_irqsave(&amd_lock, flags); | ||
| 151 | if (amd_chipset.probe_count > 0) { | ||
| 152 | /* race - someone else was faster - drop devices */ | ||
| 153 | |||
| 154 | /* Mark that we were here */ | ||
| 155 | amd_chipset.probe_count++; | ||
| 156 | ret = amd_chipset.probe_result; | ||
| 157 | |||
| 158 | spin_unlock_irqrestore(&amd_lock, flags); | ||
| 159 | |||
| 160 | if (info.nb_dev) | ||
| 161 | pci_dev_put(info.nb_dev); | ||
| 162 | if (info.smbus_dev) | ||
| 163 | pci_dev_put(info.smbus_dev); | ||
| 164 | |||
| 165 | } else { | ||
| 166 | /* no race - commit the result */ | ||
| 167 | info.probe_count++; | ||
| 168 | amd_chipset = info; | ||
| 169 | spin_unlock_irqrestore(&amd_lock, flags); | ||
| 170 | } | ||
| 171 | |||
| 172 | return ret; | ||
| 146 | } | 173 | } |
| 147 | EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info); | 174 | EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info); |
| 148 | 175 | ||
| @@ -284,6 +311,7 @@ EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable); | |||
| 284 | 311 | ||
| 285 | void usb_amd_dev_put(void) | 312 | void usb_amd_dev_put(void) |
| 286 | { | 313 | { |
| 314 | struct pci_dev *nb, *smbus; | ||
| 287 | unsigned long flags; | 315 | unsigned long flags; |
| 288 | 316 | ||
| 289 | spin_lock_irqsave(&amd_lock, flags); | 317 | spin_lock_irqsave(&amd_lock, flags); |
| @@ -294,20 +322,23 @@ void usb_amd_dev_put(void) | |||
| 294 | return; | 322 | return; |
| 295 | } | 323 | } |
| 296 | 324 | ||
| 297 | if (amd_chipset.nb_dev) { | 325 | /* save them to pci_dev_put outside of spinlock */ |
| 298 | pci_dev_put(amd_chipset.nb_dev); | 326 | nb = amd_chipset.nb_dev; |
| 299 | amd_chipset.nb_dev = NULL; | 327 | smbus = amd_chipset.smbus_dev; |
| 300 | } | 328 | |
| 301 | if (amd_chipset.smbus_dev) { | 329 | amd_chipset.nb_dev = NULL; |
| 302 | pci_dev_put(amd_chipset.smbus_dev); | 330 | amd_chipset.smbus_dev = NULL; |
| 303 | amd_chipset.smbus_dev = NULL; | ||
| 304 | } | ||
| 305 | amd_chipset.nb_type = 0; | 331 | amd_chipset.nb_type = 0; |
| 306 | amd_chipset.sb_type = 0; | 332 | amd_chipset.sb_type = 0; |
| 307 | amd_chipset.isoc_reqs = 0; | 333 | amd_chipset.isoc_reqs = 0; |
| 308 | amd_chipset.probe_result = 0; | 334 | amd_chipset.probe_result = 0; |
| 309 | 335 | ||
| 310 | spin_unlock_irqrestore(&amd_lock, flags); | 336 | spin_unlock_irqrestore(&amd_lock, flags); |
| 337 | |||
| 338 | if (nb) | ||
| 339 | pci_dev_put(nb); | ||
| 340 | if (smbus) | ||
| 341 | pci_dev_put(smbus); | ||
| 311 | } | 342 | } |
| 312 | EXPORT_SYMBOL_GPL(usb_amd_dev_put); | 343 | EXPORT_SYMBOL_GPL(usb_amd_dev_put); |
| 313 | 344 | ||
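
Editor's note: the pci-quirks rework moves the slow PCI lookups outside the spinlock and commits the result under the lock only if no other CPU probed first. The shape of that pattern, reduced to a user-space sketch with invented names and pthreads in place of the spinlock:

#include <pthread.h>
#include <string.h>

struct chipset_info {
	int probe_count;
	int probe_result;
};

static struct chipset_info cached;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static int slow_probe(struct chipset_info *info)
{
	/* expensive device lookups would happen here, lock not held */
	info->probe_result = 1;
	return info->probe_result;
}

int find_chipset_info(void)
{
	struct chipset_info info;
	int ret;

	pthread_mutex_lock(&cache_lock);
	if (cached.probe_count > 0) {		/* already probed: reuse */
		cached.probe_count++;
		ret = cached.probe_result;
		pthread_mutex_unlock(&cache_lock);
		return ret;
	}
	pthread_mutex_unlock(&cache_lock);

	memset(&info, 0, sizeof(info));
	ret = slow_probe(&info);		/* lock dropped while probing */

	pthread_mutex_lock(&cache_lock);
	if (cached.probe_count > 0) {		/* lost the race: keep theirs */
		cached.probe_count++;
		ret = cached.probe_result;
	} else {				/* won the race: commit ours */
		info.probe_count = 1;
		cached = info;
	}
	pthread_mutex_unlock(&cache_lock);
	return ret;
}

int main(void)
{
	return find_chipset_info() == 1 ? 0 : 1;
}
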
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index a003e79aacdc..627f3438028c 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
| @@ -846,7 +846,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci, | |||
| 846 | * Skip ports that don't have known speeds, or have duplicate | 846 | * Skip ports that don't have known speeds, or have duplicate |
| 847 | * Extended Capabilities port speed entries. | 847 | * Extended Capabilities port speed entries. |
| 848 | */ | 848 | */ |
| 849 | if (port_speed == 0 || port_speed == -1) | 849 | if (port_speed == 0 || port_speed == DUPLICATE_ENTRY) |
| 850 | continue; | 850 | continue; |
| 851 | 851 | ||
| 852 | /* | 852 | /* |
| @@ -974,6 +974,47 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
| 974 | return 0; | 974 | return 0; |
| 975 | } | 975 | } |
| 976 | 976 | ||
| 977 | /* | ||
| 978 | * Convert interval expressed as 2^(bInterval - 1) == interval into | ||
| 979 | * straight exponent value 2^n == interval. | ||
| 980 | * | ||
| 981 | */ | ||
| 982 | static unsigned int xhci_parse_exponent_interval(struct usb_device *udev, | ||
| 983 | struct usb_host_endpoint *ep) | ||
| 984 | { | ||
| 985 | unsigned int interval; | ||
| 986 | |||
| 987 | interval = clamp_val(ep->desc.bInterval, 1, 16) - 1; | ||
| 988 | if (interval != ep->desc.bInterval - 1) | ||
| 989 | dev_warn(&udev->dev, | ||
| 990 | "ep %#x - rounding interval to %d microframes\n", | ||
| 991 | ep->desc.bEndpointAddress, | ||
| 992 | 1 << interval); | ||
| 993 | |||
| 994 | return interval; | ||
| 995 | } | ||
| 996 | |||
| 997 | /* | ||
| 998 | * Convert bInterval expressed in frames (in 1-255 range) to exponent of | ||
| 999 | * microframes, rounded down to nearest power of 2. | ||
| 1000 | */ | ||
| 1001 | static unsigned int xhci_parse_frame_interval(struct usb_device *udev, | ||
| 1002 | struct usb_host_endpoint *ep) | ||
| 1003 | { | ||
| 1004 | unsigned int interval; | ||
| 1005 | |||
| 1006 | interval = fls(8 * ep->desc.bInterval) - 1; | ||
| 1007 | interval = clamp_val(interval, 3, 10); | ||
| 1008 | if ((1 << interval) != 8 * ep->desc.bInterval) | ||
| 1009 | dev_warn(&udev->dev, | ||
| 1010 | "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", | ||
| 1011 | ep->desc.bEndpointAddress, | ||
| 1012 | 1 << interval, | ||
| 1013 | 8 * ep->desc.bInterval); | ||
| 1014 | |||
| 1015 | return interval; | ||
| 1016 | } | ||
| 1017 | |||
| 977 | /* Return the polling or NAK interval. | 1018 | /* Return the polling or NAK interval. |
| 978 | * | 1019 | * |
| 979 | * The polling interval is expressed in "microframes". If xHCI's Interval field | 1020 | * The polling interval is expressed in "microframes". If xHCI's Interval field |
| @@ -982,7 +1023,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
| 982 | * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval | 1023 | * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval |
| 983 | * is set to 0. | 1024 | * is set to 0. |
| 984 | */ | 1025 | */ |
| 985 | static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, | 1026 | static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, |
| 986 | struct usb_host_endpoint *ep) | 1027 | struct usb_host_endpoint *ep) |
| 987 | { | 1028 | { |
| 988 | unsigned int interval = 0; | 1029 | unsigned int interval = 0; |
| @@ -991,45 +1032,38 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, | |||
| 991 | case USB_SPEED_HIGH: | 1032 | case USB_SPEED_HIGH: |
| 992 | /* Max NAK rate */ | 1033 | /* Max NAK rate */ |
| 993 | if (usb_endpoint_xfer_control(&ep->desc) || | 1034 | if (usb_endpoint_xfer_control(&ep->desc) || |
| 994 | usb_endpoint_xfer_bulk(&ep->desc)) | 1035 | usb_endpoint_xfer_bulk(&ep->desc)) { |
| 995 | interval = ep->desc.bInterval; | 1036 | interval = ep->desc.bInterval; |
| 1037 | break; | ||
| 1038 | } | ||
| 996 | /* Fall through - SS and HS isoc/int have same decoding */ | 1039 | /* Fall through - SS and HS isoc/int have same decoding */ |
| 1040 | |||
| 997 | case USB_SPEED_SUPER: | 1041 | case USB_SPEED_SUPER: |
| 998 | if (usb_endpoint_xfer_int(&ep->desc) || | 1042 | if (usb_endpoint_xfer_int(&ep->desc) || |
| 999 | usb_endpoint_xfer_isoc(&ep->desc)) { | 1043 | usb_endpoint_xfer_isoc(&ep->desc)) { |
| 1000 | if (ep->desc.bInterval == 0) | 1044 | interval = xhci_parse_exponent_interval(udev, ep); |
| 1001 | interval = 0; | ||
| 1002 | else | ||
| 1003 | interval = ep->desc.bInterval - 1; | ||
| 1004 | if (interval > 15) | ||
| 1005 | interval = 15; | ||
| 1006 | if (interval != ep->desc.bInterval + 1) | ||
| 1007 | dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", | ||
| 1008 | ep->desc.bEndpointAddress, 1 << interval); | ||
| 1009 | } | 1045 | } |
| 1010 | break; | 1046 | break; |
| 1011 | /* Convert bInterval (in 1-255 frames) to microframes and round down to | 1047 | |
| 1012 | * nearest power of 2. | ||
| 1013 | */ | ||
| 1014 | case USB_SPEED_FULL: | 1048 | case USB_SPEED_FULL: |
| 1049 | if (usb_endpoint_xfer_int(&ep->desc)) { | ||
| 1050 | interval = xhci_parse_exponent_interval(udev, ep); | ||
| 1051 | break; | ||
| 1052 | } | ||
| 1053 | /* | ||
| 1054 | * Fall through for isochronous endpoint interval decoding | ||
| 1055 | * since it uses the same rules as low speed interrupt | ||
| 1056 | * endpoints. | ||
| 1057 | */ | ||
| 1058 | |||
| 1015 | case USB_SPEED_LOW: | 1059 | case USB_SPEED_LOW: |
| 1016 | if (usb_endpoint_xfer_int(&ep->desc) || | 1060 | if (usb_endpoint_xfer_int(&ep->desc) || |
| 1017 | usb_endpoint_xfer_isoc(&ep->desc)) { | 1061 | usb_endpoint_xfer_isoc(&ep->desc)) { |
| 1018 | interval = fls(8*ep->desc.bInterval) - 1; | 1062 | |
| 1019 | if (interval > 10) | 1063 | interval = xhci_parse_frame_interval(udev, ep); |
| 1020 | interval = 10; | ||
| 1021 | if (interval < 3) | ||
| 1022 | interval = 3; | ||
| 1023 | if ((1 << interval) != 8*ep->desc.bInterval) | ||
| 1024 | dev_warn(&udev->dev, | ||
| 1025 | "ep %#x - rounding interval" | ||
| 1026 | " to %d microframes, " | ||
| 1027 | "ep desc says %d microframes\n", | ||
| 1028 | ep->desc.bEndpointAddress, | ||
| 1029 | 1 << interval, | ||
| 1030 | 8*ep->desc.bInterval); | ||
| 1031 | } | 1064 | } |
| 1032 | break; | 1065 | break; |
| 1066 | |||
| 1033 | default: | 1067 | default: |
| 1034 | BUG(); | 1068 | BUG(); |
| 1035 | } | 1069 | } |
| @@ -1041,7 +1075,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, | |||
| 1041 | * transaction opportunities per microframe", but that goes in the Max Burst | 1075 | * transaction opportunities per microframe", but that goes in the Max Burst |
| 1042 | * endpoint context field. | 1076 | * endpoint context field. |
| 1043 | */ | 1077 | */ |
| 1044 | static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, | 1078 | static u32 xhci_get_endpoint_mult(struct usb_device *udev, |
| 1045 | struct usb_host_endpoint *ep) | 1079 | struct usb_host_endpoint *ep) |
| 1046 | { | 1080 | { |
| 1047 | if (udev->speed != USB_SPEED_SUPER || | 1081 | if (udev->speed != USB_SPEED_SUPER || |
| @@ -1050,7 +1084,7 @@ static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, | |||
| 1050 | return ep->ss_ep_comp.bmAttributes; | 1084 | return ep->ss_ep_comp.bmAttributes; |
| 1051 | } | 1085 | } |
| 1052 | 1086 | ||
| 1053 | static inline u32 xhci_get_endpoint_type(struct usb_device *udev, | 1087 | static u32 xhci_get_endpoint_type(struct usb_device *udev, |
| 1054 | struct usb_host_endpoint *ep) | 1088 | struct usb_host_endpoint *ep) |
| 1055 | { | 1089 | { |
| 1056 | int in; | 1090 | int in; |
| @@ -1084,7 +1118,7 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev, | |||
| 1084 | * Basically, this is the maxpacket size, multiplied by the burst size | 1118 | * Basically, this is the maxpacket size, multiplied by the burst size |
| 1085 | * and mult size. | 1119 | * and mult size. |
| 1086 | */ | 1120 | */ |
| 1087 | static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, | 1121 | static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, |
| 1088 | struct usb_device *udev, | 1122 | struct usb_device *udev, |
| 1089 | struct usb_host_endpoint *ep) | 1123 | struct usb_host_endpoint *ep) |
| 1090 | { | 1124 | { |
| @@ -1727,12 +1761,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, | |||
| 1727 | * found a similar duplicate. | 1761 | * found a similar duplicate. |
| 1728 | */ | 1762 | */ |
| 1729 | if (xhci->port_array[i] != major_revision && | 1763 | if (xhci->port_array[i] != major_revision && |
| 1730 | xhci->port_array[i] != (u8) -1) { | 1764 | xhci->port_array[i] != DUPLICATE_ENTRY) { |
| 1731 | if (xhci->port_array[i] == 0x03) | 1765 | if (xhci->port_array[i] == 0x03) |
| 1732 | xhci->num_usb3_ports--; | 1766 | xhci->num_usb3_ports--; |
| 1733 | else | 1767 | else |
| 1734 | xhci->num_usb2_ports--; | 1768 | xhci->num_usb2_ports--; |
| 1735 | xhci->port_array[i] = (u8) -1; | 1769 | xhci->port_array[i] = DUPLICATE_ENTRY; |
| 1736 | } | 1770 | } |
| 1737 | /* FIXME: Should we disable the port? */ | 1771 | /* FIXME: Should we disable the port? */ |
| 1738 | continue; | 1772 | continue; |
| @@ -1831,7 +1865,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) | |||
| 1831 | for (i = 0; i < num_ports; i++) { | 1865 | for (i = 0; i < num_ports; i++) { |
| 1832 | if (xhci->port_array[i] == 0x03 || | 1866 | if (xhci->port_array[i] == 0x03 || |
| 1833 | xhci->port_array[i] == 0 || | 1867 | xhci->port_array[i] == 0 || |
| 1834 | xhci->port_array[i] == -1) | 1868 | xhci->port_array[i] == DUPLICATE_ENTRY) |
| 1835 | continue; | 1869 | continue; |
| 1836 | 1870 | ||
| 1837 | xhci->usb2_ports[port_index] = | 1871 | xhci->usb2_ports[port_index] = |
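
Editor's note: the two helpers added to xhci-mem.c reduce to a small amount of arithmetic. A standalone check of that arithmetic, with local stand-ins for the kernel's clamp_val() and fls():

#include <stdio.h>

static unsigned int clampu(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static int fls_local(unsigned int x)	/* index of highest set bit, 1-based */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned char bInterval;

	/* Exponent form (HS/SS int/isoc): bInterval = 4 -> 2^(4-1) = 8 microframes */
	bInterval = 4;
	printf("exponent interval: %u microframes\n",
	       1u << (clampu(bInterval, 1, 16) - 1));

	/* Frame form (LS/FS): bInterval = 10 frames = 80 microframes,
	 * rounded down to a power of two -> 2^6 = 64 microframes */
	bInterval = 10;
	printf("frame interval: %u microframes\n",
	       1u << clampu(fls_local(8 * bInterval) - 1, 3, 10));
	return 0;
}
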
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index ceea9f33491c..a10494c2f3c7 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
| @@ -114,6 +114,10 @@ static int xhci_pci_setup(struct usb_hcd *hcd) | |||
| 114 | if (pdev->vendor == PCI_VENDOR_ID_NEC) | 114 | if (pdev->vendor == PCI_VENDOR_ID_NEC) |
| 115 | xhci->quirks |= XHCI_NEC_HOST; | 115 | xhci->quirks |= XHCI_NEC_HOST; |
| 116 | 116 | ||
| 117 | /* AMD PLL quirk */ | ||
| 118 | if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) | ||
| 119 | xhci->quirks |= XHCI_AMD_PLL_FIX; | ||
| 120 | |||
| 117 | /* Make sure the HC is halted. */ | 121 | /* Make sure the HC is halted. */ |
| 118 | retval = xhci_halt(xhci); | 122 | retval = xhci_halt(xhci); |
| 119 | if (retval) | 123 | if (retval) |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index cfc1ad92473f..7437386a9a50 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -93,7 +93,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, | |||
| 93 | /* Does this link TRB point to the first segment in a ring, | 93 | /* Does this link TRB point to the first segment in a ring, |
| 94 | * or was the previous TRB the last TRB on the last segment in the ERST? | 94 | * or was the previous TRB the last TRB on the last segment in the ERST? |
| 95 | */ | 95 | */ |
| 96 | static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, | 96 | static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, |
| 97 | struct xhci_segment *seg, union xhci_trb *trb) | 97 | struct xhci_segment *seg, union xhci_trb *trb) |
| 98 | { | 98 | { |
| 99 | if (ring == xhci->event_ring) | 99 | if (ring == xhci->event_ring) |
| @@ -107,7 +107,7 @@ static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring | |||
| 107 | * segment? I.e. would the updated event TRB pointer step off the end of the | 107 | * segment? I.e. would the updated event TRB pointer step off the end of the |
| 108 | * event seg? | 108 | * event seg? |
| 109 | */ | 109 | */ |
| 110 | static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, | 110 | static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, |
| 111 | struct xhci_segment *seg, union xhci_trb *trb) | 111 | struct xhci_segment *seg, union xhci_trb *trb) |
| 112 | { | 112 | { |
| 113 | if (ring == xhci->event_ring) | 113 | if (ring == xhci->event_ring) |
| @@ -116,7 +116,7 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, | |||
| 116 | return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK); | 116 | return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK); |
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | static inline int enqueue_is_link_trb(struct xhci_ring *ring) | 119 | static int enqueue_is_link_trb(struct xhci_ring *ring) |
| 120 | { | 120 | { |
| 121 | struct xhci_link_trb *link = &ring->enqueue->link; | 121 | struct xhci_link_trb *link = &ring->enqueue->link; |
| 122 | return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)); | 122 | return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)); |
| @@ -592,7 +592,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, | |||
| 592 | ep->ep_state |= SET_DEQ_PENDING; | 592 | ep->ep_state |= SET_DEQ_PENDING; |
| 593 | } | 593 | } |
| 594 | 594 | ||
| 595 | static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, | 595 | static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, |
| 596 | struct xhci_virt_ep *ep) | 596 | struct xhci_virt_ep *ep) |
| 597 | { | 597 | { |
| 598 | ep->ep_state &= ~EP_HALT_PENDING; | 598 | ep->ep_state &= ~EP_HALT_PENDING; |
| @@ -619,6 +619,13 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, | |||
| 619 | 619 | ||
| 620 | /* Only giveback urb when this is the last td in urb */ | 620 | /* Only giveback urb when this is the last td in urb */ |
| 621 | if (urb_priv->td_cnt == urb_priv->length) { | 621 | if (urb_priv->td_cnt == urb_priv->length) { |
| 622 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { | ||
| 623 | xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; | ||
| 624 | if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { | ||
| 625 | if (xhci->quirks & XHCI_AMD_PLL_FIX) | ||
| 626 | usb_amd_quirk_pll_enable(); | ||
| 627 | } | ||
| 628 | } | ||
| 622 | usb_hcd_unlink_urb_from_ep(hcd, urb); | 629 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
| 623 | xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb); | 630 | xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb); |
| 624 | 631 | ||
| @@ -1209,7 +1216,7 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd, | |||
| 1209 | * Skip ports that don't have known speeds, or have duplicate | 1216 | * Skip ports that don't have known speeds, or have duplicate |
| 1210 | * Extended Capabilities port speed entries. | 1217 | * Extended Capabilities port speed entries. |
| 1211 | */ | 1218 | */ |
| 1212 | if (port_speed == 0 || port_speed == -1) | 1219 | if (port_speed == 0 || port_speed == DUPLICATE_ENTRY) |
| 1213 | continue; | 1220 | continue; |
| 1214 | 1221 | ||
| 1215 | /* | 1222 | /* |
| @@ -1235,6 +1242,7 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
| 1235 | u8 major_revision; | 1242 | u8 major_revision; |
| 1236 | struct xhci_bus_state *bus_state; | 1243 | struct xhci_bus_state *bus_state; |
| 1237 | u32 __iomem **port_array; | 1244 | u32 __iomem **port_array; |
| 1245 | bool bogus_port_status = false; | ||
| 1238 | 1246 | ||
| 1239 | /* Port status change events always have a successful completion code */ | 1247 | /* Port status change events always have a successful completion code */ |
| 1240 | if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) { | 1248 | if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) { |
| @@ -1247,6 +1255,7 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
| 1247 | max_ports = HCS_MAX_PORTS(xhci->hcs_params1); | 1255 | max_ports = HCS_MAX_PORTS(xhci->hcs_params1); |
| 1248 | if ((port_id <= 0) || (port_id > max_ports)) { | 1256 | if ((port_id <= 0) || (port_id > max_ports)) { |
| 1249 | xhci_warn(xhci, "Invalid port id %d\n", port_id); | 1257 | xhci_warn(xhci, "Invalid port id %d\n", port_id); |
| 1258 | bogus_port_status = true; | ||
| 1250 | goto cleanup; | 1259 | goto cleanup; |
| 1251 | } | 1260 | } |
| 1252 | 1261 | ||
| @@ -1258,12 +1267,14 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
| 1258 | xhci_warn(xhci, "Event for port %u not in " | 1267 | xhci_warn(xhci, "Event for port %u not in " |
| 1259 | "Extended Capabilities, ignoring.\n", | 1268 | "Extended Capabilities, ignoring.\n", |
| 1260 | port_id); | 1269 | port_id); |
| 1270 | bogus_port_status = true; | ||
| 1261 | goto cleanup; | 1271 | goto cleanup; |
| 1262 | } | 1272 | } |
| 1263 | if (major_revision == (u8) -1) { | 1273 | if (major_revision == DUPLICATE_ENTRY) { |
| 1264 | xhci_warn(xhci, "Event for port %u duplicated in" | 1274 | xhci_warn(xhci, "Event for port %u duplicated in" |
| 1265 | "Extended Capabilities, ignoring.\n", | 1275 | "Extended Capabilities, ignoring.\n", |
| 1266 | port_id); | 1276 | port_id); |
| 1277 | bogus_port_status = true; | ||
| 1267 | goto cleanup; | 1278 | goto cleanup; |
| 1268 | } | 1279 | } |
| 1269 | 1280 | ||
| @@ -1335,6 +1346,13 @@ cleanup: | |||
| 1335 | /* Update event ring dequeue pointer before dropping the lock */ | 1346 | /* Update event ring dequeue pointer before dropping the lock */ |
| 1336 | inc_deq(xhci, xhci->event_ring, true); | 1347 | inc_deq(xhci, xhci->event_ring, true); |
| 1337 | 1348 | ||
| 1349 | /* Don't make the USB core poll the roothub if we got a bad port status | ||
| 1350 | * change event. Besides, at that point we can't tell which roothub | ||
| 1351 | * (USB 2.0 or USB 3.0) to kick. | ||
| 1352 | */ | ||
| 1353 | if (bogus_port_status) | ||
| 1354 | return; | ||
| 1355 | |||
| 1338 | spin_unlock(&xhci->lock); | 1356 | spin_unlock(&xhci->lock); |
| 1339 | /* Pass this up to the core */ | 1357 | /* Pass this up to the core */ |
| 1340 | usb_hcd_poll_rh_status(hcd); | 1358 | usb_hcd_poll_rh_status(hcd); |
| @@ -1554,8 +1572,17 @@ td_cleanup: | |||
| 1554 | 1572 | ||
| 1555 | urb_priv->td_cnt++; | 1573 | urb_priv->td_cnt++; |
| 1556 | /* Giveback the urb when all the tds are completed */ | 1574 | /* Giveback the urb when all the tds are completed */ |
| 1557 | if (urb_priv->td_cnt == urb_priv->length) | 1575 | if (urb_priv->td_cnt == urb_priv->length) { |
| 1558 | ret = 1; | 1576 | ret = 1; |
| 1577 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { | ||
| 1578 | xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; | ||
| 1579 | if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs | ||
| 1580 | == 0) { | ||
| 1581 | if (xhci->quirks & XHCI_AMD_PLL_FIX) | ||
| 1582 | usb_amd_quirk_pll_enable(); | ||
| 1583 | } | ||
| 1584 | } | ||
| 1585 | } | ||
| 1559 | } | 1586 | } |
| 1560 | 1587 | ||
| 1561 | return ret; | 1588 | return ret; |
| @@ -1675,71 +1702,52 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
| 1675 | struct urb_priv *urb_priv; | 1702 | struct urb_priv *urb_priv; |
| 1676 | int idx; | 1703 | int idx; |
| 1677 | int len = 0; | 1704 | int len = 0; |
| 1678 | int skip_td = 0; | ||
| 1679 | union xhci_trb *cur_trb; | 1705 | union xhci_trb *cur_trb; |
| 1680 | struct xhci_segment *cur_seg; | 1706 | struct xhci_segment *cur_seg; |
| 1707 | struct usb_iso_packet_descriptor *frame; | ||
| 1681 | u32 trb_comp_code; | 1708 | u32 trb_comp_code; |
| 1709 | bool skip_td = false; | ||
| 1682 | 1710 | ||
| 1683 | ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); | 1711 | ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); |
| 1684 | trb_comp_code = GET_COMP_CODE(event->transfer_len); | 1712 | trb_comp_code = GET_COMP_CODE(event->transfer_len); |
| 1685 | urb_priv = td->urb->hcpriv; | 1713 | urb_priv = td->urb->hcpriv; |
| 1686 | idx = urb_priv->td_cnt; | 1714 | idx = urb_priv->td_cnt; |
| 1715 | frame = &td->urb->iso_frame_desc[idx]; | ||
| 1687 | 1716 | ||
| 1688 | if (ep->skip) { | 1717 | /* handle completion code */ |
| 1689 | /* The transfer is partly done */ | 1718 | switch (trb_comp_code) { |
| 1690 | *status = -EXDEV; | 1719 | case COMP_SUCCESS: |
| 1691 | td->urb->iso_frame_desc[idx].status = -EXDEV; | 1720 | frame->status = 0; |
| 1692 | } else { | 1721 | xhci_dbg(xhci, "Successful isoc transfer!\n"); |
| 1693 | /* handle completion code */ | 1722 | break; |
| 1694 | switch (trb_comp_code) { | 1723 | case COMP_SHORT_TX: |
| 1695 | case COMP_SUCCESS: | 1724 | frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? |
| 1696 | td->urb->iso_frame_desc[idx].status = 0; | 1725 | -EREMOTEIO : 0; |
| 1697 | xhci_dbg(xhci, "Successful isoc transfer!\n"); | 1726 | break; |
| 1698 | break; | 1727 | case COMP_BW_OVER: |
| 1699 | case COMP_SHORT_TX: | 1728 | frame->status = -ECOMM; |
| 1700 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) | 1729 | skip_td = true; |
| 1701 | td->urb->iso_frame_desc[idx].status = | 1730 | break; |
| 1702 | -EREMOTEIO; | 1731 | case COMP_BUFF_OVER: |
| 1703 | else | 1732 | case COMP_BABBLE: |
| 1704 | td->urb->iso_frame_desc[idx].status = 0; | 1733 | frame->status = -EOVERFLOW; |
| 1705 | break; | 1734 | skip_td = true; |
| 1706 | case COMP_BW_OVER: | 1735 | break; |
| 1707 | td->urb->iso_frame_desc[idx].status = -ECOMM; | 1736 | case COMP_STALL: |
| 1708 | skip_td = 1; | 1737 | frame->status = -EPROTO; |
| 1709 | break; | 1738 | skip_td = true; |
| 1710 | case COMP_BUFF_OVER: | 1739 | break; |
| 1711 | case COMP_BABBLE: | 1740 | case COMP_STOP: |
| 1712 | td->urb->iso_frame_desc[idx].status = -EOVERFLOW; | 1741 | case COMP_STOP_INVAL: |
| 1713 | skip_td = 1; | 1742 | break; |
| 1714 | break; | 1743 | default: |
| 1715 | case COMP_STALL: | 1744 | frame->status = -1; |
| 1716 | td->urb->iso_frame_desc[idx].status = -EPROTO; | 1745 | break; |
| 1717 | skip_td = 1; | ||
| 1718 | break; | ||
| 1719 | case COMP_STOP: | ||
| 1720 | case COMP_STOP_INVAL: | ||
| 1721 | break; | ||
| 1722 | default: | ||
| 1723 | td->urb->iso_frame_desc[idx].status = -1; | ||
| 1724 | break; | ||
| 1725 | } | ||
| 1726 | } | ||
| 1727 | |||
| 1728 | /* calc actual length */ | ||
| 1729 | if (ep->skip) { | ||
| 1730 | td->urb->iso_frame_desc[idx].actual_length = 0; | ||
| 1731 | /* Update ring dequeue pointer */ | ||
| 1732 | while (ep_ring->dequeue != td->last_trb) | ||
| 1733 | inc_deq(xhci, ep_ring, false); | ||
| 1734 | inc_deq(xhci, ep_ring, false); | ||
| 1735 | return finish_td(xhci, td, event_trb, event, ep, status, true); | ||
| 1736 | } | 1746 | } |
| 1737 | 1747 | ||
| 1738 | if (trb_comp_code == COMP_SUCCESS || skip_td == 1) { | 1748 | if (trb_comp_code == COMP_SUCCESS || skip_td) { |
| 1739 | td->urb->iso_frame_desc[idx].actual_length = | 1749 | frame->actual_length = frame->length; |
| 1740 | td->urb->iso_frame_desc[idx].length; | 1750 | td->urb->actual_length += frame->length; |
| 1741 | td->urb->actual_length += | ||
| 1742 | td->urb->iso_frame_desc[idx].length; | ||
| 1743 | } else { | 1751 | } else { |
| 1744 | for (cur_trb = ep_ring->dequeue, | 1752 | for (cur_trb = ep_ring->dequeue, |
| 1745 | cur_seg = ep_ring->deq_seg; cur_trb != event_trb; | 1753 | cur_seg = ep_ring->deq_seg; cur_trb != event_trb; |
| @@ -1755,7 +1763,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
| 1755 | TRB_LEN(event->transfer_len); | 1763 | TRB_LEN(event->transfer_len); |
| 1756 | 1764 | ||
| 1757 | if (trb_comp_code != COMP_STOP_INVAL) { | 1765 | if (trb_comp_code != COMP_STOP_INVAL) { |
| 1758 | td->urb->iso_frame_desc[idx].actual_length = len; | 1766 | frame->actual_length = len; |
| 1759 | td->urb->actual_length += len; | 1767 | td->urb->actual_length += len; |
| 1760 | } | 1768 | } |
| 1761 | } | 1769 | } |
| @@ -1766,6 +1774,35 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
| 1766 | return finish_td(xhci, td, event_trb, event, ep, status, false); | 1774 | return finish_td(xhci, td, event_trb, event, ep, status, false); |
| 1767 | } | 1775 | } |
| 1768 | 1776 | ||
| 1777 | static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, | ||
| 1778 | struct xhci_transfer_event *event, | ||
| 1779 | struct xhci_virt_ep *ep, int *status) | ||
| 1780 | { | ||
| 1781 | struct xhci_ring *ep_ring; | ||
| 1782 | struct urb_priv *urb_priv; | ||
| 1783 | struct usb_iso_packet_descriptor *frame; | ||
| 1784 | int idx; | ||
| 1785 | |||
| 1786 | ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); | ||
| 1787 | urb_priv = td->urb->hcpriv; | ||
| 1788 | idx = urb_priv->td_cnt; | ||
| 1789 | frame = &td->urb->iso_frame_desc[idx]; | ||
| 1790 | |||
| 1791 | /* The transfer is partly done */ | ||
| 1792 | *status = -EXDEV; | ||
| 1793 | frame->status = -EXDEV; | ||
| 1794 | |||
| 1795 | /* calc actual length */ | ||
| 1796 | frame->actual_length = 0; | ||
| 1797 | |||
| 1798 | /* Update ring dequeue pointer */ | ||
| 1799 | while (ep_ring->dequeue != td->last_trb) | ||
| 1800 | inc_deq(xhci, ep_ring, false); | ||
| 1801 | inc_deq(xhci, ep_ring, false); | ||
| 1802 | |||
| 1803 | return finish_td(xhci, td, NULL, event, ep, status, true); | ||
| 1804 | } | ||
| 1805 | |||
| 1769 | /* | 1806 | /* |
| 1770 | * Process bulk and interrupt tds, update urb status and actual_length. | 1807 | * Process bulk and interrupt tds, update urb status and actual_length. |
| 1771 | */ | 1808 | */ |
| @@ -2024,36 +2061,42 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 2024 | } | 2061 | } |
| 2025 | 2062 | ||
| 2026 | td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); | 2063 | td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); |
| 2064 | |||
| 2027 | /* Is this a TRB in the currently executing TD? */ | 2065 | /* Is this a TRB in the currently executing TD? */ |
| 2028 | event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, | 2066 | event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, |
| 2029 | td->last_trb, event_dma); | 2067 | td->last_trb, event_dma); |
| 2030 | if (event_seg && ep->skip) { | 2068 | if (!event_seg) { |
| 2069 | if (!ep->skip || | ||
| 2070 | !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { | ||
| 2071 | /* HC is busted, give up! */ | ||
| 2072 | xhci_err(xhci, | ||
| 2073 | "ERROR Transfer event TRB DMA ptr not " | ||
| 2074 | "part of current TD\n"); | ||
| 2075 | return -ESHUTDOWN; | ||
| 2076 | } | ||
| 2077 | |||
| 2078 | ret = skip_isoc_td(xhci, td, event, ep, &status); | ||
| 2079 | goto cleanup; | ||
| 2080 | } | ||
| 2081 | |||
| 2082 | if (ep->skip) { | ||
| 2031 | xhci_dbg(xhci, "Found td. Clear skip flag.\n"); | 2083 | xhci_dbg(xhci, "Found td. Clear skip flag.\n"); |
| 2032 | ep->skip = false; | 2084 | ep->skip = false; |
| 2033 | } | 2085 | } |
| 2034 | if (!event_seg && | ||
| 2035 | (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) { | ||
| 2036 | /* HC is busted, give up! */ | ||
| 2037 | xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not " | ||
| 2038 | "part of current TD\n"); | ||
| 2039 | return -ESHUTDOWN; | ||
| 2040 | } | ||
| 2041 | 2086 | ||
| 2042 | if (event_seg) { | 2087 | event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / |
| 2043 | event_trb = &event_seg->trbs[(event_dma - | 2088 | sizeof(*event_trb)]; |
| 2044 | event_seg->dma) / sizeof(*event_trb)]; | 2089 | /* |
| 2045 | /* | 2090 | * No-op TRB should not trigger interrupts. |
| 2046 | * No-op TRB should not trigger interrupts. | 2091 | * If event_trb is a no-op TRB, it means the |
| 2047 | * If event_trb is a no-op TRB, it means the | 2092 | * corresponding TD has been cancelled. Just ignore |
| 2048 | * corresponding TD has been cancelled. Just ignore | 2093 | * the TD. |
| 2049 | * the TD. | 2094 | */ |
| 2050 | */ | 2095 | if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK) |
| 2051 | if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK) | 2096 | == TRB_TYPE(TRB_TR_NOOP)) { |
| 2052 | == TRB_TYPE(TRB_TR_NOOP)) { | 2097 | xhci_dbg(xhci, |
| 2053 | xhci_dbg(xhci, "event_trb is a no-op TRB. " | 2098 | "event_trb is a no-op TRB. Skip it\n"); |
| 2054 | "Skip it\n"); | 2099 | goto cleanup; |
| 2055 | goto cleanup; | ||
| 2056 | } | ||
| 2057 | } | 2100 | } |
| 2058 | 2101 | ||
| 2059 | /* Now update the urb's actual_length and give back to | 2102 | /* Now update the urb's actual_length and give back to |
| @@ -3126,6 +3169,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
| 3126 | } | 3169 | } |
| 3127 | } | 3170 | } |
| 3128 | 3171 | ||
| 3172 | if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { | ||
| 3173 | if (xhci->quirks & XHCI_AMD_PLL_FIX) | ||
| 3174 | usb_amd_quirk_pll_disable(); | ||
| 3175 | } | ||
| 3176 | xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; | ||
| 3177 | |||
| 3129 | giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, | 3178 | giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, |
| 3130 | start_cycle, start_trb); | 3179 | start_cycle, start_trb); |
| 3131 | return 0; | 3180 | return 0; |
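
The xhci-ring.c hunks above wrap the AMD PLL workaround around a running count of isochronous requests: the PLL is switched off when the first isoc URB is queued and switched back on when the last one completes. A standalone sketch of that reference-count style gating (the counter and helper names here are illustrative; the real driver uses hcd->self.bandwidth_isoc_reqs and the usb_amd_quirk_pll_{enable,disable} helpers shown in the diff):

#include <stdio.h>

static int isoc_reqs;		/* mirrors hcd->self.bandwidth_isoc_reqs */
static int amd_pll_quirk = 1;	/* pretend XHCI_AMD_PLL_FIX is set */

static void pll_disable(void) { printf("PLL workaround: disable\n"); }
static void pll_enable(void)  { printf("PLL workaround: enable\n");  }

/* Queue side, cf. xhci_queue_isoc_tx(): the first isoc request turns the PLL off. */
static void isoc_submit(void)
{
	if (isoc_reqs == 0 && amd_pll_quirk)
		pll_disable();
	isoc_reqs++;
}

/* Completion side, cf. the giveback paths: the last isoc request turns it back on. */
static void isoc_complete(void)
{
	isoc_reqs--;
	if (isoc_reqs == 0 && amd_pll_quirk)
		pll_enable();
}

int main(void)
{
	isoc_submit();		/* disable */
	isoc_submit();		/* counted only */
	isoc_complete();	/* counted only */
	isoc_complete();	/* enable */
	return 0;
}
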
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 196e0181b2ed..81b976e45880 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -550,6 +550,9 @@ void xhci_stop(struct usb_hcd *hcd) | |||
| 550 | del_timer_sync(&xhci->event_ring_timer); | 550 | del_timer_sync(&xhci->event_ring_timer); |
| 551 | #endif | 551 | #endif |
| 552 | 552 | ||
| 553 | if (xhci->quirks & XHCI_AMD_PLL_FIX) | ||
| 554 | usb_amd_dev_put(); | ||
| 555 | |||
| 553 | xhci_dbg(xhci, "// Disabling event ring interrupts\n"); | 556 | xhci_dbg(xhci, "// Disabling event ring interrupts\n"); |
| 554 | temp = xhci_readl(xhci, &xhci->op_regs->status); | 557 | temp = xhci_readl(xhci, &xhci->op_regs->status); |
| 555 | xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status); | 558 | xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status); |
| @@ -771,7 +774,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
| 771 | 774 | ||
| 772 | /* If restore operation fails, re-initialize the HC during resume */ | 775 | /* If restore operation fails, re-initialize the HC during resume */ |
| 773 | if ((temp & STS_SRE) || hibernated) { | 776 | if ((temp & STS_SRE) || hibernated) { |
| 774 | usb_root_hub_lost_power(hcd->self.root_hub); | 777 | /* Let the USB core know _both_ roothubs lost power. */ |
| 778 | usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); | ||
| 779 | usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); | ||
| 775 | 780 | ||
| 776 | xhci_dbg(xhci, "Stop HCD\n"); | 781 | xhci_dbg(xhci, "Stop HCD\n"); |
| 777 | xhci_halt(xhci); | 782 | xhci_halt(xhci); |
| @@ -2386,10 +2391,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
| 2386 | /* Everything but endpoint 0 is disabled, so free or cache the rings. */ | 2391 | /* Everything but endpoint 0 is disabled, so free or cache the rings. */ |
| 2387 | last_freed_endpoint = 1; | 2392 | last_freed_endpoint = 1; |
| 2388 | for (i = 1; i < 31; ++i) { | 2393 | for (i = 1; i < 31; ++i) { |
| 2389 | if (!virt_dev->eps[i].ring) | 2394 | struct xhci_virt_ep *ep = &virt_dev->eps[i]; |
| 2390 | continue; | 2395 | |
| 2391 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | 2396 | if (ep->ep_state & EP_HAS_STREAMS) { |
| 2392 | last_freed_endpoint = i; | 2397 | xhci_free_stream_info(xhci, ep->stream_info); |
| 2398 | ep->stream_info = NULL; | ||
| 2399 | ep->ep_state &= ~EP_HAS_STREAMS; | ||
| 2400 | } | ||
| 2401 | |||
| 2402 | if (ep->ring) { | ||
| 2403 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | ||
| 2404 | last_freed_endpoint = i; | ||
| 2405 | } | ||
| 2393 | } | 2406 | } |
| 2394 | xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); | 2407 | xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); |
| 2395 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); | 2408 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 07e263063e37..ba1be6b7cc6d 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | 30 | ||
| 31 | /* Code sharing between pci-quirks and xhci hcd */ | 31 | /* Code sharing between pci-quirks and xhci hcd */ |
| 32 | #include "xhci-ext-caps.h" | 32 | #include "xhci-ext-caps.h" |
| 33 | #include "pci-quirks.h" | ||
| 33 | 34 | ||
| 34 | /* xHCI PCI Configuration Registers */ | 35 | /* xHCI PCI Configuration Registers */ |
| 35 | #define XHCI_SBRN_OFFSET (0x60) | 36 | #define XHCI_SBRN_OFFSET (0x60) |
| @@ -232,7 +233,7 @@ struct xhci_op_regs { | |||
| 232 | * notification type that matches a bit set in this bit field. | 233 | * notification type that matches a bit set in this bit field. |
| 233 | */ | 234 | */ |
| 234 | #define DEV_NOTE_MASK (0xffff) | 235 | #define DEV_NOTE_MASK (0xffff) |
| 235 | #define ENABLE_DEV_NOTE(x) (1 << x) | 236 | #define ENABLE_DEV_NOTE(x) (1 << (x)) |
| 236 | /* Most of the device notification types should only be used for debug. | 237 | /* Most of the device notification types should only be used for debug. |
| 237 | * SW does need to pay attention to function wake notifications. | 238 | * SW does need to pay attention to function wake notifications. |
| 238 | */ | 239 | */ |
| @@ -348,6 +349,9 @@ struct xhci_op_regs { | |||
| 348 | /* Initiate a warm port reset - complete when PORT_WRC is '1' */ | 349 | /* Initiate a warm port reset - complete when PORT_WRC is '1' */ |
| 349 | #define PORT_WR (1 << 31) | 350 | #define PORT_WR (1 << 31) |
| 350 | 351 | ||
| 352 | /* We mark duplicate entries with -1 */ | ||
| 353 | #define DUPLICATE_ENTRY ((u8)(-1)) | ||
| 354 | |||
| 351 | /* Port Power Management Status and Control - port_power_base bitmasks */ | 355 | /* Port Power Management Status and Control - port_power_base bitmasks */ |
| 352 | /* Inactivity timer value for transitions into U1, in microseconds. | 356 | /* Inactivity timer value for transitions into U1, in microseconds. |
| 353 | * Timeout can be up to 127us. 0xFF means an infinite timeout. | 357 | * Timeout can be up to 127us. 0xFF means an infinite timeout. |
| @@ -601,11 +605,11 @@ struct xhci_ep_ctx { | |||
| 601 | #define EP_STATE_STOPPED 3 | 605 | #define EP_STATE_STOPPED 3 |
| 602 | #define EP_STATE_ERROR 4 | 606 | #define EP_STATE_ERROR 4 |
| 603 | /* Mult - Max number of burtst within an interval, in EP companion desc. */ | 607 | /* Mult - Max number of burtst within an interval, in EP companion desc. */ |
| 604 | #define EP_MULT(p) ((p & 0x3) << 8) | 608 | #define EP_MULT(p) (((p) & 0x3) << 8) |
| 605 | /* bits 10:14 are Max Primary Streams */ | 609 | /* bits 10:14 are Max Primary Streams */ |
| 606 | /* bit 15 is Linear Stream Array */ | 610 | /* bit 15 is Linear Stream Array */ |
| 607 | /* Interval - period between requests to an endpoint - 125u increments. */ | 611 | /* Interval - period between requests to an endpoint - 125u increments. */ |
| 608 | #define EP_INTERVAL(p) ((p & 0xff) << 16) | 612 | #define EP_INTERVAL(p) (((p) & 0xff) << 16) |
| 609 | #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) | 613 | #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) |
| 610 | #define EP_MAXPSTREAMS_MASK (0x1f << 10) | 614 | #define EP_MAXPSTREAMS_MASK (0x1f << 10) |
| 611 | #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) | 615 | #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) |
| @@ -1276,6 +1280,7 @@ struct xhci_hcd { | |||
| 1276 | #define XHCI_LINK_TRB_QUIRK (1 << 0) | 1280 | #define XHCI_LINK_TRB_QUIRK (1 << 0) |
| 1277 | #define XHCI_RESET_EP_QUIRK (1 << 1) | 1281 | #define XHCI_RESET_EP_QUIRK (1 << 1) |
| 1278 | #define XHCI_NEC_HOST (1 << 2) | 1282 | #define XHCI_NEC_HOST (1 << 2) |
| 1283 | #define XHCI_AMD_PLL_FIX (1 << 3) | ||
| 1279 | /* There are two roothubs to keep track of bus suspend info for */ | 1284 | /* There are two roothubs to keep track of bus suspend info for */ |
| 1280 | struct xhci_bus_state bus_state[2]; | 1285 | struct xhci_bus_state bus_state[2]; |
| 1281 | /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ | 1286 | /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ |
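
Besides the DUPLICATE_ENTRY and XHCI_AMD_PLL_FIX additions, the xhci.h hunks wrap macro parameters such as EP_MULT(p) and EP_INTERVAL(p) in parentheses. A compilable sketch of why that matters when the argument is itself an expression (the macro names here only mirror the ones in the diff):

#include <stdio.h>

#define EP_INTERVAL_UNSAFE(p)	((p & 0xff) << 16)	/* before the hunk */
#define EP_INTERVAL_SAFE(p)	(((p) & 0xff) << 16)	/* after the hunk  */

int main(void)
{
	/* With a compound argument, '&' binds tighter than '|', so the
	 * unparenthesized form lets the 0x100 bit escape the mask. */
	unsigned int unsafe = EP_INTERVAL_UNSAFE(0x100 | 0x04);	/* 0x01040000 */
	unsigned int safe   = EP_INTERVAL_SAFE(0x100 | 0x04);	/* 0x00040000 */

	printf("unsafe=0x%08x safe=0x%08x\n", unsafe, safe);
	return 0;
}
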
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 4cbb7e4b368d..74073b363c30 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig | |||
| @@ -14,7 +14,7 @@ config USB_MUSB_HDRC | |||
| 14 | select TWL4030_USB if MACH_OMAP_3430SDP | 14 | select TWL4030_USB if MACH_OMAP_3430SDP |
| 15 | select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA | 15 | select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA |
| 16 | select USB_OTG_UTILS | 16 | select USB_OTG_UTILS |
| 17 | tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' | 17 | bool 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' |
| 18 | help | 18 | help |
| 19 | Say Y here if your system has a dual role high speed USB | 19 | Say Y here if your system has a dual role high speed USB |
| 20 | controller based on the Mentor Graphics silicon IP. Then | 20 | controller based on the Mentor Graphics silicon IP. Then |
| @@ -30,8 +30,8 @@ config USB_MUSB_HDRC | |||
| 30 | 30 | ||
| 31 | If you do not know what this is, please say N. | 31 | If you do not know what this is, please say N. |
| 32 | 32 | ||
| 33 | To compile this driver as a module, choose M here; the | 33 | # To compile this driver as a module, choose M here; the |
| 34 | module will be called "musb-hdrc". | 34 | # module will be called "musb-hdrc". |
| 35 | 35 | ||
| 36 | choice | 36 | choice |
| 37 | prompt "Platform Glue Layer" | 37 | prompt "Platform Glue Layer" |
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index 52312e8af213..8e2a1ff8a35a 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <asm/cacheflush.h> | 21 | #include <asm/cacheflush.h> |
| 22 | 22 | ||
| 23 | #include "musb_core.h" | 23 | #include "musb_core.h" |
| 24 | #include "musbhsdma.h" | ||
| 24 | #include "blackfin.h" | 25 | #include "blackfin.h" |
| 25 | 26 | ||
| 26 | struct bfin_glue { | 27 | struct bfin_glue { |
| @@ -332,6 +333,27 @@ static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode) | |||
| 332 | return -EIO; | 333 | return -EIO; |
| 333 | } | 334 | } |
| 334 | 335 | ||
| 336 | static int bfin_musb_adjust_channel_params(struct dma_channel *channel, | ||
| 337 | u16 packet_sz, u8 *mode, | ||
| 338 | dma_addr_t *dma_addr, u32 *len) | ||
| 339 | { | ||
| 340 | struct musb_dma_channel *musb_channel = channel->private_data; | ||
| 341 | |||
| 342 | /* | ||
| 343 | * Anomaly 05000450 might cause data corruption when using DMA | ||
| 344 | * MODE 1 transmits with short packet. So to work around this, | ||
| 345 | * we truncate all MODE 1 transfers down to a multiple of the | ||
| 346 | * max packet size, and then do the last short packet transfer | ||
| 347 | * (if there is any) using MODE 0. | ||
| 348 | */ | ||
| 349 | if (ANOMALY_05000450) { | ||
| 350 | if (musb_channel->transmit && *mode == 1) | ||
| 351 | *len = *len - (*len % packet_sz); | ||
| 352 | } | ||
| 353 | |||
| 354 | return 0; | ||
| 355 | } | ||
| 356 | |||
| 335 | static void bfin_musb_reg_init(struct musb *musb) | 357 | static void bfin_musb_reg_init(struct musb *musb) |
| 336 | { | 358 | { |
| 337 | if (ANOMALY_05000346) { | 359 | if (ANOMALY_05000346) { |
| @@ -430,6 +452,8 @@ static const struct musb_platform_ops bfin_ops = { | |||
| 430 | 452 | ||
| 431 | .vbus_status = bfin_musb_vbus_status, | 453 | .vbus_status = bfin_musb_vbus_status, |
| 432 | .set_vbus = bfin_musb_set_vbus, | 454 | .set_vbus = bfin_musb_set_vbus, |
| 455 | |||
| 456 | .adjust_channel_params = bfin_musb_adjust_channel_params, | ||
| 433 | }; | 457 | }; |
| 434 | 458 | ||
| 435 | static u64 bfin_dmamask = DMA_BIT_MASK(32); | 459 | static u64 bfin_dmamask = DMA_BIT_MASK(32); |
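
The Blackfin glue above works around anomaly 05000450 by trimming DMA MODE 1 transmits to a whole number of max-size packets, leaving the short tail to be sent in MODE 0. A standalone sketch of just that length arithmetic (the packet size and transfer length below are made up for illustration):

#include <stdio.h>

/* Same arithmetic as the *len adjustment in bfin_musb_adjust_channel_params(). */
static unsigned int full_packet_bytes(unsigned int len, unsigned int packet_sz)
{
	return len - (len % packet_sz);
}

int main(void)
{
	unsigned int packet_sz = 512;			/* illustrative max packet size */
	unsigned int len = 1800;			/* 3 full packets + 264 bytes   */
	unsigned int mode1_len = full_packet_bytes(len, packet_sz);

	printf("MODE 1 DMA sends %u bytes, MODE 0 sends the last %u bytes\n",
	       mode1_len, len - mode1_len);		/* 1536 and 264 */
	return 0;
}
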
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index de55a3c3259a..ab434fbd8c35 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c | |||
| @@ -597,12 +597,12 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) | |||
| 597 | length = min(n_bds * maxpacket, length); | 597 | length = min(n_bds * maxpacket, length); |
| 598 | } | 598 | } |
| 599 | 599 | ||
| 600 | DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n", | 600 | DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n", |
| 601 | tx->index, | 601 | tx->index, |
| 602 | maxpacket, | 602 | maxpacket, |
| 603 | rndis ? "rndis" : "transparent", | 603 | rndis ? "rndis" : "transparent", |
| 604 | n_bds, | 604 | n_bds, |
| 605 | addr, length); | 605 | (unsigned long long)addr, length); |
| 606 | 606 | ||
| 607 | cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); | 607 | cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); |
| 608 | 608 | ||
| @@ -820,7 +820,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) | |||
| 820 | length = min(n_bds * maxpacket, length); | 820 | length = min(n_bds * maxpacket, length); |
| 821 | 821 | ||
| 822 | DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " | 822 | DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " |
| 823 | "dma 0x%x len %u %u/%u\n", | 823 | "dma 0x%llx len %u %u/%u\n", |
| 824 | rx->index, maxpacket, | 824 | rx->index, maxpacket, |
| 825 | onepacket | 825 | onepacket |
| 826 | ? (is_rndis ? "rndis" : "onepacket") | 826 | ? (is_rndis ? "rndis" : "onepacket") |
| @@ -829,7 +829,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) | |||
| 829 | musb_readl(tibase, | 829 | musb_readl(tibase, |
| 830 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) | 830 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) |
| 831 | & 0xffff, | 831 | & 0xffff, |
| 832 | addr, length, rx->channel.actual_len, rx->buf_len); | 832 | (unsigned long long)addr, length, |
| 833 | rx->channel.actual_len, rx->buf_len); | ||
| 833 | 834 | ||
| 834 | /* only queue one segment at a time, since the hardware prevents | 835 | /* only queue one segment at a time, since the hardware prevents |
| 835 | * correct queue shutdown after unexpected short packets | 836 | * correct queue shutdown after unexpected short packets |
| @@ -1039,9 +1040,9 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) | |||
| 1039 | if (!completed && (bd->hw_options & CPPI_OWN_SET)) | 1040 | if (!completed && (bd->hw_options & CPPI_OWN_SET)) |
| 1040 | break; | 1041 | break; |
| 1041 | 1042 | ||
| 1042 | DBG(5, "C/RXBD %08x: nxt %08x buf %08x " | 1043 | DBG(5, "C/RXBD %llx: nxt %08x buf %08x " |
| 1043 | "off.len %08x opt.len %08x (%d)\n", | 1044 | "off.len %08x opt.len %08x (%d)\n", |
| 1044 | bd->dma, bd->hw_next, bd->hw_bufp, | 1045 | (unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp, |
| 1045 | bd->hw_off_len, bd->hw_options, | 1046 | bd->hw_off_len, bd->hw_options, |
| 1046 | rx->channel.actual_len); | 1047 | rx->channel.actual_len); |
| 1047 | 1048 | ||
| @@ -1111,11 +1112,12 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) | |||
| 1111 | musb_ep_select(cppi->mregs, rx->index + 1); | 1112 | musb_ep_select(cppi->mregs, rx->index + 1); |
| 1112 | csr = musb_readw(regs, MUSB_RXCSR); | 1113 | csr = musb_readw(regs, MUSB_RXCSR); |
| 1113 | if (csr & MUSB_RXCSR_DMAENAB) { | 1114 | if (csr & MUSB_RXCSR_DMAENAB) { |
| 1114 | DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n", | 1115 | DBG(4, "list%d %p/%p, last %llx%s, csr %04x\n", |
| 1115 | rx->index, | 1116 | rx->index, |
| 1116 | rx->head, rx->tail, | 1117 | rx->head, rx->tail, |
| 1117 | rx->last_processed | 1118 | rx->last_processed |
| 1118 | ? rx->last_processed->dma | 1119 | ? (unsigned long long) |
| 1120 | rx->last_processed->dma | ||
| 1119 | : 0, | 1121 | : 0, |
| 1120 | completed ? ", completed" : "", | 1122 | completed ? ", completed" : "", |
| 1121 | csr); | 1123 | csr); |
| @@ -1167,8 +1169,11 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) | |||
| 1167 | tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); | 1169 | tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); |
| 1168 | rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); | 1170 | rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); |
| 1169 | 1171 | ||
| 1170 | if (!tx && !rx) | 1172 | if (!tx && !rx) { |
| 1173 | if (cppi->irq) | ||
| 1174 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1171 | return IRQ_NONE; | 1175 | return IRQ_NONE; |
| 1176 | } | ||
| 1172 | 1177 | ||
| 1173 | DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx); | 1178 | DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx); |
| 1174 | 1179 | ||
| @@ -1199,7 +1204,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) | |||
| 1199 | */ | 1204 | */ |
| 1200 | if (NULL == bd) { | 1205 | if (NULL == bd) { |
| 1201 | DBG(1, "null BD\n"); | 1206 | DBG(1, "null BD\n"); |
| 1202 | tx_ram->tx_complete = 0; | 1207 | musb_writel(&tx_ram->tx_complete, 0, 0); |
| 1203 | continue; | 1208 | continue; |
| 1204 | } | 1209 | } |
| 1205 | 1210 | ||
| @@ -1452,7 +1457,7 @@ static int cppi_channel_abort(struct dma_channel *channel) | |||
| 1452 | * compare mode by writing 1 to the tx_complete register. | 1457 | * compare mode by writing 1 to the tx_complete register. |
| 1453 | */ | 1458 | */ |
| 1454 | cppi_reset_tx(tx_ram, 1); | 1459 | cppi_reset_tx(tx_ram, 1); |
| 1455 | cppi_ch->head = 0; | 1460 | cppi_ch->head = NULL; |
| 1456 | musb_writel(&tx_ram->tx_complete, 0, 1); | 1461 | musb_writel(&tx_ram->tx_complete, 0, 1); |
| 1457 | cppi_dump_tx(5, cppi_ch, " (done teardown)"); | 1462 | cppi_dump_tx(5, cppi_ch, " (done teardown)"); |
| 1458 | 1463 | ||
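
The cppi_dma.c hunks switch the DBG() format strings to %llx with an explicit cast because dma_addr_t may be 32 or 64 bits wide depending on the configuration. A minimal userspace illustration of the portable-printing idiom (the 64-bit typedef and the address value are assumptions made for the example):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;	/* assume a 64-bit dma_addr_t for the demo */

int main(void)
{
	dma_addr_t addr = 0x1f0000000ULL;	/* an address above 4 GiB */

	/* printf("dma 0x%x\n", addr); -- wrong: %x expects unsigned int */

	/* Portable form used by the hunks above: cast plus %llx. */
	printf("dma 0x%llx\n", (unsigned long long)addr);
	return 0;
}
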
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 630ae7f3cd4c..f10ff00ca09e 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
| @@ -1030,6 +1030,7 @@ static void musb_shutdown(struct platform_device *pdev) | |||
| 1030 | struct musb *musb = dev_to_musb(&pdev->dev); | 1030 | struct musb *musb = dev_to_musb(&pdev->dev); |
| 1031 | unsigned long flags; | 1031 | unsigned long flags; |
| 1032 | 1032 | ||
| 1033 | pm_runtime_get_sync(musb->controller); | ||
| 1033 | spin_lock_irqsave(&musb->lock, flags); | 1034 | spin_lock_irqsave(&musb->lock, flags); |
| 1034 | musb_platform_disable(musb); | 1035 | musb_platform_disable(musb); |
| 1035 | musb_generic_disable(musb); | 1036 | musb_generic_disable(musb); |
| @@ -1040,6 +1041,7 @@ static void musb_shutdown(struct platform_device *pdev) | |||
| 1040 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | 1041 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); |
| 1041 | musb_platform_exit(musb); | 1042 | musb_platform_exit(musb); |
| 1042 | 1043 | ||
| 1044 | pm_runtime_put(musb->controller); | ||
| 1043 | /* FIXME power down */ | 1045 | /* FIXME power down */ |
| 1044 | } | 1046 | } |
| 1045 | 1047 | ||
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 4bd9e2145ee4..0e053b587960 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
| @@ -261,6 +261,7 @@ enum musb_g_ep0_state { | |||
| 261 | * @try_idle: tries to idle the IP | 261 | * @try_idle: tries to idle the IP |
| 262 | * @vbus_status: returns vbus status if possible | 262 | * @vbus_status: returns vbus status if possible |
| 263 | * @set_vbus: forces vbus status | 263 | * @set_vbus: forces vbus status |
| 264 | * @channel_program: pre check for standard dma channel_program func | ||
| 264 | */ | 265 | */ |
| 265 | struct musb_platform_ops { | 266 | struct musb_platform_ops { |
| 266 | int (*init)(struct musb *musb); | 267 | int (*init)(struct musb *musb); |
| @@ -274,6 +275,10 @@ struct musb_platform_ops { | |||
| 274 | 275 | ||
| 275 | int (*vbus_status)(struct musb *musb); | 276 | int (*vbus_status)(struct musb *musb); |
| 276 | void (*set_vbus)(struct musb *musb, int on); | 277 | void (*set_vbus)(struct musb *musb, int on); |
| 278 | |||
| 279 | int (*adjust_channel_params)(struct dma_channel *channel, | ||
| 280 | u16 packet_sz, u8 *mode, | ||
| 281 | dma_addr_t *dma_addr, u32 *len); | ||
| 277 | }; | 282 | }; |
| 278 | 283 | ||
| 279 | /* | 284 | /* |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 98519c5d8b5c..6dfbf9ffd7a6 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
| @@ -535,7 +535,7 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
| 535 | is_dma = 1; | 535 | is_dma = 1; |
| 536 | csr |= MUSB_TXCSR_P_WZC_BITS; | 536 | csr |= MUSB_TXCSR_P_WZC_BITS; |
| 537 | csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | | 537 | csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | |
| 538 | MUSB_TXCSR_TXPKTRDY); | 538 | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); |
| 539 | musb_writew(epio, MUSB_TXCSR, csr); | 539 | musb_writew(epio, MUSB_TXCSR, csr); |
| 540 | /* Ensure writebuffer is empty. */ | 540 | /* Ensure writebuffer is empty. */ |
| 541 | csr = musb_readw(epio, MUSB_TXCSR); | 541 | csr = musb_readw(epio, MUSB_TXCSR); |
| @@ -1296,7 +1296,7 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) | |||
| 1296 | } | 1296 | } |
| 1297 | 1297 | ||
| 1298 | /* if the hardware doesn't have the request, easy ... */ | 1298 | /* if the hardware doesn't have the request, easy ... */ |
| 1299 | if (musb_ep->req_list.next != &request->list || musb_ep->busy) | 1299 | if (musb_ep->req_list.next != &req->list || musb_ep->busy) |
| 1300 | musb_g_giveback(musb_ep, request, -ECONNRESET); | 1300 | musb_g_giveback(musb_ep, request, -ECONNRESET); |
| 1301 | 1301 | ||
| 1302 | /* ... else abort the dma transfer ... */ | 1302 | /* ... else abort the dma transfer ... */ |
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index 0144a2d481fd..d281792db05c 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c | |||
| @@ -169,6 +169,14 @@ static int dma_channel_program(struct dma_channel *channel, | |||
| 169 | BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || | 169 | BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || |
| 170 | channel->status == MUSB_DMA_STATUS_BUSY); | 170 | channel->status == MUSB_DMA_STATUS_BUSY); |
| 171 | 171 | ||
| 172 | /* Let targets check/tweak the arguments */ | ||
| 173 | if (musb->ops->adjust_channel_params) { | ||
| 174 | int ret = musb->ops->adjust_channel_params(channel, | ||
| 175 | packet_sz, &mode, &dma_addr, &len); | ||
| 176 | if (ret) | ||
| 177 | return ret; | ||
| 178 | } | ||
| 179 | |||
| 172 | /* | 180 | /* |
| 173 | * The DMA engine in RTL1.8 and above cannot handle | 181 | * The DMA engine in RTL1.8 and above cannot handle |
| 174 | * DMA addresses that are not aligned to a 4 byte boundary. | 182 | * DMA addresses that are not aligned to a 4 byte boundary. |
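
musbhsdma.c now gives the platform glue an optional adjust_channel_params() callback that can tweak or veto a transfer before the channel is programmed. A standalone sketch of that optional-hook dispatch (the ops layout loosely mirrors the diff; the callback body rejecting odd lengths is purely invented for the demo):

#include <stdio.h>
#include <stdint.h>

struct glue_ops {
	/* Optional hook, loosely mirroring adjust_channel_params in the diff. */
	int (*adjust_channel_params)(uint16_t packet_sz, uint8_t *mode,
				     uint32_t *len);
};

/* Invented callback body: reject odd transfer lengths. */
static int refuse_odd_lengths(uint16_t packet_sz, uint8_t *mode, uint32_t *len)
{
	(void)packet_sz;
	(void)mode;
	return (*len & 1) ? -22 /* -EINVAL */ : 0;
}

static int channel_program(const struct glue_ops *ops,
			   uint16_t packet_sz, uint8_t mode, uint32_t len)
{
	/* Core code calls the hook only when the glue layer provides one. */
	if (ops->adjust_channel_params) {
		int ret = ops->adjust_channel_params(packet_sz, &mode, &len);
		if (ret)
			return ret;
	}
	printf("program DMA: mode %u, %u bytes\n", (unsigned int)mode, len);
	return 0;
}

int main(void)
{
	struct glue_ops picky = { .adjust_channel_params = refuse_odd_lengths };
	struct glue_ops plain = { 0 };

	channel_program(&plain, 512, 1, 1801);	/* no hook: programmed as-is  */
	channel_program(&picky, 512, 1, 1801);	/* hook returns -22 (-EINVAL) */
	return 0;
}
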
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 25cb8b0003b1..57a27fa954b4 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c | |||
| @@ -259,9 +259,10 @@ static int musb_otg_notifications(struct notifier_block *nb, | |||
| 259 | case USB_EVENT_VBUS: | 259 | case USB_EVENT_VBUS: |
| 260 | DBG(4, "VBUS Connect\n"); | 260 | DBG(4, "VBUS Connect\n"); |
| 261 | 261 | ||
| 262 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 262 | if (musb->gadget_driver) | 263 | if (musb->gadget_driver) |
| 263 | pm_runtime_get_sync(musb->controller); | 264 | pm_runtime_get_sync(musb->controller); |
| 264 | 265 | #endif | |
| 265 | otg_init(musb->xceiv); | 266 | otg_init(musb->xceiv); |
| 266 | break; | 267 | break; |
| 267 | 268 | ||
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index d6384e4aeef9..f7e04bf34a13 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c | |||
| @@ -93,6 +93,8 @@ static int __init ux500_probe(struct platform_device *pdev) | |||
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | musb->dev.parent = &pdev->dev; | 95 | musb->dev.parent = &pdev->dev; |
| 96 | musb->dev.dma_mask = pdev->dev.dma_mask; | ||
| 97 | musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; | ||
| 96 | 98 | ||
| 97 | glue->dev = &pdev->dev; | 99 | glue->dev = &pdev->dev; |
| 98 | glue->musb = musb; | 100 | glue->musb = musb; |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index a973c7a29d6e..4de6ef0ae52a 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -151,6 +151,8 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = { | |||
| 151 | * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! | 151 | * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! |
| 152 | */ | 152 | */ |
| 153 | static struct usb_device_id id_table_combined [] = { | 153 | static struct usb_device_id id_table_combined [] = { |
| 154 | { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, | ||
| 155 | { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, | ||
| 154 | { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, | 156 | { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, |
| 155 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, | 157 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, |
| 156 | { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, | 158 | { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, |
| @@ -525,6 +527,7 @@ static struct usb_device_id id_table_combined [] = { | |||
| 525 | { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) }, | 527 | { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) }, |
| 526 | { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) }, | 528 | { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) }, |
| 527 | { USB_DEVICE(OCT_VID, OCT_US101_PID) }, | 529 | { USB_DEVICE(OCT_VID, OCT_US101_PID) }, |
| 530 | { USB_DEVICE(OCT_VID, OCT_DK201_PID) }, | ||
| 528 | { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID), | 531 | { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID), |
| 529 | .driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk }, | 532 | .driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk }, |
| 530 | { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID), | 533 | { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID), |
| @@ -787,6 +790,8 @@ static struct usb_device_id id_table_combined [] = { | |||
| 787 | { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), | 790 | { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), |
| 788 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 791 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
| 789 | { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, | 792 | { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, |
| 793 | { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) }, | ||
| 794 | { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) }, | ||
| 790 | { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, | 795 | { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, |
| 791 | { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) }, | 796 | { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) }, |
| 792 | { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, | 797 | { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index c543e55bafba..efffc23723bd 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
| @@ -300,6 +300,8 @@ | |||
| 300 | * Hameg HO820 and HO870 interface (using VID 0x0403) | 300 | * Hameg HO820 and HO870 interface (using VID 0x0403) |
| 301 | */ | 301 | */ |
| 302 | #define HAMEG_HO820_PID 0xed74 | 302 | #define HAMEG_HO820_PID 0xed74 |
| 303 | #define HAMEG_HO730_PID 0xed73 | ||
| 304 | #define HAMEG_HO720_PID 0xed72 | ||
| 303 | #define HAMEG_HO870_PID 0xed71 | 305 | #define HAMEG_HO870_PID 0xed71 |
| 304 | 306 | ||
| 305 | /* | 307 | /* |
| @@ -572,6 +574,7 @@ | |||
| 572 | /* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */ | 574 | /* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */ |
| 573 | /* Also rebadged as Dick Smith Electronics (Aus) XH6451 */ | 575 | /* Also rebadged as Dick Smith Electronics (Aus) XH6451 */ |
| 574 | /* Also rebadged as SIIG Inc. model US2308 hardware version 1 */ | 576 | /* Also rebadged as SIIG Inc. model US2308 hardware version 1 */ |
| 577 | #define OCT_DK201_PID 0x0103 /* OCT DK201 USB docking station */ | ||
| 575 | #define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */ | 578 | #define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */ |
| 576 | 579 | ||
| 577 | /* | 580 | /* |
| @@ -1141,3 +1144,12 @@ | |||
| 1141 | #define QIHARDWARE_VID 0x20B7 | 1144 | #define QIHARDWARE_VID 0x20B7 |
| 1142 | #define MILKYMISTONE_JTAGSERIAL_PID 0x0713 | 1145 | #define MILKYMISTONE_JTAGSERIAL_PID 0x0713 |
| 1143 | 1146 | ||
| 1147 | /* | ||
| 1148 | * CTI GmbH RS485 Converter http://www.cti-lean.com/ | ||
| 1149 | */ | ||
| 1150 | /* USB-485-Mini*/ | ||
| 1151 | #define FTDI_CTI_MINI_PID 0xF608 | ||
| 1152 | /* USB-Nano-485*/ | ||
| 1153 | #define FTDI_CTI_NANO_PID 0xF60B | ||
| 1154 | |||
| 1155 | |||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 75c7f456eed5..d77ff0435896 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -407,6 +407,10 @@ static void option_instat_callback(struct urb *urb); | |||
| 407 | /* ONDA MT825UP HSDPA 14.2 modem */ | 407 | /* ONDA MT825UP HSDPA 14.2 modem */ |
| 408 | #define ONDA_MT825UP 0x000b | 408 | #define ONDA_MT825UP 0x000b |
| 409 | 409 | ||
| 410 | /* Samsung products */ | ||
| 411 | #define SAMSUNG_VENDOR_ID 0x04e8 | ||
| 412 | #define SAMSUNG_PRODUCT_GT_B3730 0x6889 | ||
| 413 | |||
| 410 | /* some devices interfaces need special handling due to a number of reasons */ | 414 | /* some devices interfaces need special handling due to a number of reasons */ |
| 411 | enum option_blacklist_reason { | 415 | enum option_blacklist_reason { |
| 412 | OPTION_BLACKLIST_NONE = 0, | 416 | OPTION_BLACKLIST_NONE = 0, |
| @@ -968,6 +972,7 @@ static const struct usb_device_id option_ids[] = { | |||
| 968 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, | 972 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, |
| 969 | { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ | 973 | { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ |
| 970 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ | 974 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ |
| 975 | { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem.*/ | ||
| 971 | { } /* Terminating entry */ | 976 | { } /* Terminating entry */ |
| 972 | }; | 977 | }; |
| 973 | MODULE_DEVICE_TABLE(usb, option_ids); | 978 | MODULE_DEVICE_TABLE(usb, option_ids); |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 8858201eb1d3..54a9dab1f33b 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
| @@ -111,7 +111,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 111 | ifnum = intf->desc.bInterfaceNumber; | 111 | ifnum = intf->desc.bInterfaceNumber; |
| 112 | dbg("This Interface = %d", ifnum); | 112 | dbg("This Interface = %d", ifnum); |
| 113 | 113 | ||
| 114 | data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), | 114 | data = kzalloc(sizeof(struct usb_wwan_intf_private), |
| 115 | GFP_KERNEL); | 115 | GFP_KERNEL); |
| 116 | if (!data) | 116 | if (!data) |
| 117 | return -ENOMEM; | 117 | return -ENOMEM; |
| @@ -134,8 +134,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 134 | usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) { | 134 | usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) { |
| 135 | dbg("QDL port found"); | 135 | dbg("QDL port found"); |
| 136 | 136 | ||
| 137 | if (serial->interface->num_altsetting == 1) | 137 | if (serial->interface->num_altsetting == 1) { |
| 138 | return 0; | 138 | retval = 0; /* Success */ |
| 139 | break; | ||
| 140 | } | ||
| 139 | 141 | ||
| 140 | retval = usb_set_interface(serial->dev, ifnum, 1); | 142 | retval = usb_set_interface(serial->dev, ifnum, 1); |
| 141 | if (retval < 0) { | 143 | if (retval < 0) { |
| @@ -145,7 +147,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 145 | retval = -ENODEV; | 147 | retval = -ENODEV; |
| 146 | kfree(data); | 148 | kfree(data); |
| 147 | } | 149 | } |
| 148 | return retval; | ||
| 149 | } | 150 | } |
| 150 | break; | 151 | break; |
| 151 | 152 | ||
| @@ -166,6 +167,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 166 | "Could not set interface, error %d\n", | 167 | "Could not set interface, error %d\n", |
| 167 | retval); | 168 | retval); |
| 168 | retval = -ENODEV; | 169 | retval = -ENODEV; |
| 170 | kfree(data); | ||
| 169 | } | 171 | } |
| 170 | } else if (ifnum == 2) { | 172 | } else if (ifnum == 2) { |
| 171 | dbg("Modem port found"); | 173 | dbg("Modem port found"); |
| @@ -177,7 +179,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 177 | retval = -ENODEV; | 179 | retval = -ENODEV; |
| 178 | kfree(data); | 180 | kfree(data); |
| 179 | } | 181 | } |
| 180 | return retval; | ||
| 181 | } else if (ifnum==3) { | 182 | } else if (ifnum==3) { |
| 182 | /* | 183 | /* |
| 183 | * NMEA (serial line 9600 8N1) | 184 | * NMEA (serial line 9600 8N1) |
| @@ -191,6 +192,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 191 | "Could not set interface, error %d\n", | 192 | "Could not set interface, error %d\n", |
| 192 | retval); | 193 | retval); |
| 193 | retval = -ENODEV; | 194 | retval = -ENODEV; |
| 195 | kfree(data); | ||
| 194 | } | 196 | } |
| 195 | } | 197 | } |
| 196 | break; | 198 | break; |
| @@ -199,12 +201,27 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 199 | dev_err(&serial->dev->dev, | 201 | dev_err(&serial->dev->dev, |
| 200 | "unknown number of interfaces: %d\n", nintf); | 202 | "unknown number of interfaces: %d\n", nintf); |
| 201 | kfree(data); | 203 | kfree(data); |
| 202 | return -ENODEV; | 204 | retval = -ENODEV; |
| 203 | } | 205 | } |
| 204 | 206 | ||
| 207 | /* Set serial->private if not returning -ENODEV */ | ||
| 208 | if (retval != -ENODEV) | ||
| 209 | usb_set_serial_data(serial, data); | ||
| 205 | return retval; | 210 | return retval; |
| 206 | } | 211 | } |
| 207 | 212 | ||
| 213 | static void qc_release(struct usb_serial *serial) | ||
| 214 | { | ||
| 215 | struct usb_wwan_intf_private *priv = usb_get_serial_data(serial); | ||
| 216 | |||
| 217 | dbg("%s", __func__); | ||
| 218 | |||
| 219 | /* Call usb_wwan release & free the private data allocated in qcprobe */ | ||
| 220 | usb_wwan_release(serial); | ||
| 221 | usb_set_serial_data(serial, NULL); | ||
| 222 | kfree(priv); | ||
| 223 | } | ||
| 224 | |||
| 208 | static struct usb_serial_driver qcdevice = { | 225 | static struct usb_serial_driver qcdevice = { |
| 209 | .driver = { | 226 | .driver = { |
| 210 | .owner = THIS_MODULE, | 227 | .owner = THIS_MODULE, |
| @@ -222,7 +239,7 @@ static struct usb_serial_driver qcdevice = { | |||
| 222 | .chars_in_buffer = usb_wwan_chars_in_buffer, | 239 | .chars_in_buffer = usb_wwan_chars_in_buffer, |
| 223 | .attach = usb_wwan_startup, | 240 | .attach = usb_wwan_startup, |
| 224 | .disconnect = usb_wwan_disconnect, | 241 | .disconnect = usb_wwan_disconnect, |
| 225 | .release = usb_wwan_release, | 242 | .release = qc_release, |
| 226 | #ifdef CONFIG_PM | 243 | #ifdef CONFIG_PM |
| 227 | .suspend = usb_wwan_suspend, | 244 | .suspend = usb_wwan_suspend, |
| 228 | .resume = usb_wwan_resume, | 245 | .resume = usb_wwan_resume, |
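
The qcserial changes fix a leak by attaching the probe-time allocation with usb_set_serial_data() only on success and freeing it again in a dedicated qc_release(). A standalone sketch of that paired probe/release ownership pattern (all names and errno values here are illustrative stand-ins, not the USB serial core API):

#include <stdio.h>
#include <stdlib.h>

struct serial { void *priv; };	/* stand-in for struct usb_serial */

static void set_serial_data(struct serial *s, void *p) { s->priv = p; }
static void *get_serial_data(struct serial *s)         { return s->priv; }

struct intf_private { int in_use; };

/* probe(): allocate private data, hand ownership to the core only on success. */
static int probe(struct serial *s, int usable)
{
	struct intf_private *data = calloc(1, sizeof(*data));

	if (!data)
		return -12;		/* -ENOMEM */
	if (!usable) {
		free(data);		/* every error path frees locally */
		return -19;		/* -ENODEV */
	}
	set_serial_data(s, data);	/* success: release() owns it from now on */
	return 0;
}

/* release(): the counterpart that frees whatever probe() attached. */
static void release(struct serial *s)
{
	free(get_serial_data(s));
	set_serial_data(s, NULL);
}

int main(void)
{
	struct serial s = { 0 };

	if (probe(&s, 1) == 0)
		release(&s);
	return 0;
}
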
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 42d6c930cc87..33167b43ac7e 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
| @@ -912,8 +912,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn, | |||
| 912 | unsigned long irqflags, | 912 | unsigned long irqflags, |
| 913 | const char *devname, void *dev_id) | 913 | const char *devname, void *dev_id) |
| 914 | { | 914 | { |
| 915 | unsigned int irq; | 915 | int irq, retval; |
| 916 | int retval; | ||
| 917 | 916 | ||
| 918 | irq = bind_evtchn_to_irq(evtchn); | 917 | irq = bind_evtchn_to_irq(evtchn); |
| 919 | if (irq < 0) | 918 | if (irq < 0) |
| @@ -955,8 +954,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, | |||
| 955 | irq_handler_t handler, | 954 | irq_handler_t handler, |
| 956 | unsigned long irqflags, const char *devname, void *dev_id) | 955 | unsigned long irqflags, const char *devname, void *dev_id) |
| 957 | { | 956 | { |
| 958 | unsigned int irq; | 957 | int irq, retval; |
| 959 | int retval; | ||
| 960 | 958 | ||
| 961 | irq = bind_virq_to_irq(virq, cpu); | 959 | irq = bind_virq_to_irq(virq, cpu); |
| 962 | if (irq < 0) | 960 | if (irq < 0) |
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 95143dd6904d..1ac94125bf93 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
| @@ -61,7 +61,7 @@ static void xen_post_suspend(int cancelled) | |||
| 61 | xen_mm_unpin_all(); | 61 | xen_mm_unpin_all(); |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | #ifdef CONFIG_HIBERNATION | 64 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 65 | static int xen_suspend(void *data) | 65 | static int xen_suspend(void *data) |
| 66 | { | 66 | { |
| 67 | struct suspend_info *si = data; | 67 | struct suspend_info *si = data; |
| @@ -173,7 +173,7 @@ out: | |||
| 173 | #endif | 173 | #endif |
| 174 | shutting_down = SHUTDOWN_INVALID; | 174 | shutting_down = SHUTDOWN_INVALID; |
| 175 | } | 175 | } |
| 176 | #endif /* CONFIG_HIBERNATION */ | 176 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
| 177 | 177 | ||
| 178 | struct shutdown_handler { | 178 | struct shutdown_handler { |
| 179 | const char *command; | 179 | const char *command; |
| @@ -202,7 +202,7 @@ static void shutdown_handler(struct xenbus_watch *watch, | |||
| 202 | { "poweroff", do_poweroff }, | 202 | { "poweroff", do_poweroff }, |
| 203 | { "halt", do_poweroff }, | 203 | { "halt", do_poweroff }, |
| 204 | { "reboot", do_reboot }, | 204 | { "reboot", do_reboot }, |
| 205 | #ifdef CONFIG_HIBERNATION | 205 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 206 | { "suspend", do_suspend }, | 206 | { "suspend", do_suspend }, |
| 207 | #endif | 207 | #endif |
| 208 | {NULL, NULL}, | 208 | {NULL, NULL}, |
diff --git a/fs/9p/fid.c b/fs/9p/fid.c index 0ee594569dcc..85b67ffa2a43 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c | |||
| @@ -286,11 +286,9 @@ static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid) | |||
| 286 | 286 | ||
| 287 | struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) | 287 | struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) |
| 288 | { | 288 | { |
| 289 | int err, flags; | 289 | int err; |
| 290 | struct p9_fid *fid; | 290 | struct p9_fid *fid; |
| 291 | struct v9fs_session_info *v9ses; | ||
| 292 | 291 | ||
| 293 | v9ses = v9fs_dentry2v9ses(dentry); | ||
| 294 | fid = v9fs_fid_clone_with_uid(dentry, 0); | 292 | fid = v9fs_fid_clone_with_uid(dentry, 0); |
| 295 | if (IS_ERR(fid)) | 293 | if (IS_ERR(fid)) |
| 296 | goto error_out; | 294 | goto error_out; |
| @@ -299,17 +297,8 @@ struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) | |||
| 299 | * dirty pages. We always request for the open fid in read-write | 297 | * dirty pages. We always request for the open fid in read-write |
| 300 | * mode so that a partial page write which result in page | 298 | * mode so that a partial page write which result in page |
| 301 | * read can work. | 299 | * read can work. |
| 302 | * | ||
| 303 | * we don't have a tsyncfs operation for older version | ||
| 304 | * of protocol. So make sure the write back fid is | ||
| 305 | * opened in O_SYNC mode. | ||
| 306 | */ | 300 | */ |
| 307 | if (!v9fs_proto_dotl(v9ses)) | 301 | err = p9_client_open(fid, O_RDWR); |
| 308 | flags = O_RDWR | O_SYNC; | ||
| 309 | else | ||
| 310 | flags = O_RDWR; | ||
| 311 | |||
| 312 | err = p9_client_open(fid, flags); | ||
| 313 | if (err < 0) { | 302 | if (err < 0) { |
| 314 | p9_client_clunk(fid); | 303 | p9_client_clunk(fid); |
| 315 | fid = ERR_PTR(err); | 304 | fid = ERR_PTR(err); |
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h index 9665c2b840e6..e5ebedfc5ed8 100644 --- a/fs/9p/v9fs.h +++ b/fs/9p/v9fs.h | |||
| @@ -116,7 +116,6 @@ struct v9fs_session_info { | |||
| 116 | struct list_head slist; /* list of sessions registered with v9fs */ | 116 | struct list_head slist; /* list of sessions registered with v9fs */ |
| 117 | struct backing_dev_info bdi; | 117 | struct backing_dev_info bdi; |
| 118 | struct rw_semaphore rename_sem; | 118 | struct rw_semaphore rename_sem; |
| 119 | struct p9_fid *root_fid; /* Used for file system sync */ | ||
| 120 | }; | 119 | }; |
| 121 | 120 | ||
| 122 | /* cache_validity flags */ | 121 | /* cache_validity flags */ |
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c index b6a3b9f7fe4d..e022890c6f40 100644 --- a/fs/9p/vfs_dentry.c +++ b/fs/9p/vfs_dentry.c | |||
| @@ -126,7 +126,9 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
| 126 | retval = v9fs_refresh_inode_dotl(fid, inode); | 126 | retval = v9fs_refresh_inode_dotl(fid, inode); |
| 127 | else | 127 | else |
| 128 | retval = v9fs_refresh_inode(fid, inode); | 128 | retval = v9fs_refresh_inode(fid, inode); |
| 129 | if (retval <= 0) | 129 | if (retval == -ENOENT) |
| 130 | return 0; | ||
| 131 | if (retval < 0) | ||
| 130 | return retval; | 132 | return retval; |
| 131 | } | 133 | } |
| 132 | out_valid: | 134 | out_valid: |
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index ffbb113d5f33..82a7c38ddad0 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c | |||
| @@ -811,7 +811,7 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd) | |||
| 811 | fid = v9fs_fid_lookup(dentry); | 811 | fid = v9fs_fid_lookup(dentry); |
| 812 | if (IS_ERR(fid)) { | 812 | if (IS_ERR(fid)) { |
| 813 | __putname(link); | 813 | __putname(link); |
| 814 | link = ERR_PTR(PTR_ERR(fid)); | 814 | link = ERR_CAST(fid); |
| 815 | goto ndset; | 815 | goto ndset; |
| 816 | } | 816 | } |
| 817 | retval = p9_client_readlink(fid, &target); | 817 | retval = p9_client_readlink(fid, &target); |
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index f3eed3383e4f..feef6cdc1fd2 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c | |||
| @@ -154,6 +154,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, | |||
| 154 | retval = PTR_ERR(inode); | 154 | retval = PTR_ERR(inode); |
| 155 | goto release_sb; | 155 | goto release_sb; |
| 156 | } | 156 | } |
| 157 | |||
| 157 | root = d_alloc_root(inode); | 158 | root = d_alloc_root(inode); |
| 158 | if (!root) { | 159 | if (!root) { |
| 159 | iput(inode); | 160 | iput(inode); |
| @@ -185,21 +186,10 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, | |||
| 185 | p9stat_free(st); | 186 | p9stat_free(st); |
| 186 | kfree(st); | 187 | kfree(st); |
| 187 | } | 188 | } |
| 188 | v9fs_fid_add(root, fid); | ||
| 189 | retval = v9fs_get_acl(inode, fid); | 189 | retval = v9fs_get_acl(inode, fid); |
| 190 | if (retval) | 190 | if (retval) |
| 191 | goto release_sb; | 191 | goto release_sb; |
| 192 | /* | 192 | v9fs_fid_add(root, fid); |
| 193 | * Add the root fid to session info. This is used | ||
| 194 | * for file system sync. We want a cloned fid here | ||
| 195 | * so that we can do a sync_filesystem after a | ||
| 196 | * shrink_dcache_for_umount | ||
| 197 | */ | ||
| 198 | v9ses->root_fid = v9fs_fid_clone(root); | ||
| 199 | if (IS_ERR(v9ses->root_fid)) { | ||
| 200 | retval = PTR_ERR(v9ses->root_fid); | ||
| 201 | goto release_sb; | ||
| 202 | } | ||
| 203 | 193 | ||
| 204 | P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); | 194 | P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); |
| 205 | return dget(sb->s_root); | 195 | return dget(sb->s_root); |
| @@ -210,11 +200,15 @@ close_session: | |||
| 210 | v9fs_session_close(v9ses); | 200 | v9fs_session_close(v9ses); |
| 211 | kfree(v9ses); | 201 | kfree(v9ses); |
| 212 | return ERR_PTR(retval); | 202 | return ERR_PTR(retval); |
| 203 | |||
| 213 | release_sb: | 204 | release_sb: |
| 214 | /* | 205 | /* |
| 215 | * we will do the session_close and root dentry | 206 | * we will do the session_close and root dentry release |
| 216 | * release in the below call. | 207 | * in the call below. But we need to clunk the fid here, because |
| 208 | * we haven't attached it to the dentry, so it won't get clunked |
| 209 | * automatically. | ||
| 217 | */ | 210 | */ |
| 211 | p9_client_clunk(fid); | ||
| 218 | deactivate_locked_super(sb); | 212 | deactivate_locked_super(sb); |
| 219 | return ERR_PTR(retval); | 213 | return ERR_PTR(retval); |
| 220 | } | 214 | } |
| @@ -232,7 +226,7 @@ static void v9fs_kill_super(struct super_block *s) | |||
| 232 | P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s); | 226 | P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s); |
| 233 | 227 | ||
| 234 | kill_anon_super(s); | 228 | kill_anon_super(s); |
| 235 | p9_client_clunk(v9ses->root_fid); | 229 | |
| 236 | v9fs_session_cancel(v9ses); | 230 | v9fs_session_cancel(v9ses); |
| 237 | v9fs_session_close(v9ses); | 231 | v9fs_session_close(v9ses); |
| 238 | kfree(v9ses); | 232 | kfree(v9ses); |
| @@ -285,14 +279,6 @@ done: | |||
| 285 | return res; | 279 | return res; |
| 286 | } | 280 | } |
| 287 | 281 | ||
| 288 | static int v9fs_sync_fs(struct super_block *sb, int wait) | ||
| 289 | { | ||
| 290 | struct v9fs_session_info *v9ses = sb->s_fs_info; | ||
| 291 | |||
| 292 | P9_DPRINTK(P9_DEBUG_VFS, "v9fs_sync_fs: super_block %p\n", sb); | ||
| 293 | return p9_client_sync_fs(v9ses->root_fid); | ||
| 294 | } | ||
| 295 | |||
| 296 | static int v9fs_drop_inode(struct inode *inode) | 282 | static int v9fs_drop_inode(struct inode *inode) |
| 297 | { | 283 | { |
| 298 | struct v9fs_session_info *v9ses; | 284 | struct v9fs_session_info *v9ses; |
| @@ -307,6 +293,51 @@ static int v9fs_drop_inode(struct inode *inode) | |||
| 307 | return 1; | 293 | return 1; |
| 308 | } | 294 | } |
| 309 | 295 | ||
| 296 | static int v9fs_write_inode(struct inode *inode, | ||
| 297 | struct writeback_control *wbc) | ||
| 298 | { | ||
| 299 | int ret; | ||
| 300 | struct p9_wstat wstat; | ||
| 301 | struct v9fs_inode *v9inode; | ||
| 302 | /* | ||
| 303 | * send an fsync request to server irrespective of | ||
| 304 | * wbc->sync_mode. | ||
| 305 | */ | ||
| 306 | P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); | ||
| 307 | v9inode = V9FS_I(inode); | ||
| 308 | if (!v9inode->writeback_fid) | ||
| 309 | return 0; | ||
| 310 | v9fs_blank_wstat(&wstat); | ||
| 311 | |||
| 312 | ret = p9_client_wstat(v9inode->writeback_fid, &wstat); | ||
| 313 | if (ret < 0) { | ||
| 314 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | ||
| 315 | return ret; | ||
| 316 | } | ||
| 317 | return 0; | ||
| 318 | } | ||
| 319 | |||
| 320 | static int v9fs_write_inode_dotl(struct inode *inode, | ||
| 321 | struct writeback_control *wbc) | ||
| 322 | { | ||
| 323 | int ret; | ||
| 324 | struct v9fs_inode *v9inode; | ||
| 325 | /* | ||
| 326 | * send an fsync request to server irrespective of | ||
| 327 | * wbc->sync_mode. | ||
| 328 | */ | ||
| 329 | P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); | ||
| 330 | v9inode = V9FS_I(inode); | ||
| 331 | if (!v9inode->writeback_fid) | ||
| 332 | return 0; | ||
| 333 | ret = p9_client_fsync(v9inode->writeback_fid, 0); | ||
| 334 | if (ret < 0) { | ||
| 335 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | ||
| 336 | return ret; | ||
| 337 | } | ||
| 338 | return 0; | ||
| 339 | } | ||
| 340 | |||
| 310 | static const struct super_operations v9fs_super_ops = { | 341 | static const struct super_operations v9fs_super_ops = { |
| 311 | .alloc_inode = v9fs_alloc_inode, | 342 | .alloc_inode = v9fs_alloc_inode, |
| 312 | .destroy_inode = v9fs_destroy_inode, | 343 | .destroy_inode = v9fs_destroy_inode, |
| @@ -314,17 +345,18 @@ static const struct super_operations v9fs_super_ops = { | |||
| 314 | .evict_inode = v9fs_evict_inode, | 345 | .evict_inode = v9fs_evict_inode, |
| 315 | .show_options = generic_show_options, | 346 | .show_options = generic_show_options, |
| 316 | .umount_begin = v9fs_umount_begin, | 347 | .umount_begin = v9fs_umount_begin, |
| 348 | .write_inode = v9fs_write_inode, | ||
| 317 | }; | 349 | }; |
| 318 | 350 | ||
| 319 | static const struct super_operations v9fs_super_ops_dotl = { | 351 | static const struct super_operations v9fs_super_ops_dotl = { |
| 320 | .alloc_inode = v9fs_alloc_inode, | 352 | .alloc_inode = v9fs_alloc_inode, |
| 321 | .destroy_inode = v9fs_destroy_inode, | 353 | .destroy_inode = v9fs_destroy_inode, |
| 322 | .sync_fs = v9fs_sync_fs, | ||
| 323 | .statfs = v9fs_statfs, | 354 | .statfs = v9fs_statfs, |
| 324 | .drop_inode = v9fs_drop_inode, | 355 | .drop_inode = v9fs_drop_inode, |
| 325 | .evict_inode = v9fs_evict_inode, | 356 | .evict_inode = v9fs_evict_inode, |
| 326 | .show_options = generic_show_options, | 357 | .show_options = generic_show_options, |
| 327 | .umount_begin = v9fs_umount_begin, | 358 | .umount_begin = v9fs_umount_begin, |
| 359 | .write_inode = v9fs_write_inode_dotl, | ||
| 328 | }; | 360 | }; |
| 329 | 361 | ||
| 330 | struct file_system_type v9fs_fs_type = { | 362 | struct file_system_type v9fs_fs_type = { |
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index f34078d702d3..303983fabfd6 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
| @@ -941,9 +941,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
| 941 | current->mm->start_stack = bprm->p; | 941 | current->mm->start_stack = bprm->p; |
| 942 | 942 | ||
| 943 | #ifdef arch_randomize_brk | 943 | #ifdef arch_randomize_brk |
| 944 | if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) | 944 | if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) { |
| 945 | current->mm->brk = current->mm->start_brk = | 945 | current->mm->brk = current->mm->start_brk = |
| 946 | arch_randomize_brk(current->mm); | 946 | arch_randomize_brk(current->mm); |
| 947 | #ifdef CONFIG_COMPAT_BRK | ||
| 948 | current->brk_randomized = 1; | ||
| 949 | #endif | ||
| 950 | } | ||
| 947 | #endif | 951 | #endif |
| 948 | 952 | ||
| 949 | if (current->personality & MMAP_PAGE_ZERO) { | 953 | if (current->personality & MMAP_PAGE_ZERO) { |
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index de34bfad9ec3..5d505aaa72fb 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c | |||
| @@ -178,16 +178,17 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, | |||
| 178 | 178 | ||
| 179 | if (value) { | 179 | if (value) { |
| 180 | acl = posix_acl_from_xattr(value, size); | 180 | acl = posix_acl_from_xattr(value, size); |
| 181 | if (acl == NULL) { | 181 | if (acl) { |
| 182 | value = NULL; | 182 | ret = posix_acl_valid(acl); |
| 183 | size = 0; | 183 | if (ret) |
| 184 | goto out; | ||
| 184 | } else if (IS_ERR(acl)) { | 185 | } else if (IS_ERR(acl)) { |
| 185 | return PTR_ERR(acl); | 186 | return PTR_ERR(acl); |
| 186 | } | 187 | } |
| 187 | } | 188 | } |
| 188 | 189 | ||
| 189 | ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type); | 190 | ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type); |
| 190 | 191 | out: | |
| 191 | posix_acl_release(acl); | 192 | posix_acl_release(acl); |
| 192 | 193 | ||
| 193 | return ret; | 194 | return ret; |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 3458b5725540..2e61fe1b6b8c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
| @@ -740,8 +740,10 @@ struct btrfs_space_info { | |||
| 740 | */ | 740 | */ |
| 741 | unsigned long reservation_progress; | 741 | unsigned long reservation_progress; |
| 742 | 742 | ||
| 743 | int full; /* indicates that we cannot allocate any more | 743 | int full:1; /* indicates that we cannot allocate any more |
| 744 | chunks for this space */ | 744 | chunks for this space */ |
| 745 | int chunk_alloc:1; /* set if we are allocating a chunk */ | ||
| 746 | |||
| 745 | int force_alloc; /* set if we need to force a chunk alloc for | 747 | int force_alloc; /* set if we need to force a chunk alloc for |
| 746 | this space */ | 748 | this space */ |
| 747 | 749 | ||
| @@ -2576,6 +2578,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, | |||
| 2576 | int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, | 2578 | int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, |
| 2577 | struct inode *inode, u64 start, u64 end); | 2579 | struct inode *inode, u64 start, u64 end); |
| 2578 | int btrfs_release_file(struct inode *inode, struct file *file); | 2580 | int btrfs_release_file(struct inode *inode, struct file *file); |
| 2581 | void btrfs_drop_pages(struct page **pages, size_t num_pages); | ||
| 2582 | int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, | ||
| 2583 | struct page **pages, size_t num_pages, | ||
| 2584 | loff_t pos, size_t write_bytes, | ||
| 2585 | struct extent_state **cached); | ||
| 2579 | 2586 | ||
| 2580 | /* tree-defrag.c */ | 2587 | /* tree-defrag.c */ |
| 2581 | int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, | 2588 | int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8f1d44ba332f..68c84c8c24bd 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -3057,7 +3057,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) | |||
| 3057 | btrfs_destroy_pinned_extent(root, | 3057 | btrfs_destroy_pinned_extent(root, |
| 3058 | root->fs_info->pinned_extents); | 3058 | root->fs_info->pinned_extents); |
| 3059 | 3059 | ||
| 3060 | t->use_count = 0; | 3060 | atomic_set(&t->use_count, 0); |
| 3061 | list_del_init(&t->list); | 3061 | list_del_init(&t->list); |
| 3062 | memset(t, 0, sizeof(*t)); | 3062 | memset(t, 0, sizeof(*t)); |
| 3063 | kmem_cache_free(btrfs_transaction_cachep, t); | 3063 | kmem_cache_free(btrfs_transaction_cachep, t); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f619c3cb13b7..31f33ba56fe8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -33,6 +33,25 @@ | |||
| 33 | #include "locking.h" | 33 | #include "locking.h" |
| 34 | #include "free-space-cache.h" | 34 | #include "free-space-cache.h" |
| 35 | 35 | ||
| 36 | /* control flags for do_chunk_alloc's force field | ||
| 37 | * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk | ||
| 38 | * if we really need one. | ||
| 39 | * | ||
| 40 | * CHUNK_ALLOC_FORCE means it must try to allocate one | ||
| 41 | * | ||
| 42 | * CHUNK_ALLOC_LIMITED means to only try to allocate one | ||
| 43 | * if we have very few chunks already allocated. This is | ||
| 44 | * used as part of the clustering code to help make sure | ||
| 45 | * we have a good pool of storage to cluster in, without | ||
| 46 | * filling the FS with empty chunks | ||
| 47 | * | ||
| 48 | */ | ||
| 49 | enum { | ||
| 50 | CHUNK_ALLOC_NO_FORCE = 0, | ||
| 51 | CHUNK_ALLOC_FORCE = 1, | ||
| 52 | CHUNK_ALLOC_LIMITED = 2, | ||
| 53 | }; | ||
| 54 | |||
| 36 | static int update_block_group(struct btrfs_trans_handle *trans, | 55 | static int update_block_group(struct btrfs_trans_handle *trans, |
| 37 | struct btrfs_root *root, | 56 | struct btrfs_root *root, |
| 38 | u64 bytenr, u64 num_bytes, int alloc); | 57 | u64 bytenr, u64 num_bytes, int alloc); |
| @@ -3019,7 +3038,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, | |||
| 3019 | found->bytes_readonly = 0; | 3038 | found->bytes_readonly = 0; |
| 3020 | found->bytes_may_use = 0; | 3039 | found->bytes_may_use = 0; |
| 3021 | found->full = 0; | 3040 | found->full = 0; |
| 3022 | found->force_alloc = 0; | 3041 | found->force_alloc = CHUNK_ALLOC_NO_FORCE; |
| 3042 | found->chunk_alloc = 0; | ||
| 3023 | *space_info = found; | 3043 | *space_info = found; |
| 3024 | list_add_rcu(&found->list, &info->space_info); | 3044 | list_add_rcu(&found->list, &info->space_info); |
| 3025 | atomic_set(&found->caching_threads, 0); | 3045 | atomic_set(&found->caching_threads, 0); |
| @@ -3150,7 +3170,7 @@ again: | |||
| 3150 | if (!data_sinfo->full && alloc_chunk) { | 3170 | if (!data_sinfo->full && alloc_chunk) { |
| 3151 | u64 alloc_target; | 3171 | u64 alloc_target; |
| 3152 | 3172 | ||
| 3153 | data_sinfo->force_alloc = 1; | 3173 | data_sinfo->force_alloc = CHUNK_ALLOC_FORCE; |
| 3154 | spin_unlock(&data_sinfo->lock); | 3174 | spin_unlock(&data_sinfo->lock); |
| 3155 | alloc: | 3175 | alloc: |
| 3156 | alloc_target = btrfs_get_alloc_profile(root, 1); | 3176 | alloc_target = btrfs_get_alloc_profile(root, 1); |
| @@ -3160,7 +3180,8 @@ alloc: | |||
| 3160 | 3180 | ||
| 3161 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | 3181 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, |
| 3162 | bytes + 2 * 1024 * 1024, | 3182 | bytes + 2 * 1024 * 1024, |
| 3163 | alloc_target, 0); | 3183 | alloc_target, |
| 3184 | CHUNK_ALLOC_NO_FORCE); | ||
| 3164 | btrfs_end_transaction(trans, root); | 3185 | btrfs_end_transaction(trans, root); |
| 3165 | if (ret < 0) { | 3186 | if (ret < 0) { |
| 3166 | if (ret != -ENOSPC) | 3187 | if (ret != -ENOSPC) |
| @@ -3239,31 +3260,56 @@ static void force_metadata_allocation(struct btrfs_fs_info *info) | |||
| 3239 | rcu_read_lock(); | 3260 | rcu_read_lock(); |
| 3240 | list_for_each_entry_rcu(found, head, list) { | 3261 | list_for_each_entry_rcu(found, head, list) { |
| 3241 | if (found->flags & BTRFS_BLOCK_GROUP_METADATA) | 3262 | if (found->flags & BTRFS_BLOCK_GROUP_METADATA) |
| 3242 | found->force_alloc = 1; | 3263 | found->force_alloc = CHUNK_ALLOC_FORCE; |
| 3243 | } | 3264 | } |
| 3244 | rcu_read_unlock(); | 3265 | rcu_read_unlock(); |
| 3245 | } | 3266 | } |
| 3246 | 3267 | ||
| 3247 | static int should_alloc_chunk(struct btrfs_root *root, | 3268 | static int should_alloc_chunk(struct btrfs_root *root, |
| 3248 | struct btrfs_space_info *sinfo, u64 alloc_bytes) | 3269 | struct btrfs_space_info *sinfo, u64 alloc_bytes, |
| 3270 | int force) | ||
| 3249 | { | 3271 | { |
| 3250 | u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; | 3272 | u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; |
| 3273 | u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved; | ||
| 3251 | u64 thresh; | 3274 | u64 thresh; |
| 3252 | 3275 | ||
| 3253 | if (sinfo->bytes_used + sinfo->bytes_reserved + | 3276 | if (force == CHUNK_ALLOC_FORCE) |
| 3254 | alloc_bytes + 256 * 1024 * 1024 < num_bytes) | 3277 | return 1; |
| 3278 | |||
| 3279 | /* | ||
| 3280 | * in limited mode, we want to have some free space up to | ||
| 3281 | * about 1% of the FS size. | ||
| 3282 | */ | ||
| 3283 | if (force == CHUNK_ALLOC_LIMITED) { | ||
| 3284 | thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); | ||
| 3285 | thresh = max_t(u64, 64 * 1024 * 1024, | ||
| 3286 | div_factor_fine(thresh, 1)); | ||
| 3287 | |||
| 3288 | if (num_bytes - num_allocated < thresh) | ||
| 3289 | return 1; | ||
| 3290 | } | ||
| 3291 | |||
| 3292 | /* | ||
| 3293 | * we have two similar checks here, one based on a percentage | ||
| 3294 | * and one based on a hard number of 256MB. The idea | ||
| 3295 | * is that if we have a good amount of free | ||
| 3296 | * room, don't allocate a chunk. A good amount of room | ||
| 3297 | * means the chunks we have allocated are less than 80% | ||
| 3298 | * utilized, or more than 256MB is free | ||
| 3299 | */ | ||
| 3300 | if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes) | ||
| 3255 | return 0; | 3301 | return 0; |
| 3256 | 3302 | ||
| 3257 | if (sinfo->bytes_used + sinfo->bytes_reserved + | 3303 | if (num_allocated + alloc_bytes < div_factor(num_bytes, 8)) |
| 3258 | alloc_bytes < div_factor(num_bytes, 8)) | ||
| 3259 | return 0; | 3304 | return 0; |
| 3260 | 3305 | ||
| 3261 | thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); | 3306 | thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); |
| 3307 | |||
| 3308 | /* 256MB or 5% of the FS */ | ||
| 3262 | thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5)); | 3309 | thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5)); |
| 3263 | 3310 | ||
| 3264 | if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3)) | 3311 | if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3)) |
| 3265 | return 0; | 3312 | return 0; |
| 3266 | |||
| 3267 | return 1; | 3313 | return 1; |
| 3268 | } | 3314 | } |
| 3269 | 3315 | ||
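For readers following the new allocation policy, the decision implemented by should_alloc_chunk() above can be summarized as a standalone sketch. This is an illustration only: "chunk_space" stands for total_bytes minus bytes_readonly (space already parcelled out to chunks of this type), "in_use" for bytes_used plus bytes_reserved within those chunks, and the btrfs helpers div_factor(n, f) and div_factor_fine(n, f) are modelled as n * f / 10 and n * f / 100.

    #include <stdint.h>

    enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_FORCE = 1,
        CHUNK_ALLOC_LIMITED = 2,
    };

    /* Sketch of the should_alloc_chunk() policy: return 1 to allocate a new
     * chunk, 0 to skip.  All sizes are in bytes; fs_total is the whole FS. */
    static int should_alloc_chunk_sketch(uint64_t fs_total, uint64_t chunk_space,
                                         uint64_t in_use, uint64_t used,
                                         uint64_t alloc_bytes, int force)
    {
        uint64_t thresh;

        if (force == CHUNK_ALLOC_FORCE)
            return 1;

        /* limited mode: keep roughly 1% of the FS (at least 64MB) of free
         * room inside the chunks we already have */
        if (force == CHUNK_ALLOC_LIMITED) {
            thresh = fs_total / 100;                   /* div_factor_fine(, 1) */
            if (thresh < 64ULL * 1024 * 1024)
                thresh = 64ULL * 1024 * 1024;
            if (chunk_space - in_use < thresh)
                return 1;
        }

        /* more than 256MB would still be free after this allocation */
        if (in_use + alloc_bytes + 256ULL * 1024 * 1024 < chunk_space)
            return 0;

        /* existing chunks are still less than 80% utilized */
        if (in_use + alloc_bytes < chunk_space * 8 / 10)   /* div_factor(, 8) */
            return 0;

        /* chunk space above max(256MB, 5% of the FS) but usage under 30% */
        thresh = fs_total * 5 / 100;                       /* div_factor_fine(, 5) */
        if (thresh < 256ULL * 1024 * 1024)
            thresh = 256ULL * 1024 * 1024;
        if (chunk_space > thresh && used < chunk_space * 3 / 10)
            return 0;

        return 1;
    }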
| @@ -3273,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, | |||
| 3273 | { | 3319 | { |
| 3274 | struct btrfs_space_info *space_info; | 3320 | struct btrfs_space_info *space_info; |
| 3275 | struct btrfs_fs_info *fs_info = extent_root->fs_info; | 3321 | struct btrfs_fs_info *fs_info = extent_root->fs_info; |
| 3322 | int wait_for_alloc = 0; | ||
| 3276 | int ret = 0; | 3323 | int ret = 0; |
| 3277 | 3324 | ||
| 3278 | mutex_lock(&fs_info->chunk_mutex); | ||
| 3279 | |||
| 3280 | flags = btrfs_reduce_alloc_profile(extent_root, flags); | 3325 | flags = btrfs_reduce_alloc_profile(extent_root, flags); |
| 3281 | 3326 | ||
| 3282 | space_info = __find_space_info(extent_root->fs_info, flags); | 3327 | space_info = __find_space_info(extent_root->fs_info, flags); |
| @@ -3287,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, | |||
| 3287 | } | 3332 | } |
| 3288 | BUG_ON(!space_info); | 3333 | BUG_ON(!space_info); |
| 3289 | 3334 | ||
| 3335 | again: | ||
| 3290 | spin_lock(&space_info->lock); | 3336 | spin_lock(&space_info->lock); |
| 3291 | if (space_info->force_alloc) | 3337 | if (space_info->force_alloc) |
| 3292 | force = 1; | 3338 | force = space_info->force_alloc; |
| 3293 | if (space_info->full) { | 3339 | if (space_info->full) { |
| 3294 | spin_unlock(&space_info->lock); | 3340 | spin_unlock(&space_info->lock); |
| 3295 | goto out; | 3341 | return 0; |
| 3296 | } | 3342 | } |
| 3297 | 3343 | ||
| 3298 | if (!force && !should_alloc_chunk(extent_root, space_info, | 3344 | if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) { |
| 3299 | alloc_bytes)) { | ||
| 3300 | spin_unlock(&space_info->lock); | 3345 | spin_unlock(&space_info->lock); |
| 3301 | goto out; | 3346 | return 0; |
| 3347 | } else if (space_info->chunk_alloc) { | ||
| 3348 | wait_for_alloc = 1; | ||
| 3349 | } else { | ||
| 3350 | space_info->chunk_alloc = 1; | ||
| 3302 | } | 3351 | } |
| 3352 | |||
| 3303 | spin_unlock(&space_info->lock); | 3353 | spin_unlock(&space_info->lock); |
| 3304 | 3354 | ||
| 3355 | mutex_lock(&fs_info->chunk_mutex); | ||
| 3356 | |||
| 3357 | /* | ||
| 3358 | * The chunk_mutex is held throughout the entirety of a chunk | ||
| 3359 | * allocation, so once we've acquired the chunk_mutex we know that the | ||
| 3360 | * other allocation has finished and we need to recheck and see if we should | ||
| 3361 | * allocate. | ||
| 3362 | */ | ||
| 3363 | if (wait_for_alloc) { | ||
| 3364 | mutex_unlock(&fs_info->chunk_mutex); | ||
| 3365 | wait_for_alloc = 0; | ||
| 3366 | goto again; | ||
| 3367 | } | ||
| 3368 | |||
| 3305 | /* | 3369 | /* |
| 3306 | * If we have mixed data/metadata chunks we want to make sure we keep | 3370 | * If we have mixed data/metadata chunks we want to make sure we keep |
| 3307 | * allocating mixed chunks instead of individual chunks. | 3371 | * allocating mixed chunks instead of individual chunks. |
| @@ -3327,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, | |||
| 3327 | space_info->full = 1; | 3391 | space_info->full = 1; |
| 3328 | else | 3392 | else |
| 3329 | ret = 1; | 3393 | ret = 1; |
| 3330 | space_info->force_alloc = 0; | 3394 | |
| 3395 | space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; | ||
| 3396 | space_info->chunk_alloc = 0; | ||
| 3331 | spin_unlock(&space_info->lock); | 3397 | spin_unlock(&space_info->lock); |
| 3332 | out: | ||
| 3333 | mutex_unlock(&extent_root->fs_info->chunk_mutex); | 3398 | mutex_unlock(&extent_root->fs_info->chunk_mutex); |
| 3334 | return ret; | 3399 | return ret; |
| 3335 | } | 3400 | } |
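The locking change in do_chunk_alloc() above is easy to miss in the diff: chunk_mutex is no longer taken up front; instead each space_info carries a chunk_alloc bit that is tested and set under the spinlock, and a caller that loses the race simply waits for chunk_mutex and retries the check. A rough userspace model of that pattern follows; pthread mutexes stand in for the kernel spinlock and mutex, and this is a sketch of the idea, not the kernel code.

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t info_lock = PTHREAD_MUTEX_INITIALIZER;  /* space_info->lock */
    static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
    static bool chunk_alloc_in_progress;                           /* space_info->chunk_alloc */
    static bool space_full;                                        /* space_info->full */

    static void alloc_chunk(bool (*should_alloc)(void), void (*do_alloc)(void))
    {
        bool wait_for_alloc = false;

    again:
        pthread_mutex_lock(&info_lock);
        if (space_full || !should_alloc()) {
            pthread_mutex_unlock(&info_lock);
            return;                           /* nothing to do */
        }
        if (chunk_alloc_in_progress)
            wait_for_alloc = true;            /* someone else is already allocating */
        else
            chunk_alloc_in_progress = true;   /* we own this allocation */
        pthread_mutex_unlock(&info_lock);

        pthread_mutex_lock(&chunk_mutex);
        if (wait_for_alloc) {
            /* the owner holds chunk_mutex for the whole allocation, so once
             * we acquire it that allocation is finished; recheck from the top */
            pthread_mutex_unlock(&chunk_mutex);
            wait_for_alloc = false;
            goto again;
        }

        do_alloc();                           /* the actual chunk allocation */

        pthread_mutex_lock(&info_lock);
        chunk_alloc_in_progress = false;      /* let waiters proceed on recheck */
        pthread_mutex_unlock(&info_lock);
        pthread_mutex_unlock(&chunk_mutex);
    }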
| @@ -5303,11 +5368,13 @@ loop: | |||
| 5303 | 5368 | ||
| 5304 | if (allowed_chunk_alloc) { | 5369 | if (allowed_chunk_alloc) { |
| 5305 | ret = do_chunk_alloc(trans, root, num_bytes + | 5370 | ret = do_chunk_alloc(trans, root, num_bytes + |
| 5306 | 2 * 1024 * 1024, data, 1); | 5371 | 2 * 1024 * 1024, data, |
| 5372 | CHUNK_ALLOC_LIMITED); | ||
| 5307 | allowed_chunk_alloc = 0; | 5373 | allowed_chunk_alloc = 0; |
| 5308 | done_chunk_alloc = 1; | 5374 | done_chunk_alloc = 1; |
| 5309 | } else if (!done_chunk_alloc) { | 5375 | } else if (!done_chunk_alloc && |
| 5310 | space_info->force_alloc = 1; | 5376 | space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) { |
| 5377 | space_info->force_alloc = CHUNK_ALLOC_LIMITED; | ||
| 5311 | } | 5378 | } |
| 5312 | 5379 | ||
| 5313 | if (loop < LOOP_NO_EMPTY_SIZE) { | 5380 | if (loop < LOOP_NO_EMPTY_SIZE) { |
| @@ -5393,7 +5460,8 @@ again: | |||
| 5393 | */ | 5460 | */ |
| 5394 | if (empty_size || root->ref_cows) | 5461 | if (empty_size || root->ref_cows) |
| 5395 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | 5462 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, |
| 5396 | num_bytes + 2 * 1024 * 1024, data, 0); | 5463 | num_bytes + 2 * 1024 * 1024, data, |
| 5464 | CHUNK_ALLOC_NO_FORCE); | ||
| 5397 | 5465 | ||
| 5398 | WARN_ON(num_bytes < root->sectorsize); | 5466 | WARN_ON(num_bytes < root->sectorsize); |
| 5399 | ret = find_free_extent(trans, root, num_bytes, empty_size, | 5467 | ret = find_free_extent(trans, root, num_bytes, empty_size, |
| @@ -5405,7 +5473,7 @@ again: | |||
| 5405 | num_bytes = num_bytes & ~(root->sectorsize - 1); | 5473 | num_bytes = num_bytes & ~(root->sectorsize - 1); |
| 5406 | num_bytes = max(num_bytes, min_alloc_size); | 5474 | num_bytes = max(num_bytes, min_alloc_size); |
| 5407 | do_chunk_alloc(trans, root->fs_info->extent_root, | 5475 | do_chunk_alloc(trans, root->fs_info->extent_root, |
| 5408 | num_bytes, data, 1); | 5476 | num_bytes, data, CHUNK_ALLOC_FORCE); |
| 5409 | goto again; | 5477 | goto again; |
| 5410 | } | 5478 | } |
| 5411 | if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { | 5479 | if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { |
| @@ -8109,13 +8177,15 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, | |||
| 8109 | 8177 | ||
| 8110 | alloc_flags = update_block_group_flags(root, cache->flags); | 8178 | alloc_flags = update_block_group_flags(root, cache->flags); |
| 8111 | if (alloc_flags != cache->flags) | 8179 | if (alloc_flags != cache->flags) |
| 8112 | do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); | 8180 | do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, |
| 8181 | CHUNK_ALLOC_FORCE); | ||
| 8113 | 8182 | ||
| 8114 | ret = set_block_group_ro(cache); | 8183 | ret = set_block_group_ro(cache); |
| 8115 | if (!ret) | 8184 | if (!ret) |
| 8116 | goto out; | 8185 | goto out; |
| 8117 | alloc_flags = get_alloc_profile(root, cache->space_info->flags); | 8186 | alloc_flags = get_alloc_profile(root, cache->space_info->flags); |
| 8118 | ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); | 8187 | ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, |
| 8188 | CHUNK_ALLOC_FORCE); | ||
| 8119 | if (ret < 0) | 8189 | if (ret < 0) |
| 8120 | goto out; | 8190 | goto out; |
| 8121 | ret = set_block_group_ro(cache); | 8191 | ret = set_block_group_ro(cache); |
| @@ -8128,7 +8198,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, | |||
| 8128 | struct btrfs_root *root, u64 type) | 8198 | struct btrfs_root *root, u64 type) |
| 8129 | { | 8199 | { |
| 8130 | u64 alloc_flags = get_alloc_profile(root, type); | 8200 | u64 alloc_flags = get_alloc_profile(root, type); |
| 8131 | return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); | 8201 | return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, |
| 8202 | CHUNK_ALLOC_FORCE); | ||
| 8132 | } | 8203 | } |
| 8133 | 8204 | ||
| 8134 | /* | 8205 | /* |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 20ddb28602a8..315138605088 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
| @@ -690,6 +690,15 @@ static void cache_state(struct extent_state *state, | |||
| 690 | } | 690 | } |
| 691 | } | 691 | } |
| 692 | 692 | ||
| 693 | static void uncache_state(struct extent_state **cached_ptr) | ||
| 694 | { | ||
| 695 | if (cached_ptr && (*cached_ptr)) { | ||
| 696 | struct extent_state *state = *cached_ptr; | ||
| 697 | *cached_ptr = NULL; | ||
| 698 | free_extent_state(state); | ||
| 699 | } | ||
| 700 | } | ||
| 701 | |||
| 693 | /* | 702 | /* |
| 694 | * set some bits on a range in the tree. This may require allocations or | 703 | * set some bits on a range in the tree. This may require allocations or |
| 695 | * sleeping, so the gfp mask is used to indicate what is allowed. | 704 | * sleeping, so the gfp mask is used to indicate what is allowed. |
| @@ -940,10 +949,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end, | |||
| 940 | } | 949 | } |
| 941 | 950 | ||
| 942 | int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, | 951 | int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, |
| 943 | gfp_t mask) | 952 | struct extent_state **cached_state, gfp_t mask) |
| 944 | { | 953 | { |
| 945 | return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL, | 954 | return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, |
| 946 | NULL, mask); | 955 | NULL, cached_state, mask); |
| 947 | } | 956 | } |
| 948 | 957 | ||
| 949 | static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, | 958 | static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, |
| @@ -1012,8 +1021,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, | |||
| 1012 | mask); | 1021 | mask); |
| 1013 | } | 1022 | } |
| 1014 | 1023 | ||
| 1015 | int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, | 1024 | int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) |
| 1016 | gfp_t mask) | ||
| 1017 | { | 1025 | { |
| 1018 | return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, | 1026 | return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, |
| 1019 | mask); | 1027 | mask); |
| @@ -1735,6 +1743,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
| 1735 | 1743 | ||
| 1736 | do { | 1744 | do { |
| 1737 | struct page *page = bvec->bv_page; | 1745 | struct page *page = bvec->bv_page; |
| 1746 | struct extent_state *cached = NULL; | ||
| 1747 | struct extent_state *state; | ||
| 1748 | |||
| 1738 | tree = &BTRFS_I(page->mapping->host)->io_tree; | 1749 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
| 1739 | 1750 | ||
| 1740 | start = ((u64)page->index << PAGE_CACHE_SHIFT) + | 1751 | start = ((u64)page->index << PAGE_CACHE_SHIFT) + |
| @@ -1749,9 +1760,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
| 1749 | if (++bvec <= bvec_end) | 1760 | if (++bvec <= bvec_end) |
| 1750 | prefetchw(&bvec->bv_page->flags); | 1761 | prefetchw(&bvec->bv_page->flags); |
| 1751 | 1762 | ||
| 1763 | spin_lock(&tree->lock); | ||
| 1764 | state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED); | ||
| 1765 | if (state && state->start == start) { | ||
| 1766 | /* | ||
| 1767 | * take a reference on the state, unlock will drop | ||
| 1768 | * the ref | ||
| 1769 | */ | ||
| 1770 | cache_state(state, &cached); | ||
| 1771 | } | ||
| 1772 | spin_unlock(&tree->lock); | ||
| 1773 | |||
| 1752 | if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { | 1774 | if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { |
| 1753 | ret = tree->ops->readpage_end_io_hook(page, start, end, | 1775 | ret = tree->ops->readpage_end_io_hook(page, start, end, |
| 1754 | NULL); | 1776 | state); |
| 1755 | if (ret) | 1777 | if (ret) |
| 1756 | uptodate = 0; | 1778 | uptodate = 0; |
| 1757 | } | 1779 | } |
| @@ -1764,15 +1786,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
| 1764 | test_bit(BIO_UPTODATE, &bio->bi_flags); | 1786 | test_bit(BIO_UPTODATE, &bio->bi_flags); |
| 1765 | if (err) | 1787 | if (err) |
| 1766 | uptodate = 0; | 1788 | uptodate = 0; |
| 1789 | uncache_state(&cached); | ||
| 1767 | continue; | 1790 | continue; |
| 1768 | } | 1791 | } |
| 1769 | } | 1792 | } |
| 1770 | 1793 | ||
| 1771 | if (uptodate) { | 1794 | if (uptodate) { |
| 1772 | set_extent_uptodate(tree, start, end, | 1795 | set_extent_uptodate(tree, start, end, &cached, |
| 1773 | GFP_ATOMIC); | 1796 | GFP_ATOMIC); |
| 1774 | } | 1797 | } |
| 1775 | unlock_extent(tree, start, end, GFP_ATOMIC); | 1798 | unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); |
| 1776 | 1799 | ||
| 1777 | if (whole_page) { | 1800 | if (whole_page) { |
| 1778 | if (uptodate) { | 1801 | if (uptodate) { |
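The readpage completion path above now looks up the locked extent_state once under tree->lock, pins it with cache_state(), and hands it to readpage_end_io_hook() and unlock_extent_cached(); the early-continue error path drops the pin with uncache_state(). The reference handling behind that can be modelled in a few lines of userspace C. The names mirror the helpers in this file, but this is only a sketch of the refcount discipline, not the kernel implementation.

    #include <stdatomic.h>
    #include <stdlib.h>

    struct state {
        atomic_int refs;        /* stands in for extent_state->refs */
        /* ... the cached range would live here ... */
    };

    static struct state *state_alloc(void)
    {
        struct state *s = calloc(1, sizeof(*s));
        if (s)
            atomic_init(&s->refs, 1);   /* the tree's own reference */
        return s;
    }

    static void state_get(struct state *s)
    {
        atomic_fetch_add(&s->refs, 1);
    }

    static void state_put(struct state *s)      /* free_extent_state() analogue */
    {
        if (atomic_fetch_sub(&s->refs, 1) == 1)
            free(s);
    }

    /* cache_state(): pin a state found during a tree search so it can be
     * reused later without searching (and locking) the tree again. */
    static void cache_state(struct state *s, struct state **cached)
    {
        if (cached && !*cached) {
            *cached = s;
            state_get(s);
        }
    }

    /* uncache_state(): drop the pinned reference on paths that never hand
     * the cached state to an unlock helper that would drop it. */
    static void uncache_state(struct state **cached)
    {
        if (cached && *cached) {
            struct state *s = *cached;
            *cached = NULL;
            state_put(s);
        }
    }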
| @@ -1811,6 +1834,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err) | |||
| 1811 | 1834 | ||
| 1812 | do { | 1835 | do { |
| 1813 | struct page *page = bvec->bv_page; | 1836 | struct page *page = bvec->bv_page; |
| 1837 | struct extent_state *cached = NULL; | ||
| 1814 | tree = &BTRFS_I(page->mapping->host)->io_tree; | 1838 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
| 1815 | 1839 | ||
| 1816 | start = ((u64)page->index << PAGE_CACHE_SHIFT) + | 1840 | start = ((u64)page->index << PAGE_CACHE_SHIFT) + |
| @@ -1821,13 +1845,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err) | |||
| 1821 | prefetchw(&bvec->bv_page->flags); | 1845 | prefetchw(&bvec->bv_page->flags); |
| 1822 | 1846 | ||
| 1823 | if (uptodate) { | 1847 | if (uptodate) { |
| 1824 | set_extent_uptodate(tree, start, end, GFP_ATOMIC); | 1848 | set_extent_uptodate(tree, start, end, &cached, |
| 1849 | GFP_ATOMIC); | ||
| 1825 | } else { | 1850 | } else { |
| 1826 | ClearPageUptodate(page); | 1851 | ClearPageUptodate(page); |
| 1827 | SetPageError(page); | 1852 | SetPageError(page); |
| 1828 | } | 1853 | } |
| 1829 | 1854 | ||
| 1830 | unlock_extent(tree, start, end, GFP_ATOMIC); | 1855 | unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); |
| 1831 | 1856 | ||
| 1832 | } while (bvec >= bio->bi_io_vec); | 1857 | } while (bvec >= bio->bi_io_vec); |
| 1833 | 1858 | ||
| @@ -2016,14 +2041,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
| 2016 | while (cur <= end) { | 2041 | while (cur <= end) { |
| 2017 | if (cur >= last_byte) { | 2042 | if (cur >= last_byte) { |
| 2018 | char *userpage; | 2043 | char *userpage; |
| 2044 | struct extent_state *cached = NULL; | ||
| 2045 | |||
| 2019 | iosize = PAGE_CACHE_SIZE - page_offset; | 2046 | iosize = PAGE_CACHE_SIZE - page_offset; |
| 2020 | userpage = kmap_atomic(page, KM_USER0); | 2047 | userpage = kmap_atomic(page, KM_USER0); |
| 2021 | memset(userpage + page_offset, 0, iosize); | 2048 | memset(userpage + page_offset, 0, iosize); |
| 2022 | flush_dcache_page(page); | 2049 | flush_dcache_page(page); |
| 2023 | kunmap_atomic(userpage, KM_USER0); | 2050 | kunmap_atomic(userpage, KM_USER0); |
| 2024 | set_extent_uptodate(tree, cur, cur + iosize - 1, | 2051 | set_extent_uptodate(tree, cur, cur + iosize - 1, |
| 2025 | GFP_NOFS); | 2052 | &cached, GFP_NOFS); |
| 2026 | unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); | 2053 | unlock_extent_cached(tree, cur, cur + iosize - 1, |
| 2054 | &cached, GFP_NOFS); | ||
| 2027 | break; | 2055 | break; |
| 2028 | } | 2056 | } |
| 2029 | em = get_extent(inode, page, page_offset, cur, | 2057 | em = get_extent(inode, page, page_offset, cur, |
| @@ -2063,14 +2091,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
| 2063 | /* we've found a hole, just zero and go on */ | 2091 | /* we've found a hole, just zero and go on */ |
| 2064 | if (block_start == EXTENT_MAP_HOLE) { | 2092 | if (block_start == EXTENT_MAP_HOLE) { |
| 2065 | char *userpage; | 2093 | char *userpage; |
| 2094 | struct extent_state *cached = NULL; | ||
| 2095 | |||
| 2066 | userpage = kmap_atomic(page, KM_USER0); | 2096 | userpage = kmap_atomic(page, KM_USER0); |
| 2067 | memset(userpage + page_offset, 0, iosize); | 2097 | memset(userpage + page_offset, 0, iosize); |
| 2068 | flush_dcache_page(page); | 2098 | flush_dcache_page(page); |
| 2069 | kunmap_atomic(userpage, KM_USER0); | 2099 | kunmap_atomic(userpage, KM_USER0); |
| 2070 | 2100 | ||
| 2071 | set_extent_uptodate(tree, cur, cur + iosize - 1, | 2101 | set_extent_uptodate(tree, cur, cur + iosize - 1, |
| 2072 | GFP_NOFS); | 2102 | &cached, GFP_NOFS); |
| 2073 | unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); | 2103 | unlock_extent_cached(tree, cur, cur + iosize - 1, |
| 2104 | &cached, GFP_NOFS); | ||
| 2074 | cur = cur + iosize; | 2105 | cur = cur + iosize; |
| 2075 | page_offset += iosize; | 2106 | page_offset += iosize; |
| 2076 | continue; | 2107 | continue; |
| @@ -2789,9 +2820,12 @@ int extent_prepare_write(struct extent_io_tree *tree, | |||
| 2789 | iocount++; | 2820 | iocount++; |
| 2790 | block_start = block_start + iosize; | 2821 | block_start = block_start + iosize; |
| 2791 | } else { | 2822 | } else { |
| 2792 | set_extent_uptodate(tree, block_start, cur_end, | 2823 | struct extent_state *cached = NULL; |
| 2824 | |||
| 2825 | set_extent_uptodate(tree, block_start, cur_end, &cached, | ||
| 2793 | GFP_NOFS); | 2826 | GFP_NOFS); |
| 2794 | unlock_extent(tree, block_start, cur_end, GFP_NOFS); | 2827 | unlock_extent_cached(tree, block_start, cur_end, |
| 2828 | &cached, GFP_NOFS); | ||
| 2795 | block_start = cur_end + 1; | 2829 | block_start = cur_end + 1; |
| 2796 | } | 2830 | } |
| 2797 | page_offset = block_start & (PAGE_CACHE_SIZE - 1); | 2831 | page_offset = block_start & (PAGE_CACHE_SIZE - 1); |
| @@ -3457,7 +3491,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree, | |||
| 3457 | num_pages = num_extent_pages(eb->start, eb->len); | 3491 | num_pages = num_extent_pages(eb->start, eb->len); |
| 3458 | 3492 | ||
| 3459 | set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, | 3493 | set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, |
| 3460 | GFP_NOFS); | 3494 | NULL, GFP_NOFS); |
| 3461 | for (i = 0; i < num_pages; i++) { | 3495 | for (i = 0; i < num_pages; i++) { |
| 3462 | page = extent_buffer_page(eb, i); | 3496 | page = extent_buffer_page(eb, i); |
| 3463 | if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) || | 3497 | if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) || |
| @@ -3885,6 +3919,12 @@ static void move_pages(struct page *dst_page, struct page *src_page, | |||
| 3885 | kunmap_atomic(dst_kaddr, KM_USER0); | 3919 | kunmap_atomic(dst_kaddr, KM_USER0); |
| 3886 | } | 3920 | } |
| 3887 | 3921 | ||
| 3922 | static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len) | ||
| 3923 | { | ||
| 3924 | unsigned long distance = (src > dst) ? src - dst : dst - src; | ||
| 3925 | return distance < len; | ||
| 3926 | } | ||
| 3927 | |||
| 3888 | static void copy_pages(struct page *dst_page, struct page *src_page, | 3928 | static void copy_pages(struct page *dst_page, struct page *src_page, |
| 3889 | unsigned long dst_off, unsigned long src_off, | 3929 | unsigned long dst_off, unsigned long src_off, |
| 3890 | unsigned long len) | 3930 | unsigned long len) |
| @@ -3892,10 +3932,12 @@ static void copy_pages(struct page *dst_page, struct page *src_page, | |||
| 3892 | char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); | 3932 | char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); |
| 3893 | char *src_kaddr; | 3933 | char *src_kaddr; |
| 3894 | 3934 | ||
| 3895 | if (dst_page != src_page) | 3935 | if (dst_page != src_page) { |
| 3896 | src_kaddr = kmap_atomic(src_page, KM_USER1); | 3936 | src_kaddr = kmap_atomic(src_page, KM_USER1); |
| 3897 | else | 3937 | } else { |
| 3898 | src_kaddr = dst_kaddr; | 3938 | src_kaddr = dst_kaddr; |
| 3939 | BUG_ON(areas_overlap(src_off, dst_off, len)); | ||
| 3940 | } | ||
| 3899 | 3941 | ||
| 3900 | memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); | 3942 | memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); |
| 3901 | kunmap_atomic(dst_kaddr, KM_USER0); | 3943 | kunmap_atomic(dst_kaddr, KM_USER0); |
| @@ -3970,7 +4012,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, | |||
| 3970 | "len %lu len %lu\n", dst_offset, len, dst->len); | 4012 | "len %lu len %lu\n", dst_offset, len, dst->len); |
| 3971 | BUG_ON(1); | 4013 | BUG_ON(1); |
| 3972 | } | 4014 | } |
| 3973 | if (dst_offset < src_offset) { | 4015 | if (!areas_overlap(src_offset, dst_offset, len)) { |
| 3974 | memcpy_extent_buffer(dst, dst_offset, src_offset, len); | 4016 | memcpy_extent_buffer(dst, dst_offset, src_offset, len); |
| 3975 | return; | 4017 | return; |
| 3976 | } | 4018 | } |
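The new areas_overlap() helper reduces the old "dst_offset < src_offset" test to the real question: two byte ranges of length len overlap exactly when their start offsets are closer together than len, and only then does memmove_extent_buffer() need the backwards copy. A small self-contained check of that property (the values are made up for illustration):

    #include <assert.h>
    #include <stdbool.h>

    static bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
    {
        unsigned long distance = (src > dst) ? src - dst : dst - src;
        return distance < len;
    }

    int main(void)
    {
        /* distance 4096 >= len 4096: the ranges are disjoint, so a plain
         * forward memcpy_extent_buffer() is safe */
        assert(!areas_overlap(0, 4096, 4096));

        /* distance 100 < len 4096: the ranges share bytes, so the
         * backwards copy path must be used instead */
        assert(areas_overlap(0, 100, 4096));

        /* overlapping offsets within the same page would be a bug; copy_pages()
         * now catches that case with BUG_ON(areas_overlap(src_off, dst_off, len)) */
        assert(areas_overlap(512, 512, 1));
        return 0;
    }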
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index f62c5442835d..af2d7179c372 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
| @@ -208,7 +208,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | |||
| 208 | int bits, int exclusive_bits, u64 *failed_start, | 208 | int bits, int exclusive_bits, u64 *failed_start, |
| 209 | struct extent_state **cached_state, gfp_t mask); | 209 | struct extent_state **cached_state, gfp_t mask); |
| 210 | int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, | 210 | int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, |
| 211 | gfp_t mask); | 211 | struct extent_state **cached_state, gfp_t mask); |
| 212 | int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, | 212 | int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, |
| 213 | gfp_t mask); | 213 | gfp_t mask); |
| 214 | int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, | 214 | int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index e621ea54a3fd..75899a01dded 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
| @@ -104,7 +104,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, | |||
| 104 | /* | 104 | /* |
| 105 | * unlocks pages after btrfs_file_write is done with them | 105 | * unlocks pages after btrfs_file_write is done with them |
| 106 | */ | 106 | */ |
| 107 | static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) | 107 | void btrfs_drop_pages(struct page **pages, size_t num_pages) |
| 108 | { | 108 | { |
| 109 | size_t i; | 109 | size_t i; |
| 110 | for (i = 0; i < num_pages; i++) { | 110 | for (i = 0; i < num_pages; i++) { |
| @@ -127,16 +127,13 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) | |||
| 127 | * this also makes the decision about creating an inline extent vs | 127 | * this also makes the decision about creating an inline extent vs |
| 128 | * doing real data extents, marking pages dirty and delalloc as required. | 128 | * doing real data extents, marking pages dirty and delalloc as required. |
| 129 | */ | 129 | */ |
| 130 | static noinline int dirty_and_release_pages(struct btrfs_root *root, | 130 | int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, |
| 131 | struct file *file, | 131 | struct page **pages, size_t num_pages, |
| 132 | struct page **pages, | 132 | loff_t pos, size_t write_bytes, |
| 133 | size_t num_pages, | 133 | struct extent_state **cached) |
| 134 | loff_t pos, | ||
| 135 | size_t write_bytes) | ||
| 136 | { | 134 | { |
| 137 | int err = 0; | 135 | int err = 0; |
| 138 | int i; | 136 | int i; |
| 139 | struct inode *inode = fdentry(file)->d_inode; | ||
| 140 | u64 num_bytes; | 137 | u64 num_bytes; |
| 141 | u64 start_pos; | 138 | u64 start_pos; |
| 142 | u64 end_of_last_block; | 139 | u64 end_of_last_block; |
| @@ -149,7 +146,7 @@ static noinline int dirty_and_release_pages(struct btrfs_root *root, | |||
| 149 | 146 | ||
| 150 | end_of_last_block = start_pos + num_bytes - 1; | 147 | end_of_last_block = start_pos + num_bytes - 1; |
| 151 | err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, | 148 | err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, |
| 152 | NULL); | 149 | cached); |
| 153 | if (err) | 150 | if (err) |
| 154 | return err; | 151 | return err; |
| 155 | 152 | ||
| @@ -992,9 +989,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, | |||
| 992 | } | 989 | } |
| 993 | 990 | ||
| 994 | if (copied > 0) { | 991 | if (copied > 0) { |
| 995 | ret = dirty_and_release_pages(root, file, pages, | 992 | ret = btrfs_dirty_pages(root, inode, pages, |
| 996 | dirty_pages, pos, | 993 | dirty_pages, pos, copied, |
| 997 | copied); | 994 | NULL); |
| 998 | if (ret) { | 995 | if (ret) { |
| 999 | btrfs_delalloc_release_space(inode, | 996 | btrfs_delalloc_release_space(inode, |
| 1000 | dirty_pages << PAGE_CACHE_SHIFT); | 997 | dirty_pages << PAGE_CACHE_SHIFT); |
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f561c953205b..11d2e9cea09e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
| @@ -508,6 +508,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
| 508 | struct inode *inode; | 508 | struct inode *inode; |
| 509 | struct rb_node *node; | 509 | struct rb_node *node; |
| 510 | struct list_head *pos, *n; | 510 | struct list_head *pos, *n; |
| 511 | struct page **pages; | ||
| 511 | struct page *page; | 512 | struct page *page; |
| 512 | struct extent_state *cached_state = NULL; | 513 | struct extent_state *cached_state = NULL; |
| 513 | struct btrfs_free_cluster *cluster = NULL; | 514 | struct btrfs_free_cluster *cluster = NULL; |
| @@ -517,13 +518,13 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
| 517 | u64 start, end, len; | 518 | u64 start, end, len; |
| 518 | u64 bytes = 0; | 519 | u64 bytes = 0; |
| 519 | u32 *crc, *checksums; | 520 | u32 *crc, *checksums; |
| 520 | pgoff_t index = 0, last_index = 0; | ||
| 521 | unsigned long first_page_offset; | 521 | unsigned long first_page_offset; |
| 522 | int num_checksums; | 522 | int index = 0, num_pages = 0; |
| 523 | int entries = 0; | 523 | int entries = 0; |
| 524 | int bitmaps = 0; | 524 | int bitmaps = 0; |
| 525 | int ret = 0; | 525 | int ret = 0; |
| 526 | bool next_page = false; | 526 | bool next_page = false; |
| 527 | bool out_of_space = false; | ||
| 527 | 528 | ||
| 528 | root = root->fs_info->tree_root; | 529 | root = root->fs_info->tree_root; |
| 529 | 530 | ||
| @@ -551,24 +552,31 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
| 551 | return 0; | 552 | return 0; |
| 552 | } | 553 | } |
| 553 | 554 | ||
| 554 | last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; | 555 | num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> |
| 556 | PAGE_CACHE_SHIFT; | ||
| 555 | filemap_write_and_wait(inode->i_mapping); | 557 | filemap_write_and_wait(inode->i_mapping); |
| 556 | btrfs_wait_ordered_range(inode, inode->i_size & | 558 | btrfs_wait_ordered_range(inode, inode->i_size & |
| 557 | ~(root->sectorsize - 1), (u64)-1); | 559 | ~(root->sectorsize - 1), (u64)-1); |
| 558 | 560 | ||
| 559 | /* We need a checksum per page. */ | 561 | /* We need a checksum per page. */ |
| 560 | num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE; | 562 | crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); |
| 561 | crc = checksums = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS); | ||
| 562 | if (!crc) { | 563 | if (!crc) { |
| 563 | iput(inode); | 564 | iput(inode); |
| 564 | return 0; | 565 | return 0; |
| 565 | } | 566 | } |
| 566 | 567 | ||
| 568 | pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); | ||
| 569 | if (!pages) { | ||
| 570 | kfree(crc); | ||
| 571 | iput(inode); | ||
| 572 | return 0; | ||
| 573 | } | ||
| 574 | |||
| 567 | /* Since the first page has all of our checksums and our generation we | 575 | /* Since the first page has all of our checksums and our generation we |
| 568 | * need to calculate the offset into the page at which we can start writing | 576 | * need to calculate the offset into the page at which we can start writing |
| 569 | * our entries. | 577 | * our entries. |
| 570 | */ | 578 | */ |
| 571 | first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64); | 579 | first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); |
| 572 | 580 | ||
| 573 | /* Get the cluster for this block_group if it exists */ | 581 | /* Get the cluster for this block_group if it exists */ |
| 574 | if (!list_empty(&block_group->cluster_list)) | 582 | if (!list_empty(&block_group->cluster_list)) |
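With the switch from num_checksums to num_pages, the layout described by first_page_offset stays the same: one u32 CRC per page followed by a u64 generation, packed at the start of page 0, with the free-space entries beginning immediately after. A quick worked example of the offset (illustrative numbers only, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long num_pages = 16;   /* e.g. a 64KB cache file with 4KB pages */
        unsigned long first_page_offset =
                sizeof(uint32_t) * num_pages + sizeof(uint64_t);

        /* 16 * 4 + 8 = 72: entries on page 0 start at byte 72, while every
         * later page holds entries (or bitmaps) starting at byte 0 */
        printf("%lu\n", first_page_offset);
        return 0;
    }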
| @@ -590,20 +598,18 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
| 590 | * after find_get_page at this point. Just putting this here so people | 598 | * after find_get_page at this point. Just putting this here so people |
| 591 | * know and don't freak out. | 599 | * know and don't freak out. |
| 592 | */ | 600 | */ |
| 593 | while (index <= last_index) { | 601 | while (index < num_pages) { |
| 594 | page = grab_cache_page(inode->i_mapping, index); | 602 | page = grab_cache_page(inode->i_mapping, index); |
| 595 | if (!page) { | 603 | if (!page) { |
| 596 | pgoff_t i = 0; | 604 | int i; |
| 597 | 605 | ||
| 598 | while (i < index) { | 606 | for (i = 0; i < num_pages; i++) { |
| 599 | page = find_get_page(inode->i_mapping, i); | 607 | unlock_page(pages[i]); |
| 600 | unlock_page(page); | 608 | page_cache_release(pages[i]); |
| 601 | page_cache_release(page); | ||
| 602 | page_cache_release(page); | ||
| 603 | i++; | ||
| 604 | } | 609 | } |
| 605 | goto out_free; | 610 | goto out_free; |
| 606 | } | 611 | } |
| 612 | pages[index] = page; | ||
| 607 | index++; | 613 | index++; |
| 608 | } | 614 | } |
| 609 | 615 | ||
| @@ -631,7 +637,12 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
| 631 | offset = start_offset; | 637 | offset = start_offset; |
| 632 | } | 638 | } |
| 633 | 639 | ||
| 634 | page = find_get_page(inode->i_mapping, index); | 640 | if (index >= num_pages) { |
| 641 | out_of_space = true; | ||
| 642 | break; | ||
| 643 | } | ||
| 644 | |||
| 645 | page = pages[index]; | ||
| 635 | 646 | ||
| 636 | addr = kmap(page); | 647 | addr = kmap(page); |
| 637 | entry = addr + start_offset; | 648 | entry = addr + start_offset; |
| @@ -708,23 +719,6 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
| 708 | 719 | ||
| 709 | bytes += PAGE_CACHE_SIZE; | 720 | bytes += PAGE_CACHE_SIZE; |
| 710 | 721 | ||
| 711 | ClearPageChecked(page); | ||
| 712 | set_page_extent_mapped(page); | ||
| 713 | SetPageUptodate(page); | ||
| 714 | set_page_dirty(page); | ||
| 715 | |||
| 716 | /* | ||
| 717 | * We need to release our reference we got for grab_cache_page, | ||
| 718 | * except for the first page which will hold our checksums, we | ||
| 719 | * do that below. | ||
| 720 | */ | ||
| 721 | if (index != 0) { | ||
| 722 | unlock_page(page); | ||
| 723 | page_cache_release(page); | ||
| 724 | } | ||
| 725 | |||
| 726 | page_cache_release(page); | ||
| 727 | |||
| 728 | index++; | 722 | index++; |
| 729 | } while (node || next_page); | 723 | } while (node || next_page); |
| 730 | 724 | ||
| @@ -734,7 +728,11 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
| 734 | struct btrfs_free_space *entry = | 728 | struct btrfs_free_space *entry = |
| 735 | list_entry(pos, struct btrfs_free_space, list); | 729 | list_entry(pos, struct btrfs_free_space, list); |
| 736 | 730 | ||
| 737 | page = find_get_page(inode->i_mapping, index); | 731 | if (index >= num_pages) { |
| 732 | out_of_space = true; | ||
| 733 | break; | ||
| 734 | } | ||
| 735 | page = pages[index]; | ||
| 738 | 736 | ||
| 739 | addr = kmap(page); | 737 | addr = kmap(page); |
| 740 | memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE); | 738 | memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE); |
| @@ -745,64 +743,58 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
| 745 | crc++; | 743 | crc++; |
| 746 | bytes += PAGE_CACHE_SIZE; | 744 | bytes += PAGE_CACHE_SIZE; |
| 747 | 745 | ||
| 748 | ClearPageChecked(page); | ||
| 749 | set_page_extent_mapped(page); | ||
| 750 | SetPageUptodate(page); | ||
| 751 | set_page_dirty(page); | ||
| 752 | unlock_page(page); | ||
| 753 | page_cache_release(page); | ||
| 754 | page_cache_release(page); | ||
| 755 | list_del_init(&entry->list); | 746 | list_del_init(&entry->list); |
| 756 | index++; | 747 | index++; |
| 757 | } | 748 | } |
| 758 | 749 | ||
| 750 | if (out_of_space) { | ||
| 751 | btrfs_drop_pages(pages, num_pages); | ||
| 752 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, | ||
| 753 | i_size_read(inode) - 1, &cached_state, | ||
| 754 | GFP_NOFS); | ||
| 755 | ret = 0; | ||
| 756 | goto out_free; | ||
| 757 | } | ||
| 758 | |||
| 759 | /* Zero out the rest of the pages just to make sure */ | 759 | /* Zero out the rest of the pages just to make sure */ |
| 760 | while (index <= last_index) { | 760 | while (index < num_pages) { |
| 761 | void *addr; | 761 | void *addr; |
| 762 | 762 | ||
| 763 | page = find_get_page(inode->i_mapping, index); | 763 | page = pages[index]; |
| 764 | |||
| 765 | addr = kmap(page); | 764 | addr = kmap(page); |
| 766 | memset(addr, 0, PAGE_CACHE_SIZE); | 765 | memset(addr, 0, PAGE_CACHE_SIZE); |
| 767 | kunmap(page); | 766 | kunmap(page); |
| 768 | ClearPageChecked(page); | ||
| 769 | set_page_extent_mapped(page); | ||
| 770 | SetPageUptodate(page); | ||
| 771 | set_page_dirty(page); | ||
| 772 | unlock_page(page); | ||
| 773 | page_cache_release(page); | ||
| 774 | page_cache_release(page); | ||
| 775 | bytes += PAGE_CACHE_SIZE; | 767 | bytes += PAGE_CACHE_SIZE; |
| 776 | index++; | 768 | index++; |
| 777 | } | 769 | } |
| 778 | 770 | ||
| 779 | btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state); | ||
| 780 | |||
| 781 | /* Write the checksums and trans id to the first page */ | 771 | /* Write the checksums and trans id to the first page */ |
| 782 | { | 772 | { |
| 783 | void *addr; | 773 | void *addr; |
| 784 | u64 *gen; | 774 | u64 *gen; |
| 785 | 775 | ||
| 786 | page = find_get_page(inode->i_mapping, 0); | 776 | page = pages[0]; |
| 787 | 777 | ||
| 788 | addr = kmap(page); | 778 | addr = kmap(page); |
| 789 | memcpy(addr, checksums, sizeof(u32) * num_checksums); | 779 | memcpy(addr, checksums, sizeof(u32) * num_pages); |
| 790 | gen = addr + (sizeof(u32) * num_checksums); | 780 | gen = addr + (sizeof(u32) * num_pages); |
| 791 | *gen = trans->transid; | 781 | *gen = trans->transid; |
| 792 | kunmap(page); | 782 | kunmap(page); |
| 793 | ClearPageChecked(page); | ||
| 794 | set_page_extent_mapped(page); | ||
| 795 | SetPageUptodate(page); | ||
| 796 | set_page_dirty(page); | ||
| 797 | unlock_page(page); | ||
| 798 | page_cache_release(page); | ||
| 799 | page_cache_release(page); | ||
| 800 | } | 783 | } |
| 801 | BTRFS_I(inode)->generation = trans->transid; | ||
| 802 | 784 | ||
| 785 | ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0, | ||
| 786 | bytes, &cached_state); | ||
| 787 | btrfs_drop_pages(pages, num_pages); | ||
| 803 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, | 788 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, |
| 804 | i_size_read(inode) - 1, &cached_state, GFP_NOFS); | 789 | i_size_read(inode) - 1, &cached_state, GFP_NOFS); |
| 805 | 790 | ||
| 791 | if (ret) { | ||
| 792 | ret = 0; | ||
| 793 | goto out_free; | ||
| 794 | } | ||
| 795 | |||
| 796 | BTRFS_I(inode)->generation = trans->transid; | ||
| 797 | |||
| 806 | filemap_write_and_wait(inode->i_mapping); | 798 | filemap_write_and_wait(inode->i_mapping); |
| 807 | 799 | ||
| 808 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | 800 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; |
| @@ -853,6 +845,7 @@ out_free: | |||
| 853 | BTRFS_I(inode)->generation = 0; | 845 | BTRFS_I(inode)->generation = 0; |
| 854 | } | 846 | } |
| 855 | kfree(checksums); | 847 | kfree(checksums); |
| 848 | kfree(pages); | ||
| 856 | btrfs_update_inode(trans, root, inode); | 849 | btrfs_update_inode(trans, root, inode); |
| 857 | iput(inode); | 850 | iput(inode); |
| 858 | return ret; | 851 | return ret; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5cc64ab9c485..fcd66b6a8086 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -1770,9 +1770,12 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) | |||
| 1770 | add_pending_csums(trans, inode, ordered_extent->file_offset, | 1770 | add_pending_csums(trans, inode, ordered_extent->file_offset, |
| 1771 | &ordered_extent->list); | 1771 | &ordered_extent->list); |
| 1772 | 1772 | ||
| 1773 | btrfs_ordered_update_i_size(inode, 0, ordered_extent); | 1773 | ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); |
| 1774 | ret = btrfs_update_inode(trans, root, inode); | 1774 | if (!ret) { |
| 1775 | BUG_ON(ret); | 1775 | ret = btrfs_update_inode(trans, root, inode); |
| 1776 | BUG_ON(ret); | ||
| 1777 | } | ||
| 1778 | ret = 0; | ||
| 1776 | out: | 1779 | out: |
| 1777 | if (nolock) { | 1780 | if (nolock) { |
| 1778 | if (trans) | 1781 | if (trans) |
| @@ -2590,6 +2593,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, | |||
| 2590 | struct btrfs_inode_item *item, | 2593 | struct btrfs_inode_item *item, |
| 2591 | struct inode *inode) | 2594 | struct inode *inode) |
| 2592 | { | 2595 | { |
| 2596 | if (!leaf->map_token) | ||
| 2597 | map_private_extent_buffer(leaf, (unsigned long)item, | ||
| 2598 | sizeof(struct btrfs_inode_item), | ||
| 2599 | &leaf->map_token, &leaf->kaddr, | ||
| 2600 | &leaf->map_start, &leaf->map_len, | ||
| 2601 | KM_USER1); | ||
| 2602 | |||
| 2593 | btrfs_set_inode_uid(leaf, item, inode->i_uid); | 2603 | btrfs_set_inode_uid(leaf, item, inode->i_uid); |
| 2594 | btrfs_set_inode_gid(leaf, item, inode->i_gid); | 2604 | btrfs_set_inode_gid(leaf, item, inode->i_gid); |
| 2595 | btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); | 2605 | btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); |
| @@ -2618,6 +2628,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, | |||
| 2618 | btrfs_set_inode_rdev(leaf, item, inode->i_rdev); | 2628 | btrfs_set_inode_rdev(leaf, item, inode->i_rdev); |
| 2619 | btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); | 2629 | btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); |
| 2620 | btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); | 2630 | btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); |
| 2631 | |||
| 2632 | if (leaf->map_token) { | ||
| 2633 | unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); | ||
| 2634 | leaf->map_token = NULL; | ||
| 2635 | } | ||
| 2621 | } | 2636 | } |
| 2622 | 2637 | ||
| 2623 | /* | 2638 | /* |
| @@ -4207,10 +4222,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, | |||
| 4207 | struct btrfs_key found_key; | 4222 | struct btrfs_key found_key; |
| 4208 | struct btrfs_path *path; | 4223 | struct btrfs_path *path; |
| 4209 | int ret; | 4224 | int ret; |
| 4210 | u32 nritems; | ||
| 4211 | struct extent_buffer *leaf; | 4225 | struct extent_buffer *leaf; |
| 4212 | int slot; | 4226 | int slot; |
| 4213 | int advance; | ||
| 4214 | unsigned char d_type; | 4227 | unsigned char d_type; |
| 4215 | int over = 0; | 4228 | int over = 0; |
| 4216 | u32 di_cur; | 4229 | u32 di_cur; |
| @@ -4253,27 +4266,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, | |||
| 4253 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 4266 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| 4254 | if (ret < 0) | 4267 | if (ret < 0) |
| 4255 | goto err; | 4268 | goto err; |
| 4256 | advance = 0; | ||
| 4257 | 4269 | ||
| 4258 | while (1) { | 4270 | while (1) { |
| 4259 | leaf = path->nodes[0]; | 4271 | leaf = path->nodes[0]; |
| 4260 | nritems = btrfs_header_nritems(leaf); | ||
| 4261 | slot = path->slots[0]; | 4272 | slot = path->slots[0]; |
| 4262 | if (advance || slot >= nritems) { | 4273 | if (slot >= btrfs_header_nritems(leaf)) { |
| 4263 | if (slot >= nritems - 1) { | 4274 | ret = btrfs_next_leaf(root, path); |
| 4264 | ret = btrfs_next_leaf(root, path); | 4275 | if (ret < 0) |
| 4265 | if (ret) | 4276 | goto err; |
| 4266 | break; | 4277 | else if (ret > 0) |
| 4267 | leaf = path->nodes[0]; | 4278 | break; |
| 4268 | nritems = btrfs_header_nritems(leaf); | 4279 | continue; |
| 4269 | slot = path->slots[0]; | ||
| 4270 | } else { | ||
| 4271 | slot++; | ||
| 4272 | path->slots[0]++; | ||
| 4273 | } | ||
| 4274 | } | 4280 | } |
| 4275 | 4281 | ||
| 4276 | advance = 1; | ||
| 4277 | item = btrfs_item_nr(leaf, slot); | 4282 | item = btrfs_item_nr(leaf, slot); |
| 4278 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | 4283 | btrfs_item_key_to_cpu(leaf, &found_key, slot); |
| 4279 | 4284 | ||
| @@ -4282,7 +4287,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, | |||
| 4282 | if (btrfs_key_type(&found_key) != key_type) | 4287 | if (btrfs_key_type(&found_key) != key_type) |
| 4283 | break; | 4288 | break; |
| 4284 | if (found_key.offset < filp->f_pos) | 4289 | if (found_key.offset < filp->f_pos) |
| 4285 | continue; | 4290 | goto next; |
| 4286 | 4291 | ||
| 4287 | filp->f_pos = found_key.offset; | 4292 | filp->f_pos = found_key.offset; |
| 4288 | 4293 | ||
| @@ -4335,6 +4340,8 @@ skip: | |||
| 4335 | di_cur += di_len; | 4340 | di_cur += di_len; |
| 4336 | di = (struct btrfs_dir_item *)((char *)di + di_len); | 4341 | di = (struct btrfs_dir_item *)((char *)di + di_len); |
| 4337 | } | 4342 | } |
| 4343 | next: | ||
| 4344 | path->slots[0]++; | ||
| 4338 | } | 4345 | } |
| 4339 | 4346 | ||
| 4340 | /* Reached end of directory/root. Bump pos past the last item. */ | 4347 | /* Reached end of directory/root. Bump pos past the last item. */ |
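The readdir rework drops the advance/nritems bookkeeping: the slot index is advanced once at the bottom of the loop and btrfs_next_leaf() handles crossing into the next leaf; the same simplification is applied to btrfs_listxattr() later in this patch. Pulled out of the diff noise, the resulting walk looks roughly like this (variables as declared in btrfs_real_readdir() above; the per-item work is elided):

    while (1) {
            leaf = path->nodes[0];
            slot = path->slots[0];
            if (slot >= btrfs_header_nritems(leaf)) {
                    /* past the last item: <0 error, 0 new leaf, >0 no more leaves */
                    ret = btrfs_next_leaf(root, path);
                    if (ret < 0)
                            goto err;
                    else if (ret > 0)
                            break;
                    continue;
            }

            /* ... emit the directory entry found at (leaf, slot) ... */

            path->slots[0]++;
    }
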
| @@ -4527,14 +4534,17 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
| 4527 | BUG_ON(!path); | 4534 | BUG_ON(!path); |
| 4528 | 4535 | ||
| 4529 | inode = new_inode(root->fs_info->sb); | 4536 | inode = new_inode(root->fs_info->sb); |
| 4530 | if (!inode) | 4537 | if (!inode) { |
| 4538 | btrfs_free_path(path); | ||
| 4531 | return ERR_PTR(-ENOMEM); | 4539 | return ERR_PTR(-ENOMEM); |
| 4540 | } | ||
| 4532 | 4541 | ||
| 4533 | if (dir) { | 4542 | if (dir) { |
| 4534 | trace_btrfs_inode_request(dir); | 4543 | trace_btrfs_inode_request(dir); |
| 4535 | 4544 | ||
| 4536 | ret = btrfs_set_inode_index(dir, index); | 4545 | ret = btrfs_set_inode_index(dir, index); |
| 4537 | if (ret) { | 4546 | if (ret) { |
| 4547 | btrfs_free_path(path); | ||
| 4538 | iput(inode); | 4548 | iput(inode); |
| 4539 | return ERR_PTR(ret); | 4549 | return ERR_PTR(ret); |
| 4540 | } | 4550 | } |
| @@ -4834,9 +4844,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |||
| 4834 | if (inode->i_nlink == ~0U) | 4844 | if (inode->i_nlink == ~0U) |
| 4835 | return -EMLINK; | 4845 | return -EMLINK; |
| 4836 | 4846 | ||
| 4837 | btrfs_inc_nlink(inode); | ||
| 4838 | inode->i_ctime = CURRENT_TIME; | ||
| 4839 | |||
| 4840 | err = btrfs_set_inode_index(dir, &index); | 4847 | err = btrfs_set_inode_index(dir, &index); |
| 4841 | if (err) | 4848 | if (err) |
| 4842 | goto fail; | 4849 | goto fail; |
| @@ -4852,6 +4859,9 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |||
| 4852 | goto fail; | 4859 | goto fail; |
| 4853 | } | 4860 | } |
| 4854 | 4861 | ||
| 4862 | btrfs_inc_nlink(inode); | ||
| 4863 | inode->i_ctime = CURRENT_TIME; | ||
| 4864 | |||
| 4855 | btrfs_set_trans_block_group(trans, dir); | 4865 | btrfs_set_trans_block_group(trans, dir); |
| 4856 | ihold(inode); | 4866 | ihold(inode); |
| 4857 | 4867 | ||
| @@ -5221,7 +5231,7 @@ again: | |||
| 5221 | btrfs_mark_buffer_dirty(leaf); | 5231 | btrfs_mark_buffer_dirty(leaf); |
| 5222 | } | 5232 | } |
| 5223 | set_extent_uptodate(io_tree, em->start, | 5233 | set_extent_uptodate(io_tree, em->start, |
| 5224 | extent_map_end(em) - 1, GFP_NOFS); | 5234 | extent_map_end(em) - 1, NULL, GFP_NOFS); |
| 5225 | goto insert; | 5235 | goto insert; |
| 5226 | } else { | 5236 | } else { |
| 5227 | printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); | 5237 | printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); |
| @@ -5428,17 +5438,30 @@ out: | |||
| 5428 | } | 5438 | } |
| 5429 | 5439 | ||
| 5430 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode, | 5440 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode, |
| 5441 | struct extent_map *em, | ||
| 5431 | u64 start, u64 len) | 5442 | u64 start, u64 len) |
| 5432 | { | 5443 | { |
| 5433 | struct btrfs_root *root = BTRFS_I(inode)->root; | 5444 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| 5434 | struct btrfs_trans_handle *trans; | 5445 | struct btrfs_trans_handle *trans; |
| 5435 | struct extent_map *em; | ||
| 5436 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | 5446 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
| 5437 | struct btrfs_key ins; | 5447 | struct btrfs_key ins; |
| 5438 | u64 alloc_hint; | 5448 | u64 alloc_hint; |
| 5439 | int ret; | 5449 | int ret; |
| 5450 | bool insert = false; | ||
| 5440 | 5451 | ||
| 5441 | btrfs_drop_extent_cache(inode, start, start + len - 1, 0); | 5452 | /* |
| 5453 | * Ok if the extent map we looked up is a hole and is for the exact | ||
| 5454 | * range we want, there is no reason to allocate a new one, however if | ||
| 5455 | * it is not right then we need to free this one and drop the cache for | ||
| 5456 | * our range. | ||
| 5457 | */ | ||
| 5458 | if (em->block_start != EXTENT_MAP_HOLE || em->start != start || | ||
| 5459 | em->len != len) { | ||
| 5460 | free_extent_map(em); | ||
| 5461 | em = NULL; | ||
| 5462 | insert = true; | ||
| 5463 | btrfs_drop_extent_cache(inode, start, start + len - 1, 0); | ||
| 5464 | } | ||
| 5442 | 5465 | ||
| 5443 | trans = btrfs_join_transaction(root, 0); | 5466 | trans = btrfs_join_transaction(root, 0); |
| 5444 | if (IS_ERR(trans)) | 5467 | if (IS_ERR(trans)) |
| @@ -5454,10 +5477,12 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, | |||
| 5454 | goto out; | 5477 | goto out; |
| 5455 | } | 5478 | } |
| 5456 | 5479 | ||
| 5457 | em = alloc_extent_map(GFP_NOFS); | ||
| 5458 | if (!em) { | 5480 | if (!em) { |
| 5459 | em = ERR_PTR(-ENOMEM); | 5481 | em = alloc_extent_map(GFP_NOFS); |
| 5460 | goto out; | 5482 | if (!em) { |
| 5483 | em = ERR_PTR(-ENOMEM); | ||
| 5484 | goto out; | ||
| 5485 | } | ||
| 5461 | } | 5486 | } |
| 5462 | 5487 | ||
| 5463 | em->start = start; | 5488 | em->start = start; |
| @@ -5467,9 +5492,15 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, | |||
| 5467 | em->block_start = ins.objectid; | 5492 | em->block_start = ins.objectid; |
| 5468 | em->block_len = ins.offset; | 5493 | em->block_len = ins.offset; |
| 5469 | em->bdev = root->fs_info->fs_devices->latest_bdev; | 5494 | em->bdev = root->fs_info->fs_devices->latest_bdev; |
| 5495 | |||
| 5496 | /* | ||
| 5497 | * We need to do this because if we're using the original em we searched | ||
| 5498 | * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that. | ||
| 5499 | */ | ||
| 5500 | em->flags = 0; | ||
| 5470 | set_bit(EXTENT_FLAG_PINNED, &em->flags); | 5501 | set_bit(EXTENT_FLAG_PINNED, &em->flags); |
| 5471 | 5502 | ||
| 5472 | while (1) { | 5503 | while (insert) { |
| 5473 | write_lock(&em_tree->lock); | 5504 | write_lock(&em_tree->lock); |
| 5474 | ret = add_extent_mapping(em_tree, em); | 5505 | ret = add_extent_mapping(em_tree, em); |
| 5475 | write_unlock(&em_tree->lock); | 5506 | write_unlock(&em_tree->lock); |
| @@ -5687,8 +5718,7 @@ must_cow: | |||
| 5687 | * it above | 5718 | * it above |
| 5688 | */ | 5719 | */ |
| 5689 | len = bh_result->b_size; | 5720 | len = bh_result->b_size; |
| 5690 | free_extent_map(em); | 5721 | em = btrfs_new_extent_direct(inode, em, start, len); |
| 5691 | em = btrfs_new_extent_direct(inode, start, len); | ||
| 5692 | if (IS_ERR(em)) | 5722 | if (IS_ERR(em)) |
| 5693 | return PTR_ERR(em); | 5723 | return PTR_ERR(em); |
| 5694 | len = min(len, em->len - (start - em->start)); | 5724 | len = min(len, em->len - (start - em->start)); |
| @@ -5851,8 +5881,10 @@ again: | |||
| 5851 | } | 5881 | } |
| 5852 | 5882 | ||
| 5853 | add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); | 5883 | add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); |
| 5854 | btrfs_ordered_update_i_size(inode, 0, ordered); | 5884 | ret = btrfs_ordered_update_i_size(inode, 0, ordered); |
| 5855 | btrfs_update_inode(trans, root, inode); | 5885 | if (!ret) |
| 5886 | btrfs_update_inode(trans, root, inode); | ||
| 5887 | ret = 0; | ||
| 5856 | out_unlock: | 5888 | out_unlock: |
| 5857 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, | 5889 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, |
| 5858 | ordered->file_offset + ordered->len - 1, | 5890 | ordered->file_offset + ordered->len - 1, |
| @@ -5938,7 +5970,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, | |||
| 5938 | 5970 | ||
| 5939 | static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | 5971 | static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, |
| 5940 | int rw, u64 file_offset, int skip_sum, | 5972 | int rw, u64 file_offset, int skip_sum, |
| 5941 | u32 *csums) | 5973 | u32 *csums, int async_submit) |
| 5942 | { | 5974 | { |
| 5943 | int write = rw & REQ_WRITE; | 5975 | int write = rw & REQ_WRITE; |
| 5944 | struct btrfs_root *root = BTRFS_I(inode)->root; | 5976 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| @@ -5949,13 +5981,24 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | |||
| 5949 | if (ret) | 5981 | if (ret) |
| 5950 | goto err; | 5982 | goto err; |
| 5951 | 5983 | ||
| 5952 | if (write && !skip_sum) { | 5984 | if (skip_sum) |
| 5985 | goto map; | ||
| 5986 | |||
| 5987 | if (write && async_submit) { | ||
| 5953 | ret = btrfs_wq_submit_bio(root->fs_info, | 5988 | ret = btrfs_wq_submit_bio(root->fs_info, |
| 5954 | inode, rw, bio, 0, 0, | 5989 | inode, rw, bio, 0, 0, |
| 5955 | file_offset, | 5990 | file_offset, |
| 5956 | __btrfs_submit_bio_start_direct_io, | 5991 | __btrfs_submit_bio_start_direct_io, |
| 5957 | __btrfs_submit_bio_done); | 5992 | __btrfs_submit_bio_done); |
| 5958 | goto err; | 5993 | goto err; |
| 5994 | } else if (write) { | ||
| 5995 | /* | ||
| 5996 | * If we aren't doing async submit, calculate the csum of the | ||
| 5997 | * bio now. | ||
| 5998 | */ | ||
| 5999 | ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); | ||
| 6000 | if (ret) | ||
| 6001 | goto err; | ||
| 5959 | } else if (!skip_sum) { | 6002 | } else if (!skip_sum) { |
| 5960 | ret = btrfs_lookup_bio_sums_dio(root, inode, bio, | 6003 | ret = btrfs_lookup_bio_sums_dio(root, inode, bio, |
| 5961 | file_offset, csums); | 6004 | file_offset, csums); |
| @@ -5963,7 +6006,8 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | |||
| 5963 | goto err; | 6006 | goto err; |
| 5964 | } | 6007 | } |
| 5965 | 6008 | ||
| 5966 | ret = btrfs_map_bio(root, rw, bio, 0, 1); | 6009 | map: |
| 6010 | ret = btrfs_map_bio(root, rw, bio, 0, async_submit); | ||
| 5967 | err: | 6011 | err: |
| 5968 | bio_put(bio); | 6012 | bio_put(bio); |
| 5969 | return ret; | 6013 | return ret; |
| @@ -5985,15 +6029,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
| 5985 | int nr_pages = 0; | 6029 | int nr_pages = 0; |
| 5986 | u32 *csums = dip->csums; | 6030 | u32 *csums = dip->csums; |
| 5987 | int ret = 0; | 6031 | int ret = 0; |
| 6032 | int async_submit = 0; | ||
| 5988 | int write = rw & REQ_WRITE; | 6033 | int write = rw & REQ_WRITE; |
| 5989 | 6034 | ||
| 5990 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); | ||
| 5991 | if (!bio) | ||
| 5992 | return -ENOMEM; | ||
| 5993 | bio->bi_private = dip; | ||
| 5994 | bio->bi_end_io = btrfs_end_dio_bio; | ||
| 5995 | atomic_inc(&dip->pending_bios); | ||
| 5996 | |||
| 5997 | map_length = orig_bio->bi_size; | 6035 | map_length = orig_bio->bi_size; |
| 5998 | ret = btrfs_map_block(map_tree, READ, start_sector << 9, | 6036 | ret = btrfs_map_block(map_tree, READ, start_sector << 9, |
| 5999 | &map_length, NULL, 0); | 6037 | &map_length, NULL, 0); |
| @@ -6002,6 +6040,19 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
| 6002 | return -EIO; | 6040 | return -EIO; |
| 6003 | } | 6041 | } |
| 6004 | 6042 | ||
| 6043 | if (map_length >= orig_bio->bi_size) { | ||
| 6044 | bio = orig_bio; | ||
| 6045 | goto submit; | ||
| 6046 | } | ||
| 6047 | |||
| 6048 | async_submit = 1; | ||
| 6049 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); | ||
| 6050 | if (!bio) | ||
| 6051 | return -ENOMEM; | ||
| 6052 | bio->bi_private = dip; | ||
| 6053 | bio->bi_end_io = btrfs_end_dio_bio; | ||
| 6054 | atomic_inc(&dip->pending_bios); | ||
| 6055 | |||
| 6005 | while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { | 6056 | while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { |
| 6006 | if (unlikely(map_length < submit_len + bvec->bv_len || | 6057 | if (unlikely(map_length < submit_len + bvec->bv_len || |
| 6007 | bio_add_page(bio, bvec->bv_page, bvec->bv_len, | 6058 | bio_add_page(bio, bvec->bv_page, bvec->bv_len, |
| @@ -6015,7 +6066,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
| 6015 | atomic_inc(&dip->pending_bios); | 6066 | atomic_inc(&dip->pending_bios); |
| 6016 | ret = __btrfs_submit_dio_bio(bio, inode, rw, | 6067 | ret = __btrfs_submit_dio_bio(bio, inode, rw, |
| 6017 | file_offset, skip_sum, | 6068 | file_offset, skip_sum, |
| 6018 | csums); | 6069 | csums, async_submit); |
| 6019 | if (ret) { | 6070 | if (ret) { |
| 6020 | bio_put(bio); | 6071 | bio_put(bio); |
| 6021 | atomic_dec(&dip->pending_bios); | 6072 | atomic_dec(&dip->pending_bios); |
| @@ -6052,8 +6103,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
| 6052 | } | 6103 | } |
| 6053 | } | 6104 | } |
| 6054 | 6105 | ||
| 6106 | submit: | ||
| 6055 | ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, | 6107 | ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, |
| 6056 | csums); | 6108 | csums, async_submit); |
| 6057 | if (!ret) | 6109 | if (!ret) |
| 6058 | return 0; | 6110 | return 0; |
| 6059 | 6111 | ||
| @@ -6148,6 +6200,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io | |||
| 6148 | unsigned long nr_segs) | 6200 | unsigned long nr_segs) |
| 6149 | { | 6201 | { |
| 6150 | int seg; | 6202 | int seg; |
| 6203 | int i; | ||
| 6151 | size_t size; | 6204 | size_t size; |
| 6152 | unsigned long addr; | 6205 | unsigned long addr; |
| 6153 | unsigned blocksize_mask = root->sectorsize - 1; | 6206 | unsigned blocksize_mask = root->sectorsize - 1; |
| @@ -6162,8 +6215,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io | |||
| 6162 | addr = (unsigned long)iov[seg].iov_base; | 6215 | addr = (unsigned long)iov[seg].iov_base; |
| 6163 | size = iov[seg].iov_len; | 6216 | size = iov[seg].iov_len; |
| 6164 | end += size; | 6217 | end += size; |
| 6165 | if ((addr & blocksize_mask) || (size & blocksize_mask)) | 6218 | if ((addr & blocksize_mask) || (size & blocksize_mask)) |
| 6166 | goto out; | 6219 | goto out; |
| 6220 | |||
| 6221 | /* If this is a write we don't need to check anymore */ | ||
| 6222 | if (rw & WRITE) | ||
| 6223 | continue; | ||
| 6224 | |||
| 6225 | /* | ||
| 6226 | * Check to make sure we don't have duplicate iov_base's in this | ||
| 6227 | * iovec, if so return EINVAL, otherwise we'll get csum errors | ||
| 6228 | * when reading back. | ||
| 6229 | */ | ||
| 6230 | for (i = seg + 1; i < nr_segs; i++) { | ||
| 6231 | if (iov[seg].iov_base == iov[i].iov_base) | ||
| 6232 | goto out; | ||
| 6233 | } | ||
| 6167 | } | 6234 | } |
| 6168 | retval = 0; | 6235 | retval = 0; |
| 6169 | out: | 6236 | out: |
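check_direct_IO() now rejects read iovecs that reuse a buffer address, since two segments resolving to the same pages would later trip checksum verification. Stripped of the surrounding alignment checks, the added test is a simple O(n^2) scan over the segments (same iov/nr_segs parameters as above; returning -EINVAL directly here instead of the function's goto out):

    if (!(rw & WRITE)) {            /* writes don't need the duplicate check */
            for (seg = 0; seg < nr_segs; seg++)
                    for (i = seg + 1; i < nr_segs; i++)
                            if (iov[seg].iov_base == iov[i].iov_base)
                                    return -EINVAL;
    }
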
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index cfc264fefdb0..ffb48d6c5433 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
| @@ -2287,7 +2287,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) | |||
| 2287 | struct btrfs_ioctl_space_info space; | 2287 | struct btrfs_ioctl_space_info space; |
| 2288 | struct btrfs_ioctl_space_info *dest; | 2288 | struct btrfs_ioctl_space_info *dest; |
| 2289 | struct btrfs_ioctl_space_info *dest_orig; | 2289 | struct btrfs_ioctl_space_info *dest_orig; |
| 2290 | struct btrfs_ioctl_space_info *user_dest; | 2290 | struct btrfs_ioctl_space_info __user *user_dest; |
| 2291 | struct btrfs_space_info *info; | 2291 | struct btrfs_space_info *info; |
| 2292 | u64 types[] = {BTRFS_BLOCK_GROUP_DATA, | 2292 | u64 types[] = {BTRFS_BLOCK_GROUP_DATA, |
| 2293 | BTRFS_BLOCK_GROUP_SYSTEM, | 2293 | BTRFS_BLOCK_GROUP_SYSTEM, |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 58e7de9cc90c..0ac712efcdf2 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
| @@ -159,7 +159,7 @@ enum { | |||
| 159 | Opt_compress_type, Opt_compress_force, Opt_compress_force_type, | 159 | Opt_compress_type, Opt_compress_force, Opt_compress_force_type, |
| 160 | Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, | 160 | Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, |
| 161 | Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, | 161 | Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, |
| 162 | Opt_enospc_debug, Opt_err, | 162 | Opt_enospc_debug, Opt_subvolrootid, Opt_err, |
| 163 | }; | 163 | }; |
| 164 | 164 | ||
| 165 | static match_table_t tokens = { | 165 | static match_table_t tokens = { |
| @@ -189,6 +189,7 @@ static match_table_t tokens = { | |||
| 189 | {Opt_clear_cache, "clear_cache"}, | 189 | {Opt_clear_cache, "clear_cache"}, |
| 190 | {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, | 190 | {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, |
| 191 | {Opt_enospc_debug, "enospc_debug"}, | 191 | {Opt_enospc_debug, "enospc_debug"}, |
| 192 | {Opt_subvolrootid, "subvolrootid=%d"}, | ||
| 192 | {Opt_err, NULL}, | 193 | {Opt_err, NULL}, |
| 193 | }; | 194 | }; |
| 194 | 195 | ||
| @@ -232,6 +233,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
| 232 | break; | 233 | break; |
| 233 | case Opt_subvol: | 234 | case Opt_subvol: |
| 234 | case Opt_subvolid: | 235 | case Opt_subvolid: |
| 236 | case Opt_subvolrootid: | ||
| 235 | case Opt_device: | 237 | case Opt_device: |
| 236 | /* | 238 | /* |
| 237 | * These are parsed by btrfs_parse_early_options | 239 | * These are parsed by btrfs_parse_early_options |
| @@ -388,7 +390,7 @@ out: | |||
| 388 | */ | 390 | */ |
| 389 | static int btrfs_parse_early_options(const char *options, fmode_t flags, | 391 | static int btrfs_parse_early_options(const char *options, fmode_t flags, |
| 390 | void *holder, char **subvol_name, u64 *subvol_objectid, | 392 | void *holder, char **subvol_name, u64 *subvol_objectid, |
| 391 | struct btrfs_fs_devices **fs_devices) | 393 | u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices) |
| 392 | { | 394 | { |
| 393 | substring_t args[MAX_OPT_ARGS]; | 395 | substring_t args[MAX_OPT_ARGS]; |
| 394 | char *opts, *orig, *p; | 396 | char *opts, *orig, *p; |
| @@ -429,6 +431,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, | |||
| 429 | *subvol_objectid = intarg; | 431 | *subvol_objectid = intarg; |
| 430 | } | 432 | } |
| 431 | break; | 433 | break; |
| 434 | case Opt_subvolrootid: | ||
| 435 | intarg = 0; | ||
| 436 | error = match_int(&args[0], &intarg); | ||
| 437 | if (!error) { | ||
| 438 | /* we want the original fs_tree */ | ||
| 439 | if (!intarg) | ||
| 440 | *subvol_rootid = | ||
| 441 | BTRFS_FS_TREE_OBJECTID; | ||
| 442 | else | ||
| 443 | *subvol_rootid = intarg; | ||
| 444 | } | ||
| 445 | break; | ||
| 432 | case Opt_device: | 446 | case Opt_device: |
| 433 | error = btrfs_scan_one_device(match_strdup(&args[0]), | 447 | error = btrfs_scan_one_device(match_strdup(&args[0]), |
| 434 | flags, holder, fs_devices); | 448 | flags, holder, fs_devices); |
| @@ -736,6 +750,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, | |||
| 736 | fmode_t mode = FMODE_READ; | 750 | fmode_t mode = FMODE_READ; |
| 737 | char *subvol_name = NULL; | 751 | char *subvol_name = NULL; |
| 738 | u64 subvol_objectid = 0; | 752 | u64 subvol_objectid = 0; |
| 753 | u64 subvol_rootid = 0; | ||
| 739 | int error = 0; | 754 | int error = 0; |
| 740 | 755 | ||
| 741 | if (!(flags & MS_RDONLY)) | 756 | if (!(flags & MS_RDONLY)) |
| @@ -743,7 +758,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, | |||
| 743 | 758 | ||
| 744 | error = btrfs_parse_early_options(data, mode, fs_type, | 759 | error = btrfs_parse_early_options(data, mode, fs_type, |
| 745 | &subvol_name, &subvol_objectid, | 760 | &subvol_name, &subvol_objectid, |
| 746 | &fs_devices); | 761 | &subvol_rootid, &fs_devices); |
| 747 | if (error) | 762 | if (error) |
| 748 | return ERR_PTR(error); | 763 | return ERR_PTR(error); |
| 749 | 764 | ||
| @@ -807,15 +822,17 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, | |||
| 807 | s->s_flags |= MS_ACTIVE; | 822 | s->s_flags |= MS_ACTIVE; |
| 808 | } | 823 | } |
| 809 | 824 | ||
| 810 | root = get_default_root(s, subvol_objectid); | ||
| 811 | if (IS_ERR(root)) { | ||
| 812 | error = PTR_ERR(root); | ||
| 813 | deactivate_locked_super(s); | ||
| 814 | goto error_free_subvol_name; | ||
| 815 | } | ||
| 816 | /* if they gave us a subvolume name bind mount into that */ | 825 | /* if they gave us a subvolume name bind mount into that */ |
| 817 | if (strcmp(subvol_name, ".")) { | 826 | if (strcmp(subvol_name, ".")) { |
| 818 | struct dentry *new_root; | 827 | struct dentry *new_root; |
| 828 | |||
| 829 | root = get_default_root(s, subvol_rootid); | ||
| 830 | if (IS_ERR(root)) { | ||
| 831 | error = PTR_ERR(root); | ||
| 832 | deactivate_locked_super(s); | ||
| 833 | goto error_free_subvol_name; | ||
| 834 | } | ||
| 835 | |||
| 819 | mutex_lock(&root->d_inode->i_mutex); | 836 | mutex_lock(&root->d_inode->i_mutex); |
| 820 | new_root = lookup_one_len(subvol_name, root, | 837 | new_root = lookup_one_len(subvol_name, root, |
| 821 | strlen(subvol_name)); | 838 | strlen(subvol_name)); |
| @@ -836,6 +853,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, | |||
| 836 | } | 853 | } |
| 837 | dput(root); | 854 | dput(root); |
| 838 | root = new_root; | 855 | root = new_root; |
| 856 | } else { | ||
| 857 | root = get_default_root(s, subvol_objectid); | ||
| 858 | if (IS_ERR(root)) { | ||
| 859 | error = PTR_ERR(root); | ||
| 860 | deactivate_locked_super(s); | ||
| 861 | goto error_free_subvol_name; | ||
| 862 | } | ||
| 839 | } | 863 | } |
| 840 | 864 | ||
| 841 | kfree(subvol_name); | 865 | kfree(subvol_name); |
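The super.c changes add a subvolrootid= mount option: it is parsed with the other early options and, when a subvolume name is also given, selects the tree that get_default_root() starts the name lookup from; a plain default mount keeps using subvol_objectid. The parse case, condensed from the hunk above (intarg and args come from the surrounding token-matching loop; 0 is treated as the original FS tree):

    case Opt_subvolrootid:
            intarg = 0;
            if (!match_int(&args[0], &intarg))
                    *subvol_rootid = intarg ? intarg : BTRFS_FS_TREE_OBJECTID;
            break;
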
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 5b158da7e0bb..c571734d5e5a 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
| @@ -32,10 +32,8 @@ | |||
| 32 | 32 | ||
| 33 | static noinline void put_transaction(struct btrfs_transaction *transaction) | 33 | static noinline void put_transaction(struct btrfs_transaction *transaction) |
| 34 | { | 34 | { |
| 35 | WARN_ON(transaction->use_count == 0); | 35 | WARN_ON(atomic_read(&transaction->use_count) == 0); |
| 36 | transaction->use_count--; | 36 | if (atomic_dec_and_test(&transaction->use_count)) { |
| 37 | if (transaction->use_count == 0) { | ||
| 38 | list_del_init(&transaction->list); | ||
| 39 | memset(transaction, 0, sizeof(*transaction)); | 37 | memset(transaction, 0, sizeof(*transaction)); |
| 40 | kmem_cache_free(btrfs_transaction_cachep, transaction); | 38 | kmem_cache_free(btrfs_transaction_cachep, transaction); |
| 41 | } | 39 | } |
| @@ -60,14 +58,14 @@ static noinline int join_transaction(struct btrfs_root *root) | |||
| 60 | if (!cur_trans) | 58 | if (!cur_trans) |
| 61 | return -ENOMEM; | 59 | return -ENOMEM; |
| 62 | root->fs_info->generation++; | 60 | root->fs_info->generation++; |
| 63 | cur_trans->num_writers = 1; | 61 | atomic_set(&cur_trans->num_writers, 1); |
| 64 | cur_trans->num_joined = 0; | 62 | cur_trans->num_joined = 0; |
| 65 | cur_trans->transid = root->fs_info->generation; | 63 | cur_trans->transid = root->fs_info->generation; |
| 66 | init_waitqueue_head(&cur_trans->writer_wait); | 64 | init_waitqueue_head(&cur_trans->writer_wait); |
| 67 | init_waitqueue_head(&cur_trans->commit_wait); | 65 | init_waitqueue_head(&cur_trans->commit_wait); |
| 68 | cur_trans->in_commit = 0; | 66 | cur_trans->in_commit = 0; |
| 69 | cur_trans->blocked = 0; | 67 | cur_trans->blocked = 0; |
| 70 | cur_trans->use_count = 1; | 68 | atomic_set(&cur_trans->use_count, 1); |
| 71 | cur_trans->commit_done = 0; | 69 | cur_trans->commit_done = 0; |
| 72 | cur_trans->start_time = get_seconds(); | 70 | cur_trans->start_time = get_seconds(); |
| 73 | 71 | ||
| @@ -88,7 +86,7 @@ static noinline int join_transaction(struct btrfs_root *root) | |||
| 88 | root->fs_info->running_transaction = cur_trans; | 86 | root->fs_info->running_transaction = cur_trans; |
| 89 | spin_unlock(&root->fs_info->new_trans_lock); | 87 | spin_unlock(&root->fs_info->new_trans_lock); |
| 90 | } else { | 88 | } else { |
| 91 | cur_trans->num_writers++; | 89 | atomic_inc(&cur_trans->num_writers); |
| 92 | cur_trans->num_joined++; | 90 | cur_trans->num_joined++; |
| 93 | } | 91 | } |
| 94 | 92 | ||
| @@ -145,7 +143,7 @@ static void wait_current_trans(struct btrfs_root *root) | |||
| 145 | cur_trans = root->fs_info->running_transaction; | 143 | cur_trans = root->fs_info->running_transaction; |
| 146 | if (cur_trans && cur_trans->blocked) { | 144 | if (cur_trans && cur_trans->blocked) { |
| 147 | DEFINE_WAIT(wait); | 145 | DEFINE_WAIT(wait); |
| 148 | cur_trans->use_count++; | 146 | atomic_inc(&cur_trans->use_count); |
| 149 | while (1) { | 147 | while (1) { |
| 150 | prepare_to_wait(&root->fs_info->transaction_wait, &wait, | 148 | prepare_to_wait(&root->fs_info->transaction_wait, &wait, |
| 151 | TASK_UNINTERRUPTIBLE); | 149 | TASK_UNINTERRUPTIBLE); |
| @@ -181,6 +179,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, | |||
| 181 | { | 179 | { |
| 182 | struct btrfs_trans_handle *h; | 180 | struct btrfs_trans_handle *h; |
| 183 | struct btrfs_transaction *cur_trans; | 181 | struct btrfs_transaction *cur_trans; |
| 182 | int retries = 0; | ||
| 184 | int ret; | 183 | int ret; |
| 185 | 184 | ||
| 186 | if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) | 185 | if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) |
| @@ -204,7 +203,7 @@ again: | |||
| 204 | } | 203 | } |
| 205 | 204 | ||
| 206 | cur_trans = root->fs_info->running_transaction; | 205 | cur_trans = root->fs_info->running_transaction; |
| 207 | cur_trans->use_count++; | 206 | atomic_inc(&cur_trans->use_count); |
| 208 | if (type != TRANS_JOIN_NOLOCK) | 207 | if (type != TRANS_JOIN_NOLOCK) |
| 209 | mutex_unlock(&root->fs_info->trans_mutex); | 208 | mutex_unlock(&root->fs_info->trans_mutex); |
| 210 | 209 | ||
| @@ -224,10 +223,18 @@ again: | |||
| 224 | 223 | ||
| 225 | if (num_items > 0) { | 224 | if (num_items > 0) { |
| 226 | ret = btrfs_trans_reserve_metadata(h, root, num_items); | 225 | ret = btrfs_trans_reserve_metadata(h, root, num_items); |
| 227 | if (ret == -EAGAIN) { | 226 | if (ret == -EAGAIN && !retries) { |
| 227 | retries++; | ||
| 228 | btrfs_commit_transaction(h, root); | 228 | btrfs_commit_transaction(h, root); |
| 229 | goto again; | 229 | goto again; |
| 230 | } else if (ret == -EAGAIN) { | ||
| 231 | /* | ||
| 232 | * We have already retried and got EAGAIN, so really we | ||
| 233 | * don't have space, so set ret to -ENOSPC. | ||
| 234 | */ | ||
| 235 | ret = -ENOSPC; | ||
| 230 | } | 236 | } |
| 237 | |||
| 231 | if (ret < 0) { | 238 | if (ret < 0) { |
| 232 | btrfs_end_transaction(h, root); | 239 | btrfs_end_transaction(h, root); |
| 233 | return ERR_PTR(ret); | 240 | return ERR_PTR(ret); |
| @@ -327,7 +334,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) | |||
| 327 | goto out_unlock; /* nothing committing|committed */ | 334 | goto out_unlock; /* nothing committing|committed */ |
| 328 | } | 335 | } |
| 329 | 336 | ||
| 330 | cur_trans->use_count++; | 337 | atomic_inc(&cur_trans->use_count); |
| 331 | mutex_unlock(&root->fs_info->trans_mutex); | 338 | mutex_unlock(&root->fs_info->trans_mutex); |
| 332 | 339 | ||
| 333 | wait_for_commit(root, cur_trans); | 340 | wait_for_commit(root, cur_trans); |
| @@ -457,18 +464,14 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, | |||
| 457 | wake_up_process(info->transaction_kthread); | 464 | wake_up_process(info->transaction_kthread); |
| 458 | } | 465 | } |
| 459 | 466 | ||
| 460 | if (lock) | ||
| 461 | mutex_lock(&info->trans_mutex); | ||
| 462 | WARN_ON(cur_trans != info->running_transaction); | 467 | WARN_ON(cur_trans != info->running_transaction); |
| 463 | WARN_ON(cur_trans->num_writers < 1); | 468 | WARN_ON(atomic_read(&cur_trans->num_writers) < 1); |
| 464 | cur_trans->num_writers--; | 469 | atomic_dec(&cur_trans->num_writers); |
| 465 | 470 | ||
| 466 | smp_mb(); | 471 | smp_mb(); |
| 467 | if (waitqueue_active(&cur_trans->writer_wait)) | 472 | if (waitqueue_active(&cur_trans->writer_wait)) |
| 468 | wake_up(&cur_trans->writer_wait); | 473 | wake_up(&cur_trans->writer_wait); |
| 469 | put_transaction(cur_trans); | 474 | put_transaction(cur_trans); |
| 470 | if (lock) | ||
| 471 | mutex_unlock(&info->trans_mutex); | ||
| 472 | 475 | ||
| 473 | if (current->journal_info == trans) | 476 | if (current->journal_info == trans) |
| 474 | current->journal_info = NULL; | 477 | current->journal_info = NULL; |
| @@ -1178,7 +1181,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, | |||
| 1178 | /* take transaction reference */ | 1181 | /* take transaction reference */ |
| 1179 | mutex_lock(&root->fs_info->trans_mutex); | 1182 | mutex_lock(&root->fs_info->trans_mutex); |
| 1180 | cur_trans = trans->transaction; | 1183 | cur_trans = trans->transaction; |
| 1181 | cur_trans->use_count++; | 1184 | atomic_inc(&cur_trans->use_count); |
| 1182 | mutex_unlock(&root->fs_info->trans_mutex); | 1185 | mutex_unlock(&root->fs_info->trans_mutex); |
| 1183 | 1186 | ||
| 1184 | btrfs_end_transaction(trans, root); | 1187 | btrfs_end_transaction(trans, root); |
| @@ -1237,7 +1240,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
| 1237 | 1240 | ||
| 1238 | mutex_lock(&root->fs_info->trans_mutex); | 1241 | mutex_lock(&root->fs_info->trans_mutex); |
| 1239 | if (cur_trans->in_commit) { | 1242 | if (cur_trans->in_commit) { |
| 1240 | cur_trans->use_count++; | 1243 | atomic_inc(&cur_trans->use_count); |
| 1241 | mutex_unlock(&root->fs_info->trans_mutex); | 1244 | mutex_unlock(&root->fs_info->trans_mutex); |
| 1242 | btrfs_end_transaction(trans, root); | 1245 | btrfs_end_transaction(trans, root); |
| 1243 | 1246 | ||
| @@ -1259,7 +1262,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
| 1259 | prev_trans = list_entry(cur_trans->list.prev, | 1262 | prev_trans = list_entry(cur_trans->list.prev, |
| 1260 | struct btrfs_transaction, list); | 1263 | struct btrfs_transaction, list); |
| 1261 | if (!prev_trans->commit_done) { | 1264 | if (!prev_trans->commit_done) { |
| 1262 | prev_trans->use_count++; | 1265 | atomic_inc(&prev_trans->use_count); |
| 1263 | mutex_unlock(&root->fs_info->trans_mutex); | 1266 | mutex_unlock(&root->fs_info->trans_mutex); |
| 1264 | 1267 | ||
| 1265 | wait_for_commit(root, prev_trans); | 1268 | wait_for_commit(root, prev_trans); |
| @@ -1300,14 +1303,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
| 1300 | TASK_UNINTERRUPTIBLE); | 1303 | TASK_UNINTERRUPTIBLE); |
| 1301 | 1304 | ||
| 1302 | smp_mb(); | 1305 | smp_mb(); |
| 1303 | if (cur_trans->num_writers > 1) | 1306 | if (atomic_read(&cur_trans->num_writers) > 1) |
| 1304 | schedule_timeout(MAX_SCHEDULE_TIMEOUT); | 1307 | schedule_timeout(MAX_SCHEDULE_TIMEOUT); |
| 1305 | else if (should_grow) | 1308 | else if (should_grow) |
| 1306 | schedule_timeout(1); | 1309 | schedule_timeout(1); |
| 1307 | 1310 | ||
| 1308 | mutex_lock(&root->fs_info->trans_mutex); | 1311 | mutex_lock(&root->fs_info->trans_mutex); |
| 1309 | finish_wait(&cur_trans->writer_wait, &wait); | 1312 | finish_wait(&cur_trans->writer_wait, &wait); |
| 1310 | } while (cur_trans->num_writers > 1 || | 1313 | } while (atomic_read(&cur_trans->num_writers) > 1 || |
| 1311 | (should_grow && cur_trans->num_joined != joined)); | 1314 | (should_grow && cur_trans->num_joined != joined)); |
| 1312 | 1315 | ||
| 1313 | ret = create_pending_snapshots(trans, root->fs_info); | 1316 | ret = create_pending_snapshots(trans, root->fs_info); |
| @@ -1394,6 +1397,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
| 1394 | 1397 | ||
| 1395 | wake_up(&cur_trans->commit_wait); | 1398 | wake_up(&cur_trans->commit_wait); |
| 1396 | 1399 | ||
| 1400 | list_del_init(&cur_trans->list); | ||
| 1397 | put_transaction(cur_trans); | 1401 | put_transaction(cur_trans); |
| 1398 | put_transaction(cur_trans); | 1402 | put_transaction(cur_trans); |
| 1399 | 1403 | ||
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 229a594cacd5..e441acc6c584 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h | |||
| @@ -27,11 +27,11 @@ struct btrfs_transaction { | |||
| 27 | * total writers in this transaction, it must be zero before the | 27 | * total writers in this transaction, it must be zero before the |
| 28 | * transaction can end | 28 | * transaction can end |
| 29 | */ | 29 | */ |
| 30 | unsigned long num_writers; | 30 | atomic_t num_writers; |
| 31 | 31 | ||
| 32 | unsigned long num_joined; | 32 | unsigned long num_joined; |
| 33 | int in_commit; | 33 | int in_commit; |
| 34 | int use_count; | 34 | atomic_t use_count; |
| 35 | int commit_done; | 35 | int commit_done; |
| 36 | int blocked; | 36 | int blocked; |
| 37 | struct list_head list; | 37 | struct list_head list; |
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index a5303b871b13..cfd660550ded 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c | |||
| @@ -180,11 +180,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) | |||
| 180 | struct btrfs_path *path; | 180 | struct btrfs_path *path; |
| 181 | struct extent_buffer *leaf; | 181 | struct extent_buffer *leaf; |
| 182 | struct btrfs_dir_item *di; | 182 | struct btrfs_dir_item *di; |
| 183 | int ret = 0, slot, advance; | 183 | int ret = 0, slot; |
| 184 | size_t total_size = 0, size_left = size; | 184 | size_t total_size = 0, size_left = size; |
| 185 | unsigned long name_ptr; | 185 | unsigned long name_ptr; |
| 186 | size_t name_len; | 186 | size_t name_len; |
| 187 | u32 nritems; | ||
| 188 | 187 | ||
| 189 | /* | 188 | /* |
| 190 | * ok we want all objects associated with this id. | 189 | * ok we want all objects associated with this id. |
| @@ -204,34 +203,24 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) | |||
| 204 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 203 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| 205 | if (ret < 0) | 204 | if (ret < 0) |
| 206 | goto err; | 205 | goto err; |
| 207 | advance = 0; | 206 | |
| 208 | while (1) { | 207 | while (1) { |
| 209 | leaf = path->nodes[0]; | 208 | leaf = path->nodes[0]; |
| 210 | nritems = btrfs_header_nritems(leaf); | ||
| 211 | slot = path->slots[0]; | 209 | slot = path->slots[0]; |
| 212 | 210 | ||
| 213 | /* this is where we start walking through the path */ | 211 | /* this is where we start walking through the path */ |
| 214 | if (advance || slot >= nritems) { | 212 | if (slot >= btrfs_header_nritems(leaf)) { |
| 215 | /* | 213 | /* |
| 216 | * if we've reached the last slot in this leaf we need | 214 | * if we've reached the last slot in this leaf we need |
| 217 | * to go to the next leaf and reset everything | 215 | * to go to the next leaf and reset everything |
| 218 | */ | 216 | */ |
| 219 | if (slot >= nritems-1) { | 217 | ret = btrfs_next_leaf(root, path); |
| 220 | ret = btrfs_next_leaf(root, path); | 218 | if (ret < 0) |
| 221 | if (ret) | 219 | goto err; |
| 222 | break; | 220 | else if (ret > 0) |
| 223 | leaf = path->nodes[0]; | 221 | break; |
| 224 | nritems = btrfs_header_nritems(leaf); | 222 | continue; |
| 225 | slot = path->slots[0]; | ||
| 226 | } else { | ||
| 227 | /* | ||
| 228 | * just walking through the slots on this leaf | ||
| 229 | */ | ||
| 230 | slot++; | ||
| 231 | path->slots[0]++; | ||
| 232 | } | ||
| 233 | } | 223 | } |
| 234 | advance = 1; | ||
| 235 | 224 | ||
| 236 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | 225 | btrfs_item_key_to_cpu(leaf, &found_key, slot); |
| 237 | 226 | ||
| @@ -250,7 +239,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) | |||
| 250 | 239 | ||
| 251 | /* we are just looking for how big our buffer needs to be */ | 240 | /* we are just looking for how big our buffer needs to be */ |
| 252 | if (!size) | 241 | if (!size) |
| 253 | continue; | 242 | goto next; |
| 254 | 243 | ||
| 255 | if (!buffer || (name_len + 1) > size_left) { | 244 | if (!buffer || (name_len + 1) > size_left) { |
| 256 | ret = -ERANGE; | 245 | ret = -ERANGE; |
| @@ -263,6 +252,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) | |||
| 263 | 252 | ||
| 264 | size_left -= name_len + 1; | 253 | size_left -= name_len + 1; |
| 265 | buffer += name_len + 1; | 254 | buffer += name_len + 1; |
| 255 | next: | ||
| 256 | path->slots[0]++; | ||
| 266 | } | 257 | } |
| 267 | ret = total_size; | 258 | ret = total_size; |
| 268 | 259 | ||
diff --git a/fs/cifs/README b/fs/cifs/README index fe1683590828..74ab165fc646 100644 --- a/fs/cifs/README +++ b/fs/cifs/README | |||
| @@ -685,22 +685,6 @@ LinuxExtensionsEnabled If set to one then the client will attempt to | |||
| 685 | support and want to map the uid and gid fields | 685 | support and want to map the uid and gid fields |
| 686 | to values supplied at mount (rather than the | 686 | to values supplied at mount (rather than the |
| 687 | actual values, then set this to zero. (default 1) | 687 | actual values, then set this to zero. (default 1) |
| 688 | Experimental When set to 1 used to enable certain experimental | ||
| 689 | features (currently enables multipage writes | ||
| 690 | when signing is enabled, the multipage write | ||
| 691 | performance enhancement was disabled when | ||
| 692 | signing turned on in case buffer was modified | ||
| 693 | just before it was sent, also this flag will | ||
| 694 | be used to use the new experimental directory change | ||
| 695 | notification code). When set to 2 enables | ||
| 696 | an additional experimental feature, "raw ntlmssp" | ||
| 697 | session establishment support (which allows | ||
| 698 | specifying "sec=ntlmssp" on mount). The Linux cifs | ||
| 699 | module will use ntlmv2 authentication encapsulated | ||
| 700 | in "raw ntlmssp" (not using SPNEGO) when | ||
| 701 | "sec=ntlmssp" is specified on mount. | ||
| 702 | This support also requires building cifs with | ||
| 703 | the CONFIG_CIFS_EXPERIMENTAL configuration flag. | ||
| 704 | 688 | ||
| 705 | These experimental features and tracing can be enabled by changing flags in | 689 | These experimental features and tracing can be enabled by changing flags in |
| 706 | /proc/fs/cifs (after the cifs module has been installed or built into the | 690 | /proc/fs/cifs (after the cifs module has been installed or built into the |
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c index e654dfd092c3..53d57a3fe427 100644 --- a/fs/cifs/cache.c +++ b/fs/cifs/cache.c | |||
| @@ -50,7 +50,7 @@ void cifs_fscache_unregister(void) | |||
| 50 | */ | 50 | */ |
| 51 | struct cifs_server_key { | 51 | struct cifs_server_key { |
| 52 | uint16_t family; /* address family */ | 52 | uint16_t family; /* address family */ |
| 53 | uint16_t port; /* IP port */ | 53 | __be16 port; /* IP port */ |
| 54 | union { | 54 | union { |
| 55 | struct in_addr ipv4_addr; | 55 | struct in_addr ipv4_addr; |
| 56 | struct in6_addr ipv6_addr; | 56 | struct in6_addr ipv6_addr; |
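Typing the fscache key's port as __be16 records that it carries the network-order value taken from the socket address, and lets sparse flag any mix-up with host-order integers. Illustrative use only (the sockaddr source below is an assumption, not taken from the patch):

    struct cifs_server_key key;
    struct sockaddr_in *sa = (struct sockaddr_in *)addr;   /* assumed input */

    key.family = sa->sin_family;
    key.port = sa->sin_port;        /* sin_port is already __be16 */
    /* a host-order constant needs an explicit conversion: */
    key.port = htons(445);
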
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 65829d32128c..30d01bc90855 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
| @@ -423,7 +423,6 @@ static const struct file_operations cifs_lookup_cache_proc_fops; | |||
| 423 | static const struct file_operations traceSMB_proc_fops; | 423 | static const struct file_operations traceSMB_proc_fops; |
| 424 | static const struct file_operations cifs_multiuser_mount_proc_fops; | 424 | static const struct file_operations cifs_multiuser_mount_proc_fops; |
| 425 | static const struct file_operations cifs_security_flags_proc_fops; | 425 | static const struct file_operations cifs_security_flags_proc_fops; |
| 426 | static const struct file_operations cifs_experimental_proc_fops; | ||
| 427 | static const struct file_operations cifs_linux_ext_proc_fops; | 426 | static const struct file_operations cifs_linux_ext_proc_fops; |
| 428 | 427 | ||
| 429 | void | 428 | void |
| @@ -441,8 +440,6 @@ cifs_proc_init(void) | |||
| 441 | proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops); | 440 | proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops); |
| 442 | proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops); | 441 | proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops); |
| 443 | proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops); | 442 | proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops); |
| 444 | proc_create("Experimental", 0, proc_fs_cifs, | ||
| 445 | &cifs_experimental_proc_fops); | ||
| 446 | proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs, | 443 | proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs, |
| 447 | &cifs_linux_ext_proc_fops); | 444 | &cifs_linux_ext_proc_fops); |
| 448 | proc_create("MultiuserMount", 0, proc_fs_cifs, | 445 | proc_create("MultiuserMount", 0, proc_fs_cifs, |
| @@ -469,7 +466,6 @@ cifs_proc_clean(void) | |||
| 469 | remove_proc_entry("OplockEnabled", proc_fs_cifs); | 466 | remove_proc_entry("OplockEnabled", proc_fs_cifs); |
| 470 | remove_proc_entry("SecurityFlags", proc_fs_cifs); | 467 | remove_proc_entry("SecurityFlags", proc_fs_cifs); |
| 471 | remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs); | 468 | remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs); |
| 472 | remove_proc_entry("Experimental", proc_fs_cifs); | ||
| 473 | remove_proc_entry("LookupCacheEnabled", proc_fs_cifs); | 469 | remove_proc_entry("LookupCacheEnabled", proc_fs_cifs); |
| 474 | remove_proc_entry("fs/cifs", NULL); | 470 | remove_proc_entry("fs/cifs", NULL); |
| 475 | } | 471 | } |
| @@ -550,45 +546,6 @@ static const struct file_operations cifs_oplock_proc_fops = { | |||
| 550 | .write = cifs_oplock_proc_write, | 546 | .write = cifs_oplock_proc_write, |
| 551 | }; | 547 | }; |
| 552 | 548 | ||
| 553 | static int cifs_experimental_proc_show(struct seq_file *m, void *v) | ||
| 554 | { | ||
| 555 | seq_printf(m, "%d\n", experimEnabled); | ||
| 556 | return 0; | ||
| 557 | } | ||
| 558 | |||
| 559 | static int cifs_experimental_proc_open(struct inode *inode, struct file *file) | ||
| 560 | { | ||
| 561 | return single_open(file, cifs_experimental_proc_show, NULL); | ||
| 562 | } | ||
| 563 | |||
| 564 | static ssize_t cifs_experimental_proc_write(struct file *file, | ||
| 565 | const char __user *buffer, size_t count, loff_t *ppos) | ||
| 566 | { | ||
| 567 | char c; | ||
| 568 | int rc; | ||
| 569 | |||
| 570 | rc = get_user(c, buffer); | ||
| 571 | if (rc) | ||
| 572 | return rc; | ||
| 573 | if (c == '0' || c == 'n' || c == 'N') | ||
| 574 | experimEnabled = 0; | ||
| 575 | else if (c == '1' || c == 'y' || c == 'Y') | ||
| 576 | experimEnabled = 1; | ||
| 577 | else if (c == '2') | ||
| 578 | experimEnabled = 2; | ||
| 579 | |||
| 580 | return count; | ||
| 581 | } | ||
| 582 | |||
| 583 | static const struct file_operations cifs_experimental_proc_fops = { | ||
| 584 | .owner = THIS_MODULE, | ||
| 585 | .open = cifs_experimental_proc_open, | ||
| 586 | .read = seq_read, | ||
| 587 | .llseek = seq_lseek, | ||
| 588 | .release = single_release, | ||
| 589 | .write = cifs_experimental_proc_write, | ||
| 590 | }; | ||
| 591 | |||
| 592 | static int cifs_linux_ext_proc_show(struct seq_file *m, void *v) | 549 | static int cifs_linux_ext_proc_show(struct seq_file *m, void *v) |
| 593 | { | 550 | { |
| 594 | seq_printf(m, "%d\n", linuxExtEnabled); | 551 | seq_printf(m, "%d\n", linuxExtEnabled); |
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 4dfba8283165..33d221394aca 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c | |||
| @@ -113,7 +113,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) | |||
| 113 | MAX_MECH_STR_LEN + | 113 | MAX_MECH_STR_LEN + |
| 114 | UID_KEY_LEN + (sizeof(uid_t) * 2) + | 114 | UID_KEY_LEN + (sizeof(uid_t) * 2) + |
| 115 | CREDUID_KEY_LEN + (sizeof(uid_t) * 2) + | 115 | CREDUID_KEY_LEN + (sizeof(uid_t) * 2) + |
| 116 | USER_KEY_LEN + strlen(sesInfo->userName) + | 116 | USER_KEY_LEN + strlen(sesInfo->user_name) + |
| 117 | PID_KEY_LEN + (sizeof(pid_t) * 2) + 1; | 117 | PID_KEY_LEN + (sizeof(pid_t) * 2) + 1; |
| 118 | 118 | ||
| 119 | spnego_key = ERR_PTR(-ENOMEM); | 119 | spnego_key = ERR_PTR(-ENOMEM); |
| @@ -153,7 +153,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) | |||
| 153 | sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid); | 153 | sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid); |
| 154 | 154 | ||
| 155 | dp = description + strlen(description); | 155 | dp = description + strlen(description); |
| 156 | sprintf(dp, ";user=%s", sesInfo->userName); | 156 | sprintf(dp, ";user=%s", sesInfo->user_name); |
| 157 | 157 | ||
| 158 | dp = description + strlen(description); | 158 | dp = description + strlen(description); |
| 159 | sprintf(dp, ";pid=0x%x", current->pid); | 159 | sprintf(dp, ";pid=0x%x", current->pid); |
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index fc0fd4fde306..23d43cde4306 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c | |||
| @@ -90,7 +90,7 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp, | |||
| 90 | case UNI_COLON: | 90 | case UNI_COLON: |
| 91 | *target = ':'; | 91 | *target = ':'; |
| 92 | break; | 92 | break; |
| 93 | case UNI_ASTERIK: | 93 | case UNI_ASTERISK: |
| 94 | *target = '*'; | 94 | *target = '*'; |
| 95 | break; | 95 | break; |
| 96 | case UNI_QUESTION: | 96 | case UNI_QUESTION: |
| @@ -264,40 +264,40 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode, | |||
| 264 | * names are little endian 16 bit Unicode on the wire | 264 | * names are little endian 16 bit Unicode on the wire |
| 265 | */ | 265 | */ |
| 266 | int | 266 | int |
| 267 | cifsConvertToUCS(__le16 *target, const char *source, int maxlen, | 267 | cifsConvertToUCS(__le16 *target, const char *source, int srclen, |
| 268 | const struct nls_table *cp, int mapChars) | 268 | const struct nls_table *cp, int mapChars) |
| 269 | { | 269 | { |
| 270 | int i, j, charlen; | 270 | int i, j, charlen; |
| 271 | int len_remaining = maxlen; | ||
| 272 | char src_char; | 271 | char src_char; |
| 273 | __u16 temp; | 272 | __le16 dst_char; |
| 273 | wchar_t tmp; | ||
| 274 | 274 | ||
| 275 | if (!mapChars) | 275 | if (!mapChars) |
| 276 | return cifs_strtoUCS(target, source, PATH_MAX, cp); | 276 | return cifs_strtoUCS(target, source, PATH_MAX, cp); |
| 277 | 277 | ||
| 278 | for (i = 0, j = 0; i < maxlen; j++) { | 278 | for (i = 0, j = 0; i < srclen; j++) { |
| 279 | src_char = source[i]; | 279 | src_char = source[i]; |
| 280 | switch (src_char) { | 280 | switch (src_char) { |
| 281 | case 0: | 281 | case 0: |
| 282 | put_unaligned_le16(0, &target[j]); | 282 | put_unaligned(0, &target[j]); |
| 283 | goto ctoUCS_out; | 283 | goto ctoUCS_out; |
| 284 | case ':': | 284 | case ':': |
| 285 | temp = UNI_COLON; | 285 | dst_char = cpu_to_le16(UNI_COLON); |
| 286 | break; | 286 | break; |
| 287 | case '*': | 287 | case '*': |
| 288 | temp = UNI_ASTERIK; | 288 | dst_char = cpu_to_le16(UNI_ASTERISK); |
| 289 | break; | 289 | break; |
| 290 | case '?': | 290 | case '?': |
| 291 | temp = UNI_QUESTION; | 291 | dst_char = cpu_to_le16(UNI_QUESTION); |
| 292 | break; | 292 | break; |
| 293 | case '<': | 293 | case '<': |
| 294 | temp = UNI_LESSTHAN; | 294 | dst_char = cpu_to_le16(UNI_LESSTHAN); |
| 295 | break; | 295 | break; |
| 296 | case '>': | 296 | case '>': |
| 297 | temp = UNI_GRTRTHAN; | 297 | dst_char = cpu_to_le16(UNI_GRTRTHAN); |
| 298 | break; | 298 | break; |
| 299 | case '|': | 299 | case '|': |
| 300 | temp = UNI_PIPE; | 300 | dst_char = cpu_to_le16(UNI_PIPE); |
| 301 | break; | 301 | break; |
| 302 | /* | 302 | /* |
| 303 | * FIXME: We can not handle remapping backslash (UNI_SLASH) | 303 | * FIXME: We can not handle remapping backslash (UNI_SLASH) |
| @@ -305,17 +305,17 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen, | |||
| 305 | * as they use backslash as separator. | 305 | * as they use backslash as separator. |
| 306 | */ | 306 | */ |
| 307 | default: | 307 | default: |
| 308 | charlen = cp->char2uni(source+i, len_remaining, | 308 | charlen = cp->char2uni(source + i, srclen - i, &tmp); |
| 309 | &temp); | 309 | dst_char = cpu_to_le16(tmp); |
| 310 | |||
| 310 | /* | 311 | /* |
| 311 | * if no match, use question mark, which at least in | 312 | * if no match, use question mark, which at least in |
| 312 | * some cases serves as wild card | 313 | * some cases serves as wild card |
| 313 | */ | 314 | */ |
| 314 | if (charlen < 1) { | 315 | if (charlen < 1) { |
| 315 | temp = 0x003f; | 316 | dst_char = cpu_to_le16(0x003f); |
| 316 | charlen = 1; | 317 | charlen = 1; |
| 317 | } | 318 | } |
| 318 | len_remaining -= charlen; | ||
| 319 | /* | 319 | /* |
| 320 | * character may take more than one byte in the source | 320 | * character may take more than one byte in the source |
| 321 | * string, but will take exactly two bytes in the | 321 | * string, but will take exactly two bytes in the |
| @@ -324,9 +324,8 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen, | |||
| 324 | i += charlen; | 324 | i += charlen; |
| 325 | continue; | 325 | continue; |
| 326 | } | 326 | } |
| 327 | put_unaligned_le16(temp, &target[j]); | 327 | put_unaligned(dst_char, &target[j]); |
| 328 | i++; /* move to next char in source string */ | 328 | i++; /* move to next char in source string */ |
| 329 | len_remaining--; | ||
| 330 | } | 329 | } |
| 331 | 330 | ||
| 332 | ctoUCS_out: | 331 | ctoUCS_out: |
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h index 7fe6b52df507..644dd882a560 100644 --- a/fs/cifs/cifs_unicode.h +++ b/fs/cifs/cifs_unicode.h | |||
| @@ -44,7 +44,7 @@ | |||
| 44 | * reserved symbols (along with \ and /), otherwise illegal to store | 44 | * reserved symbols (along with \ and /), otherwise illegal to store |
| 45 | * in filenames in NTFS | 45 | * in filenames in NTFS |
| 46 | */ | 46 | */ |
| 47 | #define UNI_ASTERIK (__u16) ('*' + 0xF000) | 47 | #define UNI_ASTERISK (__u16) ('*' + 0xF000) |
| 48 | #define UNI_QUESTION (__u16) ('?' + 0xF000) | 48 | #define UNI_QUESTION (__u16) ('?' + 0xF000) |
| 49 | #define UNI_COLON (__u16) (':' + 0xF000) | 49 | #define UNI_COLON (__u16) (':' + 0xF000) |
| 50 | #define UNI_GRTRTHAN (__u16) ('>' + 0xF000) | 50 | #define UNI_GRTRTHAN (__u16) ('>' + 0xF000) |
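Besides the UNI_ASTERIK -> UNI_ASTERISK spelling fix, the cifsConvertToUCS() rework keeps the destination character in an __le16 (dst_char), so the byte swap happens exactly once via cpu_to_le16() before the unaligned store. For the remapped reserved symbols the value is simply the ASCII code offset into the 0xF000 private range, e.g. (target and j as in the function above):

    /* '*' becomes 0xF02A, stored little-endian in the on-the-wire name */
    put_unaligned(cpu_to_le16(UNI_ASTERISK), &target[j]);
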
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index a51585f9852b..d1a016be73ba 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
| @@ -30,12 +30,13 @@ | |||
| 30 | #include <linux/ctype.h> | 30 | #include <linux/ctype.h> |
| 31 | #include <linux/random.h> | 31 | #include <linux/random.h> |
| 32 | 32 | ||
| 33 | /* Calculate and return the CIFS signature based on the mac key and SMB PDU */ | 33 | /* |
| 34 | /* the 16 byte signature must be allocated by the caller */ | 34 | * Calculate and return the CIFS signature based on the mac key and SMB PDU. |
| 35 | /* Note we only use the 1st eight bytes */ | 35 | * The 16 byte signature must be allocated by the caller. Note we only use the |
| 36 | /* Note that the smb header signature field on input contains the | 36 | * 1st eight bytes and that the smb header signature field on input contains |
| 37 | sequence number before this function is called */ | 37 | * the sequence number before this function is called. Also, this function |
| 38 | 38 | * should be called with the server->srv_mutex held. | |
| 39 | */ | ||
| 39 | static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, | 40 | static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, |
| 40 | struct TCP_Server_Info *server, char *signature) | 41 | struct TCP_Server_Info *server, char *signature) |
| 41 | { | 42 | { |
| @@ -209,8 +210,10 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu, | |||
| 209 | cpu_to_le32(expected_sequence_number); | 210 | cpu_to_le32(expected_sequence_number); |
| 210 | cifs_pdu->Signature.Sequence.Reserved = 0; | 211 | cifs_pdu->Signature.Sequence.Reserved = 0; |
| 211 | 212 | ||
| 213 | mutex_lock(&server->srv_mutex); | ||
| 212 | rc = cifs_calculate_signature(cifs_pdu, server, | 214 | rc = cifs_calculate_signature(cifs_pdu, server, |
| 213 | what_we_think_sig_should_be); | 215 | what_we_think_sig_should_be); |
| 216 | mutex_unlock(&server->srv_mutex); | ||
| 214 | 217 | ||
| 215 | if (rc) | 218 | if (rc) |
| 216 | return rc; | 219 | return rc; |
| @@ -469,15 +472,15 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash, | |||
| 469 | return rc; | 472 | return rc; |
| 470 | } | 473 | } |
| 471 | 474 | ||
| 472 | /* convert ses->userName to unicode and uppercase */ | 475 | /* convert ses->user_name to unicode and uppercase */ |
| 473 | len = strlen(ses->userName); | 476 | len = strlen(ses->user_name); |
| 474 | user = kmalloc(2 + (len * 2), GFP_KERNEL); | 477 | user = kmalloc(2 + (len * 2), GFP_KERNEL); |
| 475 | if (user == NULL) { | 478 | if (user == NULL) { |
| 476 | cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n"); | 479 | cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n"); |
| 477 | rc = -ENOMEM; | 480 | rc = -ENOMEM; |
| 478 | goto calc_exit_2; | 481 | goto calc_exit_2; |
| 479 | } | 482 | } |
| 480 | len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp); | 483 | len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp); |
| 481 | UniStrupr(user); | 484 | UniStrupr(user); |
| 482 | 485 | ||
| 483 | crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, | 486 | crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, |
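The rewritten comment in this hunk spells out the calling contract for cifs_calculate_signature: the caller supplies the 16 byte buffer, stores the sequence number in the header's signature field first, and holds server->srv_mutex across the call. A minimal caller sketch, illustration only and not part of this patch; sign_outgoing_smb is a made-up name and the server sequence counter field is assumed from the surrounding code:

/* Hypothetical caller, shown only to illustrate the contract above. */
static int sign_outgoing_smb(struct smb_hdr *cifs_pdu,
                             struct TCP_Server_Info *server)
{
        char signature[16];     /* allocated by the caller */
        int rc;

        cifs_pdu->Signature.Sequence.SequenceNumber =
                                cpu_to_le32(server->sequence_number);
        cifs_pdu->Signature.Sequence.Reserved = 0;

        mutex_lock(&server->srv_mutex);
        rc = cifs_calculate_signature(cifs_pdu, server, signature);
        mutex_unlock(&server->srv_mutex);
        if (rc)
                return rc;

        /* only the first eight bytes of the digest go on the wire */
        memcpy(cifs_pdu->Signature.SecuritySignature, signature, 8);
        return 0;
}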
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f2970136d17d..5c412b33cd7c 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
| @@ -53,7 +53,6 @@ int cifsFYI = 0; | |||
| 53 | int cifsERROR = 1; | 53 | int cifsERROR = 1; |
| 54 | int traceSMB = 0; | 54 | int traceSMB = 0; |
| 55 | unsigned int oplockEnabled = 1; | 55 | unsigned int oplockEnabled = 1; |
| 56 | unsigned int experimEnabled = 0; | ||
| 57 | unsigned int linuxExtEnabled = 1; | 56 | unsigned int linuxExtEnabled = 1; |
| 58 | unsigned int lookupCacheEnabled = 1; | 57 | unsigned int lookupCacheEnabled = 1; |
| 59 | unsigned int multiuser_mount = 0; | 58 | unsigned int multiuser_mount = 0; |
| @@ -127,6 +126,7 @@ cifs_read_super(struct super_block *sb, void *data, | |||
| 127 | kfree(cifs_sb); | 126 | kfree(cifs_sb); |
| 128 | return rc; | 127 | return rc; |
| 129 | } | 128 | } |
| 129 | cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages; | ||
| 130 | 130 | ||
| 131 | #ifdef CONFIG_CIFS_DFS_UPCALL | 131 | #ifdef CONFIG_CIFS_DFS_UPCALL |
| 132 | /* copy mount params to sb for use in submounts */ | 132 | /* copy mount params to sb for use in submounts */ |
| @@ -409,8 +409,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m) | |||
| 409 | 409 | ||
| 410 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) | 410 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) |
| 411 | seq_printf(s, ",multiuser"); | 411 | seq_printf(s, ",multiuser"); |
| 412 | else if (tcon->ses->userName) | 412 | else if (tcon->ses->user_name) |
| 413 | seq_printf(s, ",username=%s", tcon->ses->userName); | 413 | seq_printf(s, ",username=%s", tcon->ses->user_name); |
| 414 | 414 | ||
| 415 | if (tcon->ses->domainName) | 415 | if (tcon->ses->domainName) |
| 416 | seq_printf(s, ",domain=%s", tcon->ses->domainName); | 416 | seq_printf(s, ",domain=%s", tcon->ses->domainName); |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 17afb0fbcaed..a5d1106fcbde 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
| @@ -37,10 +37,9 @@ | |||
| 37 | 37 | ||
| 38 | #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) | 38 | #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) |
| 39 | #define MAX_SERVER_SIZE 15 | 39 | #define MAX_SERVER_SIZE 15 |
| 40 | #define MAX_SHARE_SIZE 64 /* used to be 20, this should still be enough */ | 40 | #define MAX_SHARE_SIZE 80 |
| 41 | #define MAX_USERNAME_SIZE 32 /* 32 is to allow for 15 char names + null | 41 | #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */ |
| 42 | termination then *2 for unicode versions */ | 42 | #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ |
| 43 | #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ | ||
| 44 | 43 | ||
| 45 | #define CIFS_MIN_RCV_POOL 4 | 44 | #define CIFS_MIN_RCV_POOL 4 |
| 46 | 45 | ||
| @@ -92,7 +91,8 @@ enum statusEnum { | |||
| 92 | CifsNew = 0, | 91 | CifsNew = 0, |
| 93 | CifsGood, | 92 | CifsGood, |
| 94 | CifsExiting, | 93 | CifsExiting, |
| 95 | CifsNeedReconnect | 94 | CifsNeedReconnect, |
| 95 | CifsNeedNegotiate | ||
| 96 | }; | 96 | }; |
| 97 | 97 | ||
| 98 | enum securityEnum { | 98 | enum securityEnum { |
| @@ -274,7 +274,7 @@ struct cifsSesInfo { | |||
| 274 | int capabilities; | 274 | int capabilities; |
| 275 | char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for | 275 | char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for |
| 276 | TCP names - will ipv6 and sctp addresses fit? */ | 276 | TCP names - will ipv6 and sctp addresses fit? */ |
| 277 | char userName[MAX_USERNAME_SIZE + 1]; | 277 | char *user_name; |
| 278 | char *domainName; | 278 | char *domainName; |
| 279 | char *password; | 279 | char *password; |
| 280 | struct session_key auth_key; | 280 | struct session_key auth_key; |
| @@ -817,7 +817,6 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions | |||
| 817 | have the uid/password or Kerberos credential | 817 | have the uid/password or Kerberos credential |
| 818 | or equivalent for current user */ | 818 | or equivalent for current user */ |
| 819 | GLOBAL_EXTERN unsigned int oplockEnabled; | 819 | GLOBAL_EXTERN unsigned int oplockEnabled; |
| 820 | GLOBAL_EXTERN unsigned int experimEnabled; | ||
| 821 | GLOBAL_EXTERN unsigned int lookupCacheEnabled; | 820 | GLOBAL_EXTERN unsigned int lookupCacheEnabled; |
| 822 | GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent | 821 | GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent |
| 823 | with more secure ntlmssp2 challenge/resp */ | 822 | with more secure ntlmssp2 challenge/resp */ |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 2644a5d6cc67..df959bae6728 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
| @@ -142,9 +142,9 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) | |||
| 142 | */ | 142 | */ |
| 143 | while (server->tcpStatus == CifsNeedReconnect) { | 143 | while (server->tcpStatus == CifsNeedReconnect) { |
| 144 | wait_event_interruptible_timeout(server->response_q, | 144 | wait_event_interruptible_timeout(server->response_q, |
| 145 | (server->tcpStatus == CifsGood), 10 * HZ); | 145 | (server->tcpStatus != CifsNeedReconnect), 10 * HZ); |
| 146 | 146 | ||
| 147 | /* is TCP session is reestablished now ?*/ | 147 | /* are we still trying to reconnect? */ |
| 148 | if (server->tcpStatus != CifsNeedReconnect) | 148 | if (server->tcpStatus != CifsNeedReconnect) |
| 149 | break; | 149 | break; |
| 150 | 150 | ||
| @@ -729,7 +729,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server) | |||
| 729 | return rc; | 729 | return rc; |
| 730 | 730 | ||
| 731 | /* set up echo request */ | 731 | /* set up echo request */ |
| 732 | smb->hdr.Tid = cpu_to_le16(0xffff); | 732 | smb->hdr.Tid = 0xffff; |
| 733 | smb->hdr.WordCount = 1; | 733 | smb->hdr.WordCount = 1; |
| 734 | put_unaligned_le16(1, &smb->EchoCount); | 734 | put_unaligned_le16(1, &smb->EchoCount); |
| 735 | put_bcc_le(1, &smb->hdr); | 735 | put_bcc_le(1, &smb->hdr); |
| @@ -1884,10 +1884,10 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, | |||
| 1884 | __constant_cpu_to_le16(CIFS_WRLCK)) | 1884 | __constant_cpu_to_le16(CIFS_WRLCK)) |
| 1885 | pLockData->fl_type = F_WRLCK; | 1885 | pLockData->fl_type = F_WRLCK; |
| 1886 | 1886 | ||
| 1887 | pLockData->fl_start = parm_data->start; | 1887 | pLockData->fl_start = le64_to_cpu(parm_data->start); |
| 1888 | pLockData->fl_end = parm_data->start + | 1888 | pLockData->fl_end = pLockData->fl_start + |
| 1889 | parm_data->length - 1; | 1889 | le64_to_cpu(parm_data->length) - 1; |
| 1890 | pLockData->fl_pid = parm_data->pid; | 1890 | pLockData->fl_pid = le32_to_cpu(parm_data->pid); |
| 1891 | } | 1891 | } |
| 1892 | } | 1892 | } |
| 1893 | 1893 | ||
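The CIFSSMBPosixLock hunk above converts the little-endian on-the-wire lock fields with le64_to_cpu()/le32_to_cpu() before doing any arithmetic on them. A standalone sketch of the same decode order, using a made-up wire structure rather than the real CIFS layout:

/* Illustration only: wire_lock is invented, not the CIFS structure. */
struct wire_lock {
        __le64 start;
        __le64 length;
        __le32 pid;
};

static void decode_wire_lock(const struct wire_lock *w, struct file_lock *fl)
{
        /* convert to CPU byte order first, then compute the range */
        fl->fl_start = le64_to_cpu(w->start);
        fl->fl_end   = fl->fl_start + le64_to_cpu(w->length) - 1;
        fl->fl_pid   = le32_to_cpu(w->pid);
}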
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 6e2b2addfc78..db9d55b507d0 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
| @@ -199,8 +199,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
| 199 | } | 199 | } |
| 200 | spin_unlock(&GlobalMid_Lock); | 200 | spin_unlock(&GlobalMid_Lock); |
| 201 | 201 | ||
| 202 | while ((server->tcpStatus != CifsExiting) && | 202 | while (server->tcpStatus == CifsNeedReconnect) { |
| 203 | (server->tcpStatus != CifsGood)) { | ||
| 204 | try_to_freeze(); | 203 | try_to_freeze(); |
| 205 | 204 | ||
| 206 | /* we should try only the port we connected to before */ | 205 | /* we should try only the port we connected to before */ |
| @@ -212,7 +211,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
| 212 | atomic_inc(&tcpSesReconnectCount); | 211 | atomic_inc(&tcpSesReconnectCount); |
| 213 | spin_lock(&GlobalMid_Lock); | 212 | spin_lock(&GlobalMid_Lock); |
| 214 | if (server->tcpStatus != CifsExiting) | 213 | if (server->tcpStatus != CifsExiting) |
| 215 | server->tcpStatus = CifsGood; | 214 | server->tcpStatus = CifsNeedNegotiate; |
| 216 | spin_unlock(&GlobalMid_Lock); | 215 | spin_unlock(&GlobalMid_Lock); |
| 217 | } | 216 | } |
| 218 | } | 217 | } |
| @@ -248,24 +247,24 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize) | |||
| 248 | total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); | 247 | total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); |
| 249 | data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); | 248 | data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); |
| 250 | 249 | ||
| 251 | remaining = total_data_size - data_in_this_rsp; | 250 | if (total_data_size == data_in_this_rsp) |
| 252 | |||
| 253 | if (remaining == 0) | ||
| 254 | return 0; | 251 | return 0; |
| 255 | else if (remaining < 0) { | 252 | else if (total_data_size < data_in_this_rsp) { |
| 256 | cFYI(1, "total data %d smaller than data in frame %d", | 253 | cFYI(1, "total data %d smaller than data in frame %d", |
| 257 | total_data_size, data_in_this_rsp); | 254 | total_data_size, data_in_this_rsp); |
| 258 | return -EINVAL; | 255 | return -EINVAL; |
| 259 | } else { | ||
| 260 | cFYI(1, "missing %d bytes from transact2, check next response", | ||
| 261 | remaining); | ||
| 262 | if (total_data_size > maxBufSize) { | ||
| 263 | cERROR(1, "TotalDataSize %d is over maximum buffer %d", | ||
| 264 | total_data_size, maxBufSize); | ||
| 265 | return -EINVAL; | ||
| 266 | } | ||
| 267 | return remaining; | ||
| 268 | } | 256 | } |
| 257 | |||
| 258 | remaining = total_data_size - data_in_this_rsp; | ||
| 259 | |||
| 260 | cFYI(1, "missing %d bytes from transact2, check next response", | ||
| 261 | remaining); | ||
| 262 | if (total_data_size > maxBufSize) { | ||
| 263 | cERROR(1, "TotalDataSize %d is over maximum buffer %d", | ||
| 264 | total_data_size, maxBufSize); | ||
| 265 | return -EINVAL; | ||
| 266 | } | ||
| 267 | return remaining; | ||
| 269 | } | 268 | } |
| 270 | 269 | ||
| 271 | static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) | 270 | static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) |
| @@ -421,7 +420,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) | |||
| 421 | pdu_length = 4; /* enough to get RFC1001 header */ | 420 | pdu_length = 4; /* enough to get RFC1001 header */ |
| 422 | 421 | ||
| 423 | incomplete_rcv: | 422 | incomplete_rcv: |
| 424 | if (echo_retries > 0 && | 423 | if (echo_retries > 0 && server->tcpStatus == CifsGood && |
| 425 | time_after(jiffies, server->lstrp + | 424 | time_after(jiffies, server->lstrp + |
| 426 | (echo_retries * SMB_ECHO_INTERVAL))) { | 425 | (echo_retries * SMB_ECHO_INTERVAL))) { |
| 427 | cERROR(1, "Server %s has not responded in %d seconds. " | 426 | cERROR(1, "Server %s has not responded in %d seconds. " |
| @@ -881,7 +880,8 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
| 881 | /* null user, ie anonymous, authentication */ | 880 | /* null user, ie anonymous, authentication */ |
| 882 | vol->nullauth = 1; | 881 | vol->nullauth = 1; |
| 883 | } | 882 | } |
| 884 | if (strnlen(value, 200) < 200) { | 883 | if (strnlen(value, MAX_USERNAME_SIZE) < |
| 884 | MAX_USERNAME_SIZE) { | ||
| 885 | vol->username = value; | 885 | vol->username = value; |
| 886 | } else { | 886 | } else { |
| 887 | printk(KERN_WARNING "CIFS: username too long\n"); | 887 | printk(KERN_WARNING "CIFS: username too long\n"); |
| @@ -1472,7 +1472,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs) | |||
| 1472 | static bool | 1472 | static bool |
| 1473 | match_port(struct TCP_Server_Info *server, struct sockaddr *addr) | 1473 | match_port(struct TCP_Server_Info *server, struct sockaddr *addr) |
| 1474 | { | 1474 | { |
| 1475 | unsigned short int port, *sport; | 1475 | __be16 port, *sport; |
| 1476 | 1476 | ||
| 1477 | switch (addr->sa_family) { | 1477 | switch (addr->sa_family) { |
| 1478 | case AF_INET: | 1478 | case AF_INET: |
| @@ -1765,6 +1765,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
| 1765 | module_put(THIS_MODULE); | 1765 | module_put(THIS_MODULE); |
| 1766 | goto out_err_crypto_release; | 1766 | goto out_err_crypto_release; |
| 1767 | } | 1767 | } |
| 1768 | tcp_ses->tcpStatus = CifsNeedNegotiate; | ||
| 1768 | 1769 | ||
| 1769 | /* thread spawned, put it on the list */ | 1770 | /* thread spawned, put it on the list */ |
| 1770 | spin_lock(&cifs_tcp_ses_lock); | 1771 | spin_lock(&cifs_tcp_ses_lock); |
| @@ -1808,7 +1809,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) | |||
| 1808 | break; | 1809 | break; |
| 1809 | default: | 1810 | default: |
| 1810 | /* anything else takes username/password */ | 1811 | /* anything else takes username/password */ |
| 1811 | if (strncmp(ses->userName, vol->username, | 1812 | if (ses->user_name == NULL) |
| 1813 | continue; | ||
| 1814 | if (strncmp(ses->user_name, vol->username, | ||
| 1812 | MAX_USERNAME_SIZE)) | 1815 | MAX_USERNAME_SIZE)) |
| 1813 | continue; | 1816 | continue; |
| 1814 | if (strlen(vol->username) != 0 && | 1817 | if (strlen(vol->username) != 0 && |
| @@ -1851,6 +1854,8 @@ cifs_put_smb_ses(struct cifsSesInfo *ses) | |||
| 1851 | cifs_put_tcp_session(server); | 1854 | cifs_put_tcp_session(server); |
| 1852 | } | 1855 | } |
| 1853 | 1856 | ||
| 1857 | static bool warned_on_ntlm; /* globals init to false automatically */ | ||
| 1858 | |||
| 1854 | static struct cifsSesInfo * | 1859 | static struct cifsSesInfo * |
| 1855 | cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | 1860 | cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) |
| 1856 | { | 1861 | { |
| @@ -1906,9 +1911,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | |||
| 1906 | else | 1911 | else |
| 1907 | sprintf(ses->serverName, "%pI4", &addr->sin_addr); | 1912 | sprintf(ses->serverName, "%pI4", &addr->sin_addr); |
| 1908 | 1913 | ||
| 1909 | if (volume_info->username) | 1914 | if (volume_info->username) { |
| 1910 | strncpy(ses->userName, volume_info->username, | 1915 | ses->user_name = kstrdup(volume_info->username, GFP_KERNEL); |
| 1911 | MAX_USERNAME_SIZE); | 1916 | if (!ses->user_name) |
| 1917 | goto get_ses_fail; | ||
| 1918 | } | ||
| 1912 | 1919 | ||
| 1913 | /* volume_info->password freed at unmount */ | 1920 | /* volume_info->password freed at unmount */ |
| 1914 | if (volume_info->password) { | 1921 | if (volume_info->password) { |
| @@ -1923,6 +1930,15 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | |||
| 1923 | } | 1930 | } |
| 1924 | ses->cred_uid = volume_info->cred_uid; | 1931 | ses->cred_uid = volume_info->cred_uid; |
| 1925 | ses->linux_uid = volume_info->linux_uid; | 1932 | ses->linux_uid = volume_info->linux_uid; |
| 1933 | |||
| 1934 | /* ntlmv2 is much stronger than ntlm security, and has been broadly | ||
| 1935 | supported for many years, time to update default security mechanism */ | ||
| 1936 | if ((volume_info->secFlg == 0) && warned_on_ntlm == false) { | ||
| 1937 | warned_on_ntlm = true; | ||
| 1938 | cERROR(1, "default security mechanism requested. The default " | ||
| 1939 | "security mechanism will be upgraded from ntlm to " | ||
| 1940 | "ntlmv2 in kernel release 2.6.41"); | ||
| 1941 | } | ||
| 1926 | ses->overrideSecFlg = volume_info->secFlg; | 1942 | ses->overrideSecFlg = volume_info->secFlg; |
| 1927 | 1943 | ||
| 1928 | mutex_lock(&ses->session_mutex); | 1944 | mutex_lock(&ses->session_mutex); |
| @@ -2276,7 +2292,7 @@ static int | |||
| 2276 | generic_ip_connect(struct TCP_Server_Info *server) | 2292 | generic_ip_connect(struct TCP_Server_Info *server) |
| 2277 | { | 2293 | { |
| 2278 | int rc = 0; | 2294 | int rc = 0; |
| 2279 | unsigned short int sport; | 2295 | __be16 sport; |
| 2280 | int slen, sfamily; | 2296 | int slen, sfamily; |
| 2281 | struct socket *socket = server->ssocket; | 2297 | struct socket *socket = server->ssocket; |
| 2282 | struct sockaddr *saddr; | 2298 | struct sockaddr *saddr; |
| @@ -2361,7 +2377,7 @@ generic_ip_connect(struct TCP_Server_Info *server) | |||
| 2361 | static int | 2377 | static int |
| 2362 | ip_connect(struct TCP_Server_Info *server) | 2378 | ip_connect(struct TCP_Server_Info *server) |
| 2363 | { | 2379 | { |
| 2364 | unsigned short int *sport; | 2380 | __be16 *sport; |
| 2365 | struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; | 2381 | struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; |
| 2366 | struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; | 2382 | struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; |
| 2367 | 2383 | ||
| @@ -2826,7 +2842,7 @@ try_mount_again: | |||
| 2826 | 2842 | ||
| 2827 | remote_path_check: | 2843 | remote_path_check: |
| 2828 | /* check if a whole path (including prepath) is not remote */ | 2844 | /* check if a whole path (including prepath) is not remote */ |
| 2829 | if (!rc && cifs_sb->prepathlen && tcon) { | 2845 | if (!rc && tcon) { |
| 2830 | /* build_path_to_root works only when we have a valid tcon */ | 2846 | /* build_path_to_root works only when we have a valid tcon */ |
| 2831 | full_path = cifs_build_path_to_root(cifs_sb, tcon); | 2847 | full_path = cifs_build_path_to_root(cifs_sb, tcon); |
| 2832 | if (full_path == NULL) { | 2848 | if (full_path == NULL) { |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index c27d236738fc..faf59529e847 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
| @@ -575,8 +575,10 @@ reopen_error_exit: | |||
| 575 | 575 | ||
| 576 | int cifs_close(struct inode *inode, struct file *file) | 576 | int cifs_close(struct inode *inode, struct file *file) |
| 577 | { | 577 | { |
| 578 | cifsFileInfo_put(file->private_data); | 578 | if (file->private_data != NULL) { |
| 579 | file->private_data = NULL; | 579 | cifsFileInfo_put(file->private_data); |
| 580 | file->private_data = NULL; | ||
| 581 | } | ||
| 580 | 582 | ||
| 581 | /* return code from the ->release op is always ignored */ | 583 | /* return code from the ->release op is always ignored */ |
| 582 | return 0; | 584 | return 0; |
| @@ -970,6 +972,9 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, | |||
| 970 | total_written += bytes_written) { | 972 | total_written += bytes_written) { |
| 971 | rc = -EAGAIN; | 973 | rc = -EAGAIN; |
| 972 | while (rc == -EAGAIN) { | 974 | while (rc == -EAGAIN) { |
| 975 | struct kvec iov[2]; | ||
| 976 | unsigned int len; | ||
| 977 | |||
| 973 | if (open_file->invalidHandle) { | 978 | if (open_file->invalidHandle) { |
| 974 | /* we could deadlock if we called | 979 | /* we could deadlock if we called |
| 975 | filemap_fdatawait from here so tell | 980 | filemap_fdatawait from here so tell |
| @@ -979,31 +984,14 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, | |||
| 979 | if (rc != 0) | 984 | if (rc != 0) |
| 980 | break; | 985 | break; |
| 981 | } | 986 | } |
| 982 | if (experimEnabled || (pTcon->ses->server && | 987 | |
| 983 | ((pTcon->ses->server->secMode & | 988 | len = min((size_t)cifs_sb->wsize, |
| 984 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | 989 | write_size - total_written); |
| 985 | == 0))) { | 990 | /* iov[0] is reserved for smb header */ |
| 986 | struct kvec iov[2]; | 991 | iov[1].iov_base = (char *)write_data + total_written; |
| 987 | unsigned int len; | 992 | iov[1].iov_len = len; |
| 988 | 993 | rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len, | |
| 989 | len = min((size_t)cifs_sb->wsize, | 994 | *poffset, &bytes_written, iov, 1, 0); |
| 990 | write_size - total_written); | ||
| 991 | /* iov[0] is reserved for smb header */ | ||
| 992 | iov[1].iov_base = (char *)write_data + | ||
| 993 | total_written; | ||
| 994 | iov[1].iov_len = len; | ||
| 995 | rc = CIFSSMBWrite2(xid, pTcon, | ||
| 996 | open_file->netfid, len, | ||
| 997 | *poffset, &bytes_written, | ||
| 998 | iov, 1, 0); | ||
| 999 | } else | ||
| 1000 | rc = CIFSSMBWrite(xid, pTcon, | ||
| 1001 | open_file->netfid, | ||
| 1002 | min_t(const int, cifs_sb->wsize, | ||
| 1003 | write_size - total_written), | ||
| 1004 | *poffset, &bytes_written, | ||
| 1005 | write_data + total_written, | ||
| 1006 | NULL, 0); | ||
| 1007 | } | 995 | } |
| 1008 | if (rc || (bytes_written == 0)) { | 996 | if (rc || (bytes_written == 0)) { |
| 1009 | if (total_written) | 997 | if (total_written) |
| @@ -1240,12 +1228,6 @@ static int cifs_writepages(struct address_space *mapping, | |||
| 1240 | } | 1228 | } |
| 1241 | 1229 | ||
| 1242 | tcon = tlink_tcon(open_file->tlink); | 1230 | tcon = tlink_tcon(open_file->tlink); |
| 1243 | if (!experimEnabled && tcon->ses->server->secMode & | ||
| 1244 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { | ||
| 1245 | cifsFileInfo_put(open_file); | ||
| 1246 | kfree(iov); | ||
| 1247 | return generic_writepages(mapping, wbc); | ||
| 1248 | } | ||
| 1249 | cifsFileInfo_put(open_file); | 1231 | cifsFileInfo_put(open_file); |
| 1250 | 1232 | ||
| 1251 | xid = GetXid(); | 1233 | xid = GetXid(); |
| @@ -1980,6 +1962,24 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, | |||
| 1980 | return total_read; | 1962 | return total_read; |
| 1981 | } | 1963 | } |
| 1982 | 1964 | ||
| 1965 | /* | ||
| 1966 | * If the page is mmap'ed into a process' page tables, then we need to make | ||
| 1967 | * sure that it doesn't change while being written back. | ||
| 1968 | */ | ||
| 1969 | static int | ||
| 1970 | cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
| 1971 | { | ||
| 1972 | struct page *page = vmf->page; | ||
| 1973 | |||
| 1974 | lock_page(page); | ||
| 1975 | return VM_FAULT_LOCKED; | ||
| 1976 | } | ||
| 1977 | |||
| 1978 | static struct vm_operations_struct cifs_file_vm_ops = { | ||
| 1979 | .fault = filemap_fault, | ||
| 1980 | .page_mkwrite = cifs_page_mkwrite, | ||
| 1981 | }; | ||
| 1982 | |||
| 1983 | int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) | 1983 | int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) |
| 1984 | { | 1984 | { |
| 1985 | int rc, xid; | 1985 | int rc, xid; |
| @@ -1991,6 +1991,8 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 1991 | cifs_invalidate_mapping(inode); | 1991 | cifs_invalidate_mapping(inode); |
| 1992 | 1992 | ||
| 1993 | rc = generic_file_mmap(file, vma); | 1993 | rc = generic_file_mmap(file, vma); |
| 1994 | if (rc == 0) | ||
| 1995 | vma->vm_ops = &cifs_file_vm_ops; | ||
| 1994 | FreeXid(xid); | 1996 | FreeXid(xid); |
| 1995 | return rc; | 1997 | return rc; |
| 1996 | } | 1998 | } |
| @@ -2007,6 +2009,8 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 2007 | return rc; | 2009 | return rc; |
| 2008 | } | 2010 | } |
| 2009 | rc = generic_file_mmap(file, vma); | 2011 | rc = generic_file_mmap(file, vma); |
| 2012 | if (rc == 0) | ||
| 2013 | vma->vm_ops = &cifs_file_vm_ops; | ||
| 2010 | FreeXid(xid); | 2014 | FreeXid(xid); |
| 2011 | return rc; | 2015 | return rc; |
| 2012 | } | 2016 | } |
diff --git a/fs/cifs/link.c b/fs/cifs/link.c index e8804d373404..ce417a9764a3 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c | |||
| @@ -239,7 +239,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon, | |||
| 239 | if (rc != 0) | 239 | if (rc != 0) |
| 240 | return rc; | 240 | return rc; |
| 241 | 241 | ||
| 242 | if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) { | 242 | if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { |
| 243 | CIFSSMBClose(xid, tcon, netfid); | 243 | CIFSSMBClose(xid, tcon, netfid); |
| 244 | /* it's not a symlink */ | 244 | /* it's not a symlink */ |
| 245 | return -EINVAL; | 245 | return -EINVAL; |
| @@ -316,7 +316,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr, | |||
| 316 | if (rc != 0) | 316 | if (rc != 0) |
| 317 | goto out; | 317 | goto out; |
| 318 | 318 | ||
| 319 | if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) { | 319 | if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { |
| 320 | CIFSSMBClose(xid, pTcon, netfid); | 320 | CIFSSMBClose(xid, pTcon, netfid); |
| 321 | /* it's not a symlink */ | 321 | /* it's not a symlink */ |
| 322 | goto out; | 322 | goto out; |
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 2a930a752a78..0c684ae4c071 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
| @@ -100,6 +100,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free) | |||
| 100 | memset(buf_to_free->password, 0, strlen(buf_to_free->password)); | 100 | memset(buf_to_free->password, 0, strlen(buf_to_free->password)); |
| 101 | kfree(buf_to_free->password); | 101 | kfree(buf_to_free->password); |
| 102 | } | 102 | } |
| 103 | kfree(buf_to_free->user_name); | ||
| 103 | kfree(buf_to_free->domainName); | 104 | kfree(buf_to_free->domainName); |
| 104 | kfree(buf_to_free); | 105 | kfree(buf_to_free); |
| 105 | } | 106 | } |
| @@ -520,7 +521,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) | |||
| 520 | (struct smb_com_transaction_change_notify_rsp *)buf; | 521 | (struct smb_com_transaction_change_notify_rsp *)buf; |
| 521 | struct file_notify_information *pnotify; | 522 | struct file_notify_information *pnotify; |
| 522 | __u32 data_offset = 0; | 523 | __u32 data_offset = 0; |
| 523 | if (pSMBr->ByteCount > sizeof(struct file_notify_information)) { | 524 | if (get_bcc_le(buf) > sizeof(struct file_notify_information)) { |
| 524 | data_offset = le32_to_cpu(pSMBr->DataOffset); | 525 | data_offset = le32_to_cpu(pSMBr->DataOffset); |
| 525 | 526 | ||
| 526 | pnotify = (struct file_notify_information *) | 527 | pnotify = (struct file_notify_information *) |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 16765703131b..f6728eb6f4b9 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
| @@ -219,12 +219,12 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, | |||
| 219 | bcc_ptr++; | 219 | bcc_ptr++; |
| 220 | } */ | 220 | } */ |
| 221 | /* copy user */ | 221 | /* copy user */ |
| 222 | if (ses->userName == NULL) { | 222 | if (ses->user_name == NULL) { |
| 223 | /* null user mount */ | 223 | /* null user mount */ |
| 224 | *bcc_ptr = 0; | 224 | *bcc_ptr = 0; |
| 225 | *(bcc_ptr+1) = 0; | 225 | *(bcc_ptr+1) = 0; |
| 226 | } else { | 226 | } else { |
| 227 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, | 227 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name, |
| 228 | MAX_USERNAME_SIZE, nls_cp); | 228 | MAX_USERNAME_SIZE, nls_cp); |
| 229 | } | 229 | } |
| 230 | bcc_ptr += 2 * bytes_ret; | 230 | bcc_ptr += 2 * bytes_ret; |
| @@ -244,12 +244,11 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, | |||
| 244 | /* copy user */ | 244 | /* copy user */ |
| 245 | /* BB what about null user mounts - check that we do this BB */ | 245 | /* BB what about null user mounts - check that we do this BB */ |
| 246 | /* copy user */ | 246 | /* copy user */ |
| 247 | if (ses->userName == NULL) { | 247 | if (ses->user_name != NULL) |
| 248 | /* BB what about null user mounts - check that we do this BB */ | 248 | strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE); |
| 249 | } else { | 249 | /* else null user mount */ |
| 250 | strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE); | 250 | |
| 251 | } | 251 | bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE); |
| 252 | bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE); | ||
| 253 | *bcc_ptr = 0; | 252 | *bcc_ptr = 0; |
| 254 | bcc_ptr++; /* account for null termination */ | 253 | bcc_ptr++; /* account for null termination */ |
| 255 | 254 | ||
| @@ -405,8 +404,8 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, | |||
| 405 | /* BB spec says that if AvId field of MsvAvTimestamp is populated then | 404 | /* BB spec says that if AvId field of MsvAvTimestamp is populated then |
| 406 | we must set the MIC field of the AUTHENTICATE_MESSAGE */ | 405 | we must set the MIC field of the AUTHENTICATE_MESSAGE */ |
| 407 | ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags); | 406 | ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags); |
| 408 | tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset); | 407 | tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset); |
| 409 | tilen = cpu_to_le16(pblob->TargetInfoArray.Length); | 408 | tilen = le16_to_cpu(pblob->TargetInfoArray.Length); |
| 410 | if (tilen) { | 409 | if (tilen) { |
| 411 | ses->auth_key.response = kmalloc(tilen, GFP_KERNEL); | 410 | ses->auth_key.response = kmalloc(tilen, GFP_KERNEL); |
| 412 | if (!ses->auth_key.response) { | 411 | if (!ses->auth_key.response) { |
| @@ -523,14 +522,14 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, | |||
| 523 | tmp += len; | 522 | tmp += len; |
| 524 | } | 523 | } |
| 525 | 524 | ||
| 526 | if (ses->userName == NULL) { | 525 | if (ses->user_name == NULL) { |
| 527 | sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); | 526 | sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); |
| 528 | sec_blob->UserName.Length = 0; | 527 | sec_blob->UserName.Length = 0; |
| 529 | sec_blob->UserName.MaximumLength = 0; | 528 | sec_blob->UserName.MaximumLength = 0; |
| 530 | tmp += 2; | 529 | tmp += 2; |
| 531 | } else { | 530 | } else { |
| 532 | int len; | 531 | int len; |
| 533 | len = cifs_strtoUCS((__le16 *)tmp, ses->userName, | 532 | len = cifs_strtoUCS((__le16 *)tmp, ses->user_name, |
| 534 | MAX_USERNAME_SIZE, nls_cp); | 533 | MAX_USERNAME_SIZE, nls_cp); |
| 535 | len *= 2; /* unicode is 2 bytes each */ | 534 | len *= 2; /* unicode is 2 bytes each */ |
| 536 | sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); | 535 | sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); |
diff --git a/fs/dcache.c b/fs/dcache.c index ad25c4cec7d5..129a35730994 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
| @@ -2131,7 +2131,7 @@ EXPORT_SYMBOL(d_rehash); | |||
| 2131 | */ | 2131 | */ |
| 2132 | void dentry_update_name_case(struct dentry *dentry, struct qstr *name) | 2132 | void dentry_update_name_case(struct dentry *dentry, struct qstr *name) |
| 2133 | { | 2133 | { |
| 2134 | BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); | 2134 | BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex)); |
| 2135 | BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ | 2135 | BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ |
| 2136 | 2136 | ||
| 2137 | spin_lock(&dentry->d_lock); | 2137 | spin_lock(&dentry->d_lock); |
diff --git a/fs/fhandle.c b/fs/fhandle.c index bf93ad2bee07..6b088641f5bf 100644 --- a/fs/fhandle.c +++ b/fs/fhandle.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <linux/exportfs.h> | 7 | #include <linux/exportfs.h> |
| 8 | #include <linux/fs_struct.h> | 8 | #include <linux/fs_struct.h> |
| 9 | #include <linux/fsnotify.h> | 9 | #include <linux/fsnotify.h> |
| 10 | #include <linux/personality.h> | ||
| 10 | #include <asm/uaccess.h> | 11 | #include <asm/uaccess.h> |
| 11 | #include "internal.h" | 12 | #include "internal.h" |
| 12 | 13 | ||
diff --git a/fs/filesystems.c b/fs/filesystems.c index 751d6b255a12..0845f84f2a5f 100644 --- a/fs/filesystems.c +++ b/fs/filesystems.c | |||
| @@ -110,14 +110,13 @@ int unregister_filesystem(struct file_system_type * fs) | |||
| 110 | *tmp = fs->next; | 110 | *tmp = fs->next; |
| 111 | fs->next = NULL; | 111 | fs->next = NULL; |
| 112 | write_unlock(&file_systems_lock); | 112 | write_unlock(&file_systems_lock); |
| 113 | synchronize_rcu(); | ||
| 113 | return 0; | 114 | return 0; |
| 114 | } | 115 | } |
| 115 | tmp = &(*tmp)->next; | 116 | tmp = &(*tmp)->next; |
| 116 | } | 117 | } |
| 117 | write_unlock(&file_systems_lock); | 118 | write_unlock(&file_systems_lock); |
| 118 | 119 | ||
| 119 | synchronize_rcu(); | ||
| 120 | |||
| 121 | return -EINVAL; | 120 | return -EINVAL; |
| 122 | } | 121 | } |
| 123 | 122 | ||
diff --git a/fs/namei.c b/fs/namei.c index e6cd6113872c..54fc993e3027 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -697,6 +697,7 @@ static __always_inline void set_root_rcu(struct nameidata *nd) | |||
| 697 | do { | 697 | do { |
| 698 | seq = read_seqcount_begin(&fs->seq); | 698 | seq = read_seqcount_begin(&fs->seq); |
| 699 | nd->root = fs->root; | 699 | nd->root = fs->root; |
| 700 | nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq); | ||
| 700 | } while (read_seqcount_retry(&fs->seq, seq)); | 701 | } while (read_seqcount_retry(&fs->seq, seq)); |
| 701 | } | 702 | } |
| 702 | } | 703 | } |
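The set_root_rcu() change re-samples the root dentry's d_seq inside the existing fs->seq retry loop. For reference, the general seqcount reader pattern that loop follows, as a generic sketch (cached_point is a made-up structure):

/* Generic seqcount reader, illustration only (see linux/seqlock.h). */
struct cached_point {
        seqcount_t seq;
        struct path root;
};

static void read_cached_root(struct cached_point *cp, struct path *out)
{
        unsigned seq;

        do {
                seq = read_seqcount_begin(&cp->seq);
                /* snapshot the protected data (refcounting omitted here) */
                *out = cp->root;
        } while (read_seqcount_retry(&cp->seq, seq));
}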
diff --git a/fs/namespace.c b/fs/namespace.c index 7dba2ed03429..d99bcf59e4c2 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
| @@ -1030,18 +1030,6 @@ const struct seq_operations mounts_op = { | |||
| 1030 | .show = show_vfsmnt | 1030 | .show = show_vfsmnt |
| 1031 | }; | 1031 | }; |
| 1032 | 1032 | ||
| 1033 | static int uuid_is_nil(u8 *uuid) | ||
| 1034 | { | ||
| 1035 | int i; | ||
| 1036 | u8 *cp = (u8 *)uuid; | ||
| 1037 | |||
| 1038 | for (i = 0; i < 16; i++) { | ||
| 1039 | if (*cp++) | ||
| 1040 | return 0; | ||
| 1041 | } | ||
| 1042 | return 1; | ||
| 1043 | } | ||
| 1044 | |||
| 1045 | static int show_mountinfo(struct seq_file *m, void *v) | 1033 | static int show_mountinfo(struct seq_file *m, void *v) |
| 1046 | { | 1034 | { |
| 1047 | struct proc_mounts *p = m->private; | 1035 | struct proc_mounts *p = m->private; |
| @@ -1085,10 +1073,6 @@ static int show_mountinfo(struct seq_file *m, void *v) | |||
| 1085 | if (IS_MNT_UNBINDABLE(mnt)) | 1073 | if (IS_MNT_UNBINDABLE(mnt)) |
| 1086 | seq_puts(m, " unbindable"); | 1074 | seq_puts(m, " unbindable"); |
| 1087 | 1075 | ||
| 1088 | if (!uuid_is_nil(mnt->mnt_sb->s_uuid)) | ||
| 1089 | /* print the uuid */ | ||
| 1090 | seq_printf(m, " uuid:%pU", mnt->mnt_sb->s_uuid); | ||
| 1091 | |||
| 1092 | /* Filesystem specific data */ | 1076 | /* Filesystem specific data */ |
| 1093 | seq_puts(m, " - "); | 1077 | seq_puts(m, " - "); |
| 1094 | show_type(m, sb); | 1078 | show_type(m, sb); |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index af0c6279a4a7..e4cbc11a74ab 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
| @@ -542,11 +542,15 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, u | |||
| 542 | if (!nfs_need_commit(nfsi)) | 542 | if (!nfs_need_commit(nfsi)) |
| 543 | return 0; | 543 | return 0; |
| 544 | 544 | ||
| 545 | spin_lock(&inode->i_lock); | ||
| 545 | ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); | 546 | ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); |
| 546 | if (ret > 0) | 547 | if (ret > 0) |
| 547 | nfsi->ncommit -= ret; | 548 | nfsi->ncommit -= ret; |
| 549 | spin_unlock(&inode->i_lock); | ||
| 550 | |||
| 548 | if (nfs_need_commit(NFS_I(inode))) | 551 | if (nfs_need_commit(NFS_I(inode))) |
| 549 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | 552 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
| 553 | |||
| 550 | return ret; | 554 | return ret; |
| 551 | } | 555 | } |
| 552 | #else | 556 | #else |
| @@ -1483,9 +1487,7 @@ int nfs_commit_inode(struct inode *inode, int how) | |||
| 1483 | res = nfs_commit_set_lock(NFS_I(inode), may_wait); | 1487 | res = nfs_commit_set_lock(NFS_I(inode), may_wait); |
| 1484 | if (res <= 0) | 1488 | if (res <= 0) |
| 1485 | goto out_mark_dirty; | 1489 | goto out_mark_dirty; |
| 1486 | spin_lock(&inode->i_lock); | ||
| 1487 | res = nfs_scan_commit(inode, &head, 0, 0); | 1490 | res = nfs_scan_commit(inode, &head, 0, 0); |
| 1488 | spin_unlock(&inode->i_lock); | ||
| 1489 | if (res) { | 1491 | if (res) { |
| 1490 | int error; | 1492 | int error; |
| 1491 | 1493 | ||
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c index b10e3540d5b7..ce4f62440425 100644 --- a/fs/partitions/ldm.c +++ b/fs/partitions/ldm.c | |||
| @@ -1299,6 +1299,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) | |||
| 1299 | 1299 | ||
| 1300 | BUG_ON (!data || !frags); | 1300 | BUG_ON (!data || !frags); |
| 1301 | 1301 | ||
| 1302 | if (size < 2 * VBLK_SIZE_HEAD) { | ||
| 1303 | ldm_error("Value of size is too small."); | ||
| 1304 | return false; | ||
| 1305 | } | ||
| 1306 | |||
| 1302 | group = get_unaligned_be32(data + 0x08); | 1307 | group = get_unaligned_be32(data + 0x08); |
| 1303 | rec = get_unaligned_be16(data + 0x0C); | 1308 | rec = get_unaligned_be16(data + 0x0C); |
| 1304 | num = get_unaligned_be16(data + 0x0E); | 1309 | num = get_unaligned_be16(data + 0x0E); |
| @@ -1306,6 +1311,10 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) | |||
| 1306 | ldm_error ("A VBLK claims to have %d parts.", num); | 1311 | ldm_error ("A VBLK claims to have %d parts.", num); |
| 1307 | return false; | 1312 | return false; |
| 1308 | } | 1313 | } |
| 1314 | if (rec >= num) { | ||
| 1315 | ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num); | ||
| 1316 | return false; | ||
| 1317 | } | ||
| 1309 | 1318 | ||
| 1310 | list_for_each (item, frags) { | 1319 | list_for_each (item, frags) { |
| 1311 | f = list_entry (item, struct frag, list); | 1320 | f = list_entry (item, struct frag, list); |
| @@ -1334,10 +1343,9 @@ found: | |||
| 1334 | 1343 | ||
| 1335 | f->map |= (1 << rec); | 1344 | f->map |= (1 << rec); |
| 1336 | 1345 | ||
| 1337 | if (num > 0) { | 1346 | data += VBLK_SIZE_HEAD; |
| 1338 | data += VBLK_SIZE_HEAD; | 1347 | size -= VBLK_SIZE_HEAD; |
| 1339 | size -= VBLK_SIZE_HEAD; | 1348 | |
| 1340 | } | ||
| 1341 | memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size); | 1349 | memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size); |
| 1342 | 1350 | ||
| 1343 | return true; | 1351 | return true; |
diff --git a/fs/proc/base.c b/fs/proc/base.c index dd6628d3ba42..dfa532730e55 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -3124,11 +3124,16 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi | |||
| 3124 | /* for the /proc/ directory itself, after non-process stuff has been done */ | 3124 | /* for the /proc/ directory itself, after non-process stuff has been done */ |
| 3125 | int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) | 3125 | int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) |
| 3126 | { | 3126 | { |
| 3127 | unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY; | 3127 | unsigned int nr; |
| 3128 | struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode); | 3128 | struct task_struct *reaper; |
| 3129 | struct tgid_iter iter; | 3129 | struct tgid_iter iter; |
| 3130 | struct pid_namespace *ns; | 3130 | struct pid_namespace *ns; |
| 3131 | 3131 | ||
| 3132 | if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET) | ||
| 3133 | goto out_no_task; | ||
| 3134 | nr = filp->f_pos - FIRST_PROCESS_ENTRY; | ||
| 3135 | |||
| 3136 | reaper = get_proc_task(filp->f_path.dentry->d_inode); | ||
| 3132 | if (!reaper) | 3137 | if (!reaper) |
| 3133 | goto out_no_task; | 3138 | goto out_no_task; |
| 3134 | 3139 | ||
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 9eead2c796b7..fbb0b478a346 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c | |||
| @@ -112,6 +112,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) | |||
| 112 | SetPageDirty(page); | 112 | SetPageDirty(page); |
| 113 | 113 | ||
| 114 | unlock_page(page); | 114 | unlock_page(page); |
| 115 | put_page(page); | ||
| 115 | } | 116 | } |
| 116 | 117 | ||
| 117 | return 0; | 118 | return 0; |
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 919f0de29d8f..e6493cac193d 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h | |||
| @@ -23,6 +23,12 @@ | |||
| 23 | #ifndef __UBIFS_DEBUG_H__ | 23 | #ifndef __UBIFS_DEBUG_H__ |
| 24 | #define __UBIFS_DEBUG_H__ | 24 | #define __UBIFS_DEBUG_H__ |
| 25 | 25 | ||
| 26 | /* Checking helper functions */ | ||
| 27 | typedef int (*dbg_leaf_callback)(struct ubifs_info *c, | ||
| 28 | struct ubifs_zbranch *zbr, void *priv); | ||
| 29 | typedef int (*dbg_znode_callback)(struct ubifs_info *c, | ||
| 30 | struct ubifs_znode *znode, void *priv); | ||
| 31 | |||
| 26 | #ifdef CONFIG_UBIFS_FS_DEBUG | 32 | #ifdef CONFIG_UBIFS_FS_DEBUG |
| 27 | 33 | ||
| 28 | /** | 34 | /** |
| @@ -270,11 +276,6 @@ void dbg_dump_tnc(struct ubifs_info *c); | |||
| 270 | void dbg_dump_index(struct ubifs_info *c); | 276 | void dbg_dump_index(struct ubifs_info *c); |
| 271 | void dbg_dump_lpt_lebs(const struct ubifs_info *c); | 277 | void dbg_dump_lpt_lebs(const struct ubifs_info *c); |
| 272 | 278 | ||
| 273 | /* Checking helper functions */ | ||
| 274 | typedef int (*dbg_leaf_callback)(struct ubifs_info *c, | ||
| 275 | struct ubifs_zbranch *zbr, void *priv); | ||
| 276 | typedef int (*dbg_znode_callback)(struct ubifs_info *c, | ||
| 277 | struct ubifs_znode *znode, void *priv); | ||
| 278 | int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, | 279 | int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, |
| 279 | dbg_znode_callback znode_cb, void *priv); | 280 | dbg_znode_callback znode_cb, void *priv); |
| 280 | 281 | ||
| @@ -295,7 +296,6 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size); | |||
| 295 | int dbg_check_filesystem(struct ubifs_info *c); | 296 | int dbg_check_filesystem(struct ubifs_info *c); |
| 296 | void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, | 297 | void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, |
| 297 | int add_pos); | 298 | int add_pos); |
| 298 | int dbg_check_lprops(struct ubifs_info *c); | ||
| 299 | int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, | 299 | int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, |
| 300 | int row, int col); | 300 | int row, int col); |
| 301 | int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, | 301 | int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, |
| @@ -401,58 +401,94 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c); | |||
| 401 | #define DBGKEY(key) ((char *)(key)) | 401 | #define DBGKEY(key) ((char *)(key)) |
| 402 | #define DBGKEY1(key) ((char *)(key)) | 402 | #define DBGKEY1(key) ((char *)(key)) |
| 403 | 403 | ||
| 404 | #define ubifs_debugging_init(c) 0 | 404 | static inline int ubifs_debugging_init(struct ubifs_info *c) { return 0; } |
| 405 | #define ubifs_debugging_exit(c) ({}) | 405 | static inline void ubifs_debugging_exit(struct ubifs_info *c) { return; } |
| 406 | 406 | static inline const char *dbg_ntype(int type) { return ""; } | |
| 407 | #define dbg_ntype(type) "" | 407 | static inline const char *dbg_cstate(int cmt_state) { return ""; } |
| 408 | #define dbg_cstate(cmt_state) "" | 408 | static inline const char *dbg_jhead(int jhead) { return ""; } |
| 409 | #define dbg_jhead(jhead) "" | 409 | static inline const char * |
| 410 | #define dbg_get_key_dump(c, key) ({}) | 410 | dbg_get_key_dump(const struct ubifs_info *c, |
| 411 | #define dbg_dump_inode(c, inode) ({}) | 411 | const union ubifs_key *key) { return ""; } |
| 412 | #define dbg_dump_node(c, node) ({}) | 412 | static inline void dbg_dump_inode(const struct ubifs_info *c, |
| 413 | #define dbg_dump_lpt_node(c, node, lnum, offs) ({}) | 413 | const struct inode *inode) { return; } |
| 414 | #define dbg_dump_budget_req(req) ({}) | 414 | static inline void dbg_dump_node(const struct ubifs_info *c, |
| 415 | #define dbg_dump_lstats(lst) ({}) | 415 | const void *node) { return; } |
| 416 | #define dbg_dump_budg(c) ({}) | 416 | static inline void dbg_dump_lpt_node(const struct ubifs_info *c, |
| 417 | #define dbg_dump_lprop(c, lp) ({}) | 417 | void *node, int lnum, |
| 418 | #define dbg_dump_lprops(c) ({}) | 418 | int offs) { return; } |
| 419 | #define dbg_dump_lpt_info(c) ({}) | 419 | static inline void |
| 420 | #define dbg_dump_leb(c, lnum) ({}) | 420 | dbg_dump_budget_req(const struct ubifs_budget_req *req) { return; } |
| 421 | #define dbg_dump_znode(c, znode) ({}) | 421 | static inline void |
| 422 | #define dbg_dump_heap(c, heap, cat) ({}) | 422 | dbg_dump_lstats(const struct ubifs_lp_stats *lst) { return; } |
| 423 | #define dbg_dump_pnode(c, pnode, parent, iip) ({}) | 423 | static inline void dbg_dump_budg(struct ubifs_info *c) { return; } |
| 424 | #define dbg_dump_tnc(c) ({}) | 424 | static inline void dbg_dump_lprop(const struct ubifs_info *c, |
| 425 | #define dbg_dump_index(c) ({}) | 425 | const struct ubifs_lprops *lp) { return; } |
| 426 | #define dbg_dump_lpt_lebs(c) ({}) | 426 | static inline void dbg_dump_lprops(struct ubifs_info *c) { return; } |
| 427 | 427 | static inline void dbg_dump_lpt_info(struct ubifs_info *c) { return; } | |
| 428 | #define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0 | 428 | static inline void dbg_dump_leb(const struct ubifs_info *c, |
| 429 | #define dbg_old_index_check_init(c, zroot) 0 | 429 | int lnum) { return; } |
| 430 | #define dbg_save_space_info(c) ({}) | 430 | static inline void |
| 431 | #define dbg_check_space_info(c) 0 | 431 | dbg_dump_znode(const struct ubifs_info *c, |
| 432 | #define dbg_check_old_index(c, zroot) 0 | 432 | const struct ubifs_znode *znode) { return; } |
| 433 | #define dbg_check_cats(c) 0 | 433 | static inline void dbg_dump_heap(struct ubifs_info *c, |
| 434 | #define dbg_check_ltab(c) 0 | 434 | struct ubifs_lpt_heap *heap, |
| 435 | #define dbg_chk_lpt_free_spc(c) 0 | 435 | int cat) { return; } |
| 436 | #define dbg_chk_lpt_sz(c, action, len) 0 | 436 | static inline void dbg_dump_pnode(struct ubifs_info *c, |
| 437 | #define dbg_check_synced_i_size(inode) 0 | 437 | struct ubifs_pnode *pnode, |
| 438 | #define dbg_check_dir_size(c, dir) 0 | 438 | struct ubifs_nnode *parent, |
| 439 | #define dbg_check_tnc(c, x) 0 | 439 | int iip) { return; } |
| 440 | #define dbg_check_idx_size(c, idx_size) 0 | 440 | static inline void dbg_dump_tnc(struct ubifs_info *c) { return; } |
| 441 | #define dbg_check_filesystem(c) 0 | 441 | static inline void dbg_dump_index(struct ubifs_info *c) { return; } |
| 442 | #define dbg_check_heap(c, heap, cat, add_pos) ({}) | 442 | static inline void dbg_dump_lpt_lebs(const struct ubifs_info *c) { return; } |
| 443 | #define dbg_check_lprops(c) 0 | 443 | |
| 444 | #define dbg_check_lpt_nodes(c, cnode, row, col) 0 | 444 | static inline int dbg_walk_index(struct ubifs_info *c, |
| 445 | #define dbg_check_inode_size(c, inode, size) 0 | 445 | dbg_leaf_callback leaf_cb, |
| 446 | #define dbg_check_data_nodes_order(c, head) 0 | 446 | dbg_znode_callback znode_cb, |
| 447 | #define dbg_check_nondata_nodes_order(c, head) 0 | 447 | void *priv) { return 0; } |
| 448 | #define dbg_force_in_the_gaps_enabled 0 | 448 | static inline void dbg_save_space_info(struct ubifs_info *c) { return; } |
| 449 | #define dbg_force_in_the_gaps() 0 | 449 | static inline int dbg_check_space_info(struct ubifs_info *c) { return 0; } |
| 450 | #define dbg_failure_mode 0 | 450 | static inline int dbg_check_lprops(struct ubifs_info *c) { return 0; } |
| 451 | 451 | static inline int | |
| 452 | #define dbg_debugfs_init() 0 | 452 | dbg_old_index_check_init(struct ubifs_info *c, |
| 453 | #define dbg_debugfs_exit() | 453 | struct ubifs_zbranch *zroot) { return 0; } |
| 454 | #define dbg_debugfs_init_fs(c) 0 | 454 | static inline int |
| 455 | #define dbg_debugfs_exit_fs(c) 0 | 455 | dbg_check_old_index(struct ubifs_info *c, |
| 456 | struct ubifs_zbranch *zroot) { return 0; } | ||
| 457 | static inline int dbg_check_cats(struct ubifs_info *c) { return 0; } | ||
| 458 | static inline int dbg_check_ltab(struct ubifs_info *c) { return 0; } | ||
| 459 | static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c) { return 0; } | ||
| 460 | static inline int dbg_chk_lpt_sz(struct ubifs_info *c, | ||
| 461 | int action, int len) { return 0; } | ||
| 462 | static inline int dbg_check_synced_i_size(struct inode *inode) { return 0; } | ||
| 463 | static inline int dbg_check_dir_size(struct ubifs_info *c, | ||
| 464 | const struct inode *dir) { return 0; } | ||
| 465 | static inline int dbg_check_tnc(struct ubifs_info *c, int extra) { return 0; } | ||
| 466 | static inline int dbg_check_idx_size(struct ubifs_info *c, | ||
| 467 | long long idx_size) { return 0; } | ||
| 468 | static inline int dbg_check_filesystem(struct ubifs_info *c) { return 0; } | ||
| 469 | static inline void dbg_check_heap(struct ubifs_info *c, | ||
| 470 | struct ubifs_lpt_heap *heap, | ||
| 471 | int cat, int add_pos) { return; } | ||
| 472 | static inline int dbg_check_lpt_nodes(struct ubifs_info *c, | ||
| 473 | struct ubifs_cnode *cnode, int row, int col) { return 0; } | ||
| 474 | static inline int dbg_check_inode_size(struct ubifs_info *c, | ||
| 475 | const struct inode *inode, | ||
| 476 | loff_t size) { return 0; } | ||
| 477 | static inline int | ||
| 478 | dbg_check_data_nodes_order(struct ubifs_info *c, | ||
| 479 | struct list_head *head) { return 0; } | ||
| 480 | static inline int | ||
| 481 | dbg_check_nondata_nodes_order(struct ubifs_info *c, | ||
| 482 | struct list_head *head) { return 0; } | ||
| 483 | |||
| 484 | static inline int dbg_force_in_the_gaps(void) { return 0; } | ||
| 485 | #define dbg_force_in_the_gaps_enabled 0 | ||
| 486 | #define dbg_failure_mode 0 | ||
| 487 | |||
| 488 | static inline int dbg_debugfs_init(void) { return 0; } | ||
| 489 | static inline void dbg_debugfs_exit(void) { return; } | ||
| 490 | static inline int dbg_debugfs_init_fs(struct ubifs_info *c) { return 0; } | ||
| 491 | static inline int dbg_debugfs_exit_fs(struct ubifs_info *c) { return 0; } | ||
| 456 | 492 | ||
| 457 | #endif /* !CONFIG_UBIFS_FS_DEBUG */ | 493 | #endif /* !CONFIG_UBIFS_FS_DEBUG */ |
| 458 | #endif /* !__UBIFS_DEBUG_H__ */ | 494 | #endif /* !__UBIFS_DEBUG_H__ */ |
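The large debug.h hunk converts the !CONFIG_UBIFS_FS_DEBUG stubs from object-like macros into typed static inline functions, so callers get the same prototype checking whether or not the debug code is compiled in. The pattern in miniature, with made-up names:

/* Generic form of the pattern above, illustration only. */
struct foo_info;

#ifdef CONFIG_FOO_DEBUG
int  foo_check_tree(struct foo_info *c);        /* implemented in debug.c */
void foo_dump_node(const struct foo_info *c, const void *node);
#else
static inline int  foo_check_tree(struct foo_info *c) { return 0; }
static inline void foo_dump_node(const struct foo_info *c,
                                 const void *node) { }
#endif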
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 28be1e6a65e8..b286db79c686 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c | |||
| @@ -1312,6 +1312,9 @@ int ubifs_fsync(struct file *file, int datasync) | |||
| 1312 | 1312 | ||
| 1313 | dbg_gen("syncing inode %lu", inode->i_ino); | 1313 | dbg_gen("syncing inode %lu", inode->i_ino); |
| 1314 | 1314 | ||
| 1315 | if (inode->i_sb->s_flags & MS_RDONLY) | ||
| 1316 | return 0; | ||
| 1317 | |||
| 1315 | /* | 1318 | /* |
| 1316 | * VFS has already synchronized dirty pages for this inode. Synchronize | 1319 | * VFS has already synchronized dirty pages for this inode. Synchronize |
| 1317 | * the inode unless this is a 'datasync()' call. | 1320 | * the inode unless this is a 'datasync()' call. |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 32176cc8e715..cbbfd98ad4a3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -697,7 +697,7 @@ extern void blk_start_queue(struct request_queue *q); | |||
| 697 | extern void blk_stop_queue(struct request_queue *q); | 697 | extern void blk_stop_queue(struct request_queue *q); |
| 698 | extern void blk_sync_queue(struct request_queue *q); | 698 | extern void blk_sync_queue(struct request_queue *q); |
| 699 | extern void __blk_stop_queue(struct request_queue *q); | 699 | extern void __blk_stop_queue(struct request_queue *q); |
| 700 | extern void __blk_run_queue(struct request_queue *q, bool force_kblockd); | 700 | extern void __blk_run_queue(struct request_queue *q); |
| 701 | extern void blk_run_queue(struct request_queue *); | 701 | extern void blk_run_queue(struct request_queue *); |
| 702 | extern int blk_rq_map_user(struct request_queue *, struct request *, | 702 | extern int blk_rq_map_user(struct request_queue *, struct request *, |
| 703 | struct rq_map_data *, void __user *, unsigned long, | 703 | struct rq_map_data *, void __user *, unsigned long, |
| @@ -857,26 +857,39 @@ extern void blk_put_queue(struct request_queue *); | |||
| 857 | struct blk_plug { | 857 | struct blk_plug { |
| 858 | unsigned long magic; | 858 | unsigned long magic; |
| 859 | struct list_head list; | 859 | struct list_head list; |
| 860 | struct list_head cb_list; | ||
| 860 | unsigned int should_sort; | 861 | unsigned int should_sort; |
| 861 | }; | 862 | }; |
| 863 | struct blk_plug_cb { | ||
| 864 | struct list_head list; | ||
| 865 | void (*callback)(struct blk_plug_cb *); | ||
| 866 | }; | ||
| 862 | 867 | ||
| 863 | extern void blk_start_plug(struct blk_plug *); | 868 | extern void blk_start_plug(struct blk_plug *); |
| 864 | extern void blk_finish_plug(struct blk_plug *); | 869 | extern void blk_finish_plug(struct blk_plug *); |
| 865 | extern void __blk_flush_plug(struct task_struct *, struct blk_plug *); | 870 | extern void blk_flush_plug_list(struct blk_plug *, bool); |
| 866 | 871 | ||
| 867 | static inline void blk_flush_plug(struct task_struct *tsk) | 872 | static inline void blk_flush_plug(struct task_struct *tsk) |
| 868 | { | 873 | { |
| 869 | struct blk_plug *plug = tsk->plug; | 874 | struct blk_plug *plug = tsk->plug; |
| 870 | 875 | ||
| 871 | if (unlikely(plug)) | 876 | if (plug) |
| 872 | __blk_flush_plug(tsk, plug); | 877 | blk_flush_plug_list(plug, false); |
| 878 | } | ||
| 879 | |||
| 880 | static inline void blk_schedule_flush_plug(struct task_struct *tsk) | ||
| 881 | { | ||
| 882 | struct blk_plug *plug = tsk->plug; | ||
| 883 | |||
| 884 | if (plug) | ||
| 885 | blk_flush_plug_list(plug, true); | ||
| 873 | } | 886 | } |
| 874 | 887 | ||
| 875 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) | 888 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) |
| 876 | { | 889 | { |
| 877 | struct blk_plug *plug = tsk->plug; | 890 | struct blk_plug *plug = tsk->plug; |
| 878 | 891 | ||
| 879 | return plug && !list_empty(&plug->list); | 892 | return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list)); |
| 880 | } | 893 | } |
| 881 | 894 | ||
| 882 | /* | 895 | /* |
| @@ -1314,6 +1327,11 @@ static inline void blk_flush_plug(struct task_struct *task) | |||
| 1314 | { | 1327 | { |
| 1315 | } | 1328 | } |
| 1316 | 1329 | ||
| 1330 | static inline void blk_schedule_flush_plug(struct task_struct *task) | ||
| 1331 | { | ||
| 1332 | } | ||
| 1333 | |||
| 1334 | |||
| 1317 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) | 1335 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) |
| 1318 | { | 1336 | { |
| 1319 | return false; | 1337 | return false; |
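blkdev.h now gives struct blk_plug a cb_list and defines struct blk_plug_cb with a callback pointer. A sketch of how a stacking driver might use it, under stated assumptions: the callbacks on cb_list are run when blk_flush_plug_list() flushes the plug, and a driver queues work by linking its own entry onto the current task's plug; my_dev, my_unplug and my_dev_run_deferred are made up.

/* Sketch only; see the assumptions above. */
struct my_plug_cb {
        struct blk_plug_cb cb;
        struct my_dev *dev;
};

static void my_unplug(struct blk_plug_cb *cb)
{
        struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

        my_dev_run_deferred(mcb->dev);  /* kick the I/O deferred below */
        kfree(mcb);
}

static void my_defer_until_unplug(struct my_dev *dev)
{
        struct blk_plug *plug = current->plug;
        struct my_plug_cb *mcb;

        if (!plug)
                return;                 /* no plug held, nothing to defer to */

        mcb = kmalloc(sizeof(*mcb), GFP_ATOMIC);
        if (!mcb)
                return;

        mcb->cb.callback = my_unplug;
        mcb->dev = dev;
        list_add(&mcb->cb.list, &plug->cb_list);
}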
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index e2768834f397..32a4423710f5 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
| @@ -197,7 +197,6 @@ struct dm_target { | |||
| 197 | struct dm_target_callbacks { | 197 | struct dm_target_callbacks { |
| 198 | struct list_head list; | 198 | struct list_head list; |
| 199 | int (*congested_fn) (struct dm_target_callbacks *, int); | 199 | int (*congested_fn) (struct dm_target_callbacks *, int); |
| 200 | void (*unplug_fn)(struct dm_target_callbacks *); | ||
| 201 | }; | 200 | }; |
| 202 | 201 | ||
| 203 | int dm_register_target(struct target_type *t); | 202 | int dm_register_target(struct target_type *t); |
diff --git a/include/linux/input.h b/include/linux/input.h index f3a7794a18c4..771d6d85667d 100644 --- a/include/linux/input.h +++ b/include/linux/input.h | |||
| @@ -167,6 +167,7 @@ struct input_keymap_entry { | |||
| 167 | #define SYN_REPORT 0 | 167 | #define SYN_REPORT 0 |
| 168 | #define SYN_CONFIG 1 | 168 | #define SYN_CONFIG 1 |
| 169 | #define SYN_MT_REPORT 2 | 169 | #define SYN_MT_REPORT 2 |
| 170 | #define SYN_DROPPED 3 | ||
| 170 | 171 | ||
| 171 | /* | 172 | /* |
| 172 | * Keys and buttons | 173 | * Keys and buttons |
| @@ -553,8 +554,8 @@ struct input_keymap_entry { | |||
| 553 | #define KEY_DVD 0x185 /* Media Select DVD */ | 554 | #define KEY_DVD 0x185 /* Media Select DVD */ |
| 554 | #define KEY_AUX 0x186 | 555 | #define KEY_AUX 0x186 |
| 555 | #define KEY_MP3 0x187 | 556 | #define KEY_MP3 0x187 |
| 556 | #define KEY_AUDIO 0x188 | 557 | #define KEY_AUDIO 0x188 /* AL Audio Browser */ |
| 557 | #define KEY_VIDEO 0x189 | 558 | #define KEY_VIDEO 0x189 /* AL Movie Browser */ |
| 558 | #define KEY_DIRECTORY 0x18a | 559 | #define KEY_DIRECTORY 0x18a |
| 559 | #define KEY_LIST 0x18b | 560 | #define KEY_LIST 0x18b |
| 560 | #define KEY_MEMO 0x18c /* Media Select Messages */ | 561 | #define KEY_MEMO 0x18c /* Media Select Messages */ |
| @@ -603,8 +604,9 @@ struct input_keymap_entry { | |||
| 603 | #define KEY_FRAMEFORWARD 0x1b5 | 604 | #define KEY_FRAMEFORWARD 0x1b5 |
| 604 | #define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */ | 605 | #define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */ |
| 605 | #define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */ | 606 | #define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */ |
| 606 | #define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */ | 607 | #define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */ |
| 607 | #define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */ | 608 | #define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */ |
| 609 | #define KEY_IMAGES 0x1ba /* AL Image Browser */ | ||
| 608 | 610 | ||
| 609 | #define KEY_DEL_EOL 0x1c0 | 611 | #define KEY_DEL_EOL 0x1c0 |
| 610 | #define KEY_DEL_EOS 0x1c1 | 612 | #define KEY_DEL_EOS 0x1c1 |
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h index b3ac06a4435d..318bb82325a6 100644 --- a/include/linux/input/mt.h +++ b/include/linux/input/mt.h | |||
| @@ -48,6 +48,12 @@ static inline void input_mt_slot(struct input_dev *dev, int slot) | |||
| 48 | input_event(dev, EV_ABS, ABS_MT_SLOT, slot); | 48 | input_event(dev, EV_ABS, ABS_MT_SLOT, slot); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | static inline bool input_is_mt_axis(int axis) | ||
| 52 | { | ||
| 53 | return axis == ABS_MT_SLOT || | ||
| 54 | (axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST); | ||
| 55 | } | ||
| 56 | |||
| 51 | void input_mt_report_slot_state(struct input_dev *dev, | 57 | void input_mt_report_slot_state(struct input_dev *dev, |
| 52 | unsigned int tool_type, bool active); | 58 | unsigned int tool_type, bool active); |
| 53 | 59 | ||
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5a5ce7055839..5e9840f50980 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -216,7 +216,7 @@ static inline void mem_cgroup_del_lru_list(struct page *page, int lru) | |||
| 216 | return ; | 216 | return ; |
| 217 | } | 217 | } |
| 218 | 218 | ||
| 219 | static inline inline void mem_cgroup_rotate_reclaimable_page(struct page *page) | 219 | static inline void mem_cgroup_rotate_reclaimable_page(struct page *page) |
| 220 | { | 220 | { |
| 221 | return ; | 221 | return ; |
| 222 | } | 222 | } |
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index ad1b19aa6508..aef23309a742 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h | |||
| @@ -86,16 +86,25 @@ extern int mfd_clone_cell(const char *cell, const char **clones, | |||
| 86 | */ | 86 | */ |
| 87 | static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev) | 87 | static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev) |
| 88 | { | 88 | { |
| 89 | return pdev->dev.platform_data; | 89 | return pdev->mfd_cell; |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | /* | 92 | /* |
| 93 | * Given a platform device that's been created by mfd_add_devices(), fetch | 93 | * Given a platform device that's been created by mfd_add_devices(), fetch |
| 94 | * the .mfd_data entry from the mfd_cell that created it. | 94 | * the .mfd_data entry from the mfd_cell that created it. |
| 95 | * Otherwise just return the platform_data pointer. | ||
| 96 | * This maintains compatibility with platform drivers whose devices aren't | ||
| 97 | * created by the mfd layer, and expect platform_data to contain what would've | ||
| 98 | * otherwise been in mfd_data. | ||
| 95 | */ | 99 | */ |
| 96 | static inline void *mfd_get_data(struct platform_device *pdev) | 100 | static inline void *mfd_get_data(struct platform_device *pdev) |
| 97 | { | 101 | { |
| 98 | return mfd_get_cell(pdev)->mfd_data; | 102 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
| 103 | |||
| 104 | if (cell) | ||
| 105 | return cell->mfd_data; | ||
| 106 | else | ||
| 107 | return pdev->dev.platform_data; | ||
| 99 | } | 108 | } |
| 100 | 109 | ||
| 101 | extern int mfd_add_devices(struct device *parent, int id, | 110 | extern int mfd_add_devices(struct device *parent, int id, |
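
The mfd_get_data() change above prefers the cell attached to an MFD-created platform device and falls back to platform_data for devices created outside the MFD core. A small userspace sketch of that fallback, with pared-down stand-in structs rather than the real kernel types:

#include <stdio.h>

struct mfd_cell { void *mfd_data; };

struct platform_device {
	void *platform_data;              /* dev.platform_data in the kernel */
	const struct mfd_cell *mfd_cell;  /* set only for MFD-created devices */
};

/* Prefer the cell's data; fall back to platform_data so drivers whose
 * devices were not created by the MFD core keep working unchanged. */
static void *get_data(const struct platform_device *pdev)
{
	if (pdev->mfd_cell)
		return pdev->mfd_cell->mfd_data;
	return pdev->platform_data;
}

int main(void)
{
	int cfg = 42, legacy_cfg = 7;
	struct mfd_cell cell = { .mfd_data = &cfg };
	struct platform_device mfd_dev = { .mfd_cell = &cell };
	struct platform_device plain_dev = { .platform_data = &legacy_cfg };

	printf("%d %d\n", *(int *)get_data(&mfd_dev), *(int *)get_data(&plain_dev));
	return 0;
}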
diff --git a/include/linux/pid.h b/include/linux/pid.h index 31afb7ecbe1f..cdced84261d7 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h | |||
| @@ -117,7 +117,7 @@ extern struct pid *find_vpid(int nr); | |||
| 117 | */ | 117 | */ |
| 118 | extern struct pid *find_get_pid(int nr); | 118 | extern struct pid *find_get_pid(int nr); |
| 119 | extern struct pid *find_ge_pid(int nr, struct pid_namespace *); | 119 | extern struct pid *find_ge_pid(int nr, struct pid_namespace *); |
| 120 | int next_pidmap(struct pid_namespace *pid_ns, int last); | 120 | int next_pidmap(struct pid_namespace *pid_ns, unsigned int last); |
| 121 | 121 | ||
| 122 | extern struct pid *alloc_pid(struct pid_namespace *ns); | 122 | extern struct pid *alloc_pid(struct pid_namespace *ns); |
| 123 | extern void free_pid(struct pid *pid); | 123 | extern void free_pid(struct pid *pid); |
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index d96db9825708..744942c95fec 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
| 15 | #include <linux/mod_devicetable.h> | 15 | #include <linux/mod_devicetable.h> |
| 16 | 16 | ||
| 17 | struct mfd_cell; | ||
| 18 | |||
| 17 | struct platform_device { | 19 | struct platform_device { |
| 18 | const char * name; | 20 | const char * name; |
| 19 | int id; | 21 | int id; |
| @@ -23,6 +25,9 @@ struct platform_device { | |||
| 23 | 25 | ||
| 24 | const struct platform_device_id *id_entry; | 26 | const struct platform_device_id *id_entry; |
| 25 | 27 | ||
| 28 | /* MFD cell pointer */ | ||
| 29 | struct mfd_cell *mfd_cell; | ||
| 30 | |||
| 26 | /* arch specific additions */ | 31 | /* arch specific additions */ |
| 27 | struct pdev_archdata archdata; | 32 | struct pdev_archdata archdata; |
| 28 | }; | 33 | }; |
diff --git a/include/linux/rio.h b/include/linux/rio.h index 4e37a7cfa726..4d50611112ba 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h | |||
| @@ -396,7 +396,7 @@ union rio_pw_msg { | |||
| 396 | }; | 396 | }; |
| 397 | 397 | ||
| 398 | /* Architecture and hardware-specific functions */ | 398 | /* Architecture and hardware-specific functions */ |
| 399 | extern void rio_register_mport(struct rio_mport *); | 399 | extern int rio_register_mport(struct rio_mport *); |
| 400 | extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); | 400 | extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); |
| 401 | extern void rio_close_inb_mbox(struct rio_mport *, int); | 401 | extern void rio_close_inb_mbox(struct rio_mport *, int); |
| 402 | extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int); | 402 | extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int); |
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h index 7410d3365e2a..0cee0152aca9 100644 --- a/include/linux/rio_ids.h +++ b/include/linux/rio_ids.h | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #define RIO_DID_IDTCPS6Q 0x035f | 35 | #define RIO_DID_IDTCPS6Q 0x035f |
| 36 | #define RIO_DID_IDTCPS10Q 0x035e | 36 | #define RIO_DID_IDTCPS10Q 0x035e |
| 37 | #define RIO_DID_IDTCPS1848 0x0374 | 37 | #define RIO_DID_IDTCPS1848 0x0374 |
| 38 | #define RIO_DID_IDTCPS1432 0x0375 | ||
| 38 | #define RIO_DID_IDTCPS1616 0x0379 | 39 | #define RIO_DID_IDTCPS1616 0x0379 |
| 39 | #define RIO_DID_IDTVPS1616 0x0377 | 40 | #define RIO_DID_IDTVPS1616 0x0377 |
| 40 | #define RIO_DID_IDTSPS1616 0x0378 | 41 | #define RIO_DID_IDTSPS1616 0x0378 |
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 2ca7e8a78060..877ece45426f 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
| @@ -228,6 +228,8 @@ extern int rtc_read_alarm(struct rtc_device *rtc, | |||
| 228 | struct rtc_wkalrm *alrm); | 228 | struct rtc_wkalrm *alrm); |
| 229 | extern int rtc_set_alarm(struct rtc_device *rtc, | 229 | extern int rtc_set_alarm(struct rtc_device *rtc, |
| 230 | struct rtc_wkalrm *alrm); | 230 | struct rtc_wkalrm *alrm); |
| 231 | extern int rtc_initialize_alarm(struct rtc_device *rtc, | ||
| 232 | struct rtc_wkalrm *alrm); | ||
| 231 | extern void rtc_update_irq(struct rtc_device *rtc, | 233 | extern void rtc_update_irq(struct rtc_device *rtc, |
| 232 | unsigned long num, unsigned long events); | 234 | unsigned long num, unsigned long events); |
| 233 | 235 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index 4ec2c027e92c..18d63cea2848 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1254,6 +1254,9 @@ struct task_struct { | |||
| 1254 | #endif | 1254 | #endif |
| 1255 | 1255 | ||
| 1256 | struct mm_struct *mm, *active_mm; | 1256 | struct mm_struct *mm, *active_mm; |
| 1257 | #ifdef CONFIG_COMPAT_BRK | ||
| 1258 | unsigned brk_randomized:1; | ||
| 1259 | #endif | ||
| 1257 | #if defined(SPLIT_RSS_COUNTING) | 1260 | #if defined(SPLIT_RSS_COUNTING) |
| 1258 | struct task_rss_stat rss_stat; | 1261 | struct task_rss_stat rss_stat; |
| 1259 | #endif | 1262 | #endif |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 5a89e3612875..083ffea7ba18 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
| @@ -249,6 +249,8 @@ extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); | |||
| 249 | extern int hibernate(void); | 249 | extern int hibernate(void); |
| 250 | extern bool system_entering_hibernation(void); | 250 | extern bool system_entering_hibernation(void); |
| 251 | #else /* CONFIG_HIBERNATION */ | 251 | #else /* CONFIG_HIBERNATION */ |
| 252 | static inline void register_nosave_region(unsigned long b, unsigned long e) {} | ||
| 253 | static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} | ||
| 252 | static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } | 254 | static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } |
| 253 | static inline void swsusp_set_page_free(struct page *p) {} | 255 | static inline void swsusp_set_page_free(struct page *p) {} |
| 254 | static inline void swsusp_unset_page_free(struct page *p) {} | 256 | static inline void swsusp_unset_page_free(struct page *p) {} |
| @@ -297,14 +299,7 @@ static inline bool pm_wakeup_pending(void) { return false; } | |||
| 297 | 299 | ||
| 298 | extern struct mutex pm_mutex; | 300 | extern struct mutex pm_mutex; |
| 299 | 301 | ||
| 300 | #ifndef CONFIG_HIBERNATION | 302 | #ifndef CONFIG_HIBERNATE_CALLBACKS |
| 301 | static inline void register_nosave_region(unsigned long b, unsigned long e) | ||
| 302 | { | ||
| 303 | } | ||
| 304 | static inline void register_nosave_region_late(unsigned long b, unsigned long e) | ||
| 305 | { | ||
| 306 | } | ||
| 307 | |||
| 308 | static inline void lock_system_sleep(void) {} | 303 | static inline void lock_system_sleep(void) {} |
| 309 | static inline void unlock_system_sleep(void) {} | 304 | static inline void unlock_system_sleep(void) {} |
| 310 | 305 | ||
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 461c0119664f..2b3831b58aa4 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
| @@ -58,6 +58,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
| 58 | UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ | 58 | UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ |
| 59 | UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ | 59 | UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ |
| 60 | UNEVICTABLE_MLOCKFREED, | 60 | UNEVICTABLE_MLOCKFREED, |
| 61 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
| 62 | THP_FAULT_ALLOC, | ||
| 63 | THP_FAULT_FALLBACK, | ||
| 64 | THP_COLLAPSE_ALLOC, | ||
| 65 | THP_COLLAPSE_ALLOC_FAILED, | ||
| 66 | THP_SPLIT, | ||
| 67 | #endif | ||
| 61 | NR_VM_EVENT_ITEMS | 68 | NR_VM_EVENT_ITEMS |
| 62 | }; | 69 | }; |
| 63 | 70 | ||
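
The new THP_* counters are only compiled in under CONFIG_TRANSPARENT_HUGEPAGE, and the matching strings are added to vmstat_text in the mm/vmstat.c hunk later in this diff; the enum and the name table have to stay in sync, including the #ifdef. A self-contained sketch of that pattern (placeholder event names, not the full kernel list):

#include <stdio.h>

#define CONFIG_TRANSPARENT_HUGEPAGE 1   /* toggle to see both layouts */

enum vm_event_item {
	PGFAULT,                        /* placeholder for the preceding events */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_FAULT_FALLBACK,
	THP_COLLAPSE_ALLOC,
	THP_COLLAPSE_ALLOC_FAILED,
	THP_SPLIT,
#endif
	NR_VM_EVENT_ITEMS
};

/* The name table must mirror the enum, guarded by the same #ifdef. */
static const char *const vm_event_text[] = {
	"pgfault",
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_alloc",
	"thp_fault_fallback",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_split",
#endif
};

int main(void)
{
	for (int i = 0; i < NR_VM_EVENT_ITEMS; i++)
		printf("%d %s\n", i, vm_event_text[i]);
	return 0;
}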
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index cdf2e8ac4309..d2df55b0c213 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h | |||
| @@ -139,8 +139,6 @@ do { \ | |||
| 139 | */ | 139 | */ |
| 140 | 140 | ||
| 141 | enum p9_msg_t { | 141 | enum p9_msg_t { |
| 142 | P9_TSYNCFS = 0, | ||
| 143 | P9_RSYNCFS, | ||
| 144 | P9_TLERROR = 6, | 142 | P9_TLERROR = 6, |
| 145 | P9_RLERROR, | 143 | P9_RLERROR, |
| 146 | P9_TSTATFS = 8, | 144 | P9_TSTATFS = 8, |
diff --git a/include/net/9p/client.h b/include/net/9p/client.h index 85c1413f054d..051a99f79769 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h | |||
| @@ -218,8 +218,8 @@ void p9_client_disconnect(struct p9_client *clnt); | |||
| 218 | void p9_client_begin_disconnect(struct p9_client *clnt); | 218 | void p9_client_begin_disconnect(struct p9_client *clnt); |
| 219 | struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, | 219 | struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, |
| 220 | char *uname, u32 n_uname, char *aname); | 220 | char *uname, u32 n_uname, char *aname); |
| 221 | struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, | 221 | struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, |
| 222 | int clone); | 222 | char **wnames, int clone); |
| 223 | int p9_client_open(struct p9_fid *fid, int mode); | 223 | int p9_client_open(struct p9_fid *fid, int mode); |
| 224 | int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, | 224 | int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, |
| 225 | char *extension); | 225 | char *extension); |
| @@ -230,7 +230,6 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, | |||
| 230 | gid_t gid, struct p9_qid *qid); | 230 | gid_t gid, struct p9_qid *qid); |
| 231 | int p9_client_clunk(struct p9_fid *fid); | 231 | int p9_client_clunk(struct p9_fid *fid); |
| 232 | int p9_client_fsync(struct p9_fid *fid, int datasync); | 232 | int p9_client_fsync(struct p9_fid *fid, int datasync); |
| 233 | int p9_client_sync_fs(struct p9_fid *fid); | ||
| 234 | int p9_client_remove(struct p9_fid *fid); | 233 | int p9_client_remove(struct p9_fid *fid); |
| 235 | int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, | 234 | int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, |
| 236 | u64 offset, u32 count); | 235 | u64 offset, u32 count); |
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 78f18adb49c8..bf366547da25 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
| @@ -401,9 +401,9 @@ TRACE_EVENT(block_plug, | |||
| 401 | 401 | ||
| 402 | DECLARE_EVENT_CLASS(block_unplug, | 402 | DECLARE_EVENT_CLASS(block_unplug, |
| 403 | 403 | ||
| 404 | TP_PROTO(struct request_queue *q), | 404 | TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), |
| 405 | 405 | ||
| 406 | TP_ARGS(q), | 406 | TP_ARGS(q, depth, explicit), |
| 407 | 407 | ||
| 408 | TP_STRUCT__entry( | 408 | TP_STRUCT__entry( |
| 409 | __field( int, nr_rq ) | 409 | __field( int, nr_rq ) |
| @@ -411,7 +411,7 @@ DECLARE_EVENT_CLASS(block_unplug, | |||
| 411 | ), | 411 | ), |
| 412 | 412 | ||
| 413 | TP_fast_assign( | 413 | TP_fast_assign( |
| 414 | __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; | 414 | __entry->nr_rq = depth; |
| 415 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 415 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
| 416 | ), | 416 | ), |
| 417 | 417 | ||
| @@ -419,31 +419,19 @@ DECLARE_EVENT_CLASS(block_unplug, | |||
| 419 | ); | 419 | ); |
| 420 | 420 | ||
| 421 | /** | 421 | /** |
| 422 | * block_unplug_timer - timed release of operations requests in queue to device driver | 422 | * block_unplug - release of operations requests in request queue |
| 423 | * @q: request queue to unplug | ||
| 424 | * | ||
| 425 | * Unplug the request queue @q because a timer expired and allow block | ||
| 426 | * operation requests to be sent to the device driver. | ||
| 427 | */ | ||
| 428 | DEFINE_EVENT(block_unplug, block_unplug_timer, | ||
| 429 | |||
| 430 | TP_PROTO(struct request_queue *q), | ||
| 431 | |||
| 432 | TP_ARGS(q) | ||
| 433 | ); | ||
| 434 | |||
| 435 | /** | ||
| 436 | * block_unplug_io - release of operations requests in request queue | ||
| 437 | * @q: request queue to unplug | 423 | * @q: request queue to unplug |
| 424 | * @depth: number of requests just added to the queue | ||
| 425 | * @explicit: whether this was an explicit unplug, or one from schedule() | ||
| 438 | * | 426 | * |
| 439 | * Unplug request queue @q because device driver is scheduled to work | 427 | * Unplug request queue @q because device driver is scheduled to work |
| 440 | * on elements in the request queue. | 428 | * on elements in the request queue. |
| 441 | */ | 429 | */ |
| 442 | DEFINE_EVENT(block_unplug, block_unplug_io, | 430 | DEFINE_EVENT(block_unplug, block_unplug, |
| 443 | 431 | ||
| 444 | TP_PROTO(struct request_queue *q), | 432 | TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), |
| 445 | 433 | ||
| 446 | TP_ARGS(q) | 434 | TP_ARGS(q, depth, explicit) |
| 447 | ); | 435 | ); |
| 448 | 436 | ||
| 449 | /** | 437 | /** |
diff --git a/kernel/futex.c b/kernel/futex.c index dfb924ffe65b..fe28dc282eae 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
| @@ -1886,7 +1886,7 @@ retry: | |||
| 1886 | restart->futex.val = val; | 1886 | restart->futex.val = val; |
| 1887 | restart->futex.time = abs_time->tv64; | 1887 | restart->futex.time = abs_time->tv64; |
| 1888 | restart->futex.bitset = bitset; | 1888 | restart->futex.bitset = bitset; |
| 1889 | restart->futex.flags = flags; | 1889 | restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; |
| 1890 | 1890 | ||
| 1891 | ret = -ERESTART_RESTARTBLOCK; | 1891 | ret = -ERESTART_RESTARTBLOCK; |
| 1892 | 1892 | ||
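
The one-line futex change records in the restart block that the original wait had a timeout, so the restarted wait re-arms the deadline instead of silently dropping it. A hedged userspace sketch of that flag handling; the flag names mirror the kernel's but the values and struct layout here are purely illustrative.

#include <stdint.h>
#include <stdio.h>

#define FLAGS_SHARED      0x01u
#define FLAGS_HAS_TIMEOUT 0x04u   /* illustrative values, not the kernel's */

struct restart_futex {
	uint32_t flags;
	int64_t  time;    /* absolute deadline, meaningful only with FLAGS_HAS_TIMEOUT */
};

/* Restart path: only arm a timer when the original call carried a deadline. */
static void restart_wait(const struct restart_futex *r)
{
	if (r->flags & FLAGS_HAS_TIMEOUT)
		printf("re-arming absolute timeout at %lld\n", (long long)r->time);
	else
		printf("waiting with no timeout\n");
}

int main(void)
{
	struct restart_futex r = { .flags = FLAGS_SHARED, .time = 123456789 };
	restart_wait(&r);                 /* deadline dropped, as before the fix */
	r.flags |= FLAGS_HAS_TIMEOUT;     /* what the one-line change adds */
	restart_wait(&r);
	return 0;
}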
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 27960f114efd..8e81a9860a0d 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
| @@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode) | |||
| 364 | } | 364 | } |
| 365 | 365 | ||
| 366 | if (mode & PERF_CGROUP_SWIN) { | 366 | if (mode & PERF_CGROUP_SWIN) { |
| 367 | WARN_ON_ONCE(cpuctx->cgrp); | ||
| 367 | /* set cgrp before ctxsw in to | 368 | /* set cgrp before ctxsw in to |
| 368 | * allow event_filter_match() to not | 369 | * allow event_filter_match() to not |
| 369 | * have to pass task around | 370 | * have to pass task around |
| @@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx) | |||
| 2423 | if (!ctx || !ctx->nr_events) | 2424 | if (!ctx || !ctx->nr_events) |
| 2424 | goto out; | 2425 | goto out; |
| 2425 | 2426 | ||
| 2427 | /* | ||
| 2428 | * We must ctxsw out cgroup events to avoid conflict | ||
| 2429 | * when invoking perf_task_event_sched_in() later on | ||
| 2430 | * in this function. Otherwise we end up trying to | ||
| 2431 | * ctxswin cgroup events which are already scheduled | ||
| 2432 | * in. | ||
| 2433 | */ | ||
| 2434 | perf_cgroup_sched_out(current); | ||
| 2426 | task_ctx_sched_out(ctx, EVENT_ALL); | 2435 | task_ctx_sched_out(ctx, EVENT_ALL); |
| 2427 | 2436 | ||
| 2428 | raw_spin_lock(&ctx->lock); | 2437 | raw_spin_lock(&ctx->lock); |
| @@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx) | |||
| 2447 | 2456 | ||
| 2448 | raw_spin_unlock(&ctx->lock); | 2457 | raw_spin_unlock(&ctx->lock); |
| 2449 | 2458 | ||
| 2459 | /* | ||
| 2460 | * Also calls ctxswin for cgroup events, if any: | ||
| 2461 | */ | ||
| 2450 | perf_event_context_sched_in(ctx, ctx->task); | 2462 | perf_event_context_sched_in(ctx, ctx->task); |
| 2451 | out: | 2463 | out: |
| 2452 | local_irq_restore(flags); | 2464 | local_irq_restore(flags); |
diff --git a/kernel/pid.c b/kernel/pid.c index 02f221274265..57a8346a270e 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
| @@ -217,11 +217,14 @@ static int alloc_pidmap(struct pid_namespace *pid_ns) | |||
| 217 | return -1; | 217 | return -1; |
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | int next_pidmap(struct pid_namespace *pid_ns, int last) | 220 | int next_pidmap(struct pid_namespace *pid_ns, unsigned int last) |
| 221 | { | 221 | { |
| 222 | int offset; | 222 | int offset; |
| 223 | struct pidmap *map, *end; | 223 | struct pidmap *map, *end; |
| 224 | 224 | ||
| 225 | if (last >= PID_MAX_LIMIT) | ||
| 226 | return -1; | ||
| 227 | |||
| 225 | offset = (last + 1) & BITS_PER_PAGE_MASK; | 228 | offset = (last + 1) & BITS_PER_PAGE_MASK; |
| 226 | map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE]; | 229 | map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE]; |
| 227 | end = &pid_ns->pidmap[PIDMAP_ENTRIES]; | 230 | end = &pid_ns->pidmap[PIDMAP_ENTRIES]; |
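
next_pidmap() takes a caller-supplied cursor, so switching the parameter to unsigned and rejecting anything at or beyond PID_MAX_LIMIT keeps (last + 1)/BITS_PER_PAGE from indexing past the pidmap array. A runnable sketch of a bounded bitmap scan with the same guard; the constants are illustrative, not the kernel's.

#include <stdio.h>

#define PAGE_BITS     64                      /* illustrative, not 8*PAGE_SIZE */
#define PID_MAX_LIMIT 256
#define ENTRIES       (PID_MAX_LIMIT / PAGE_BITS)

static unsigned long long pidmap[ENTRIES];    /* one bit per allocated pid */

/* Return the next allocated pid after 'last', or -1 if none. */
static int next_pid(unsigned int last)
{
	if (last >= PID_MAX_LIMIT)                /* the added guard: an oversized   */
		return -1;                        /* cursor must never index pidmap[] */

	for (unsigned int pid = last + 1; pid < PID_MAX_LIMIT; pid++)
		if (pidmap[pid / PAGE_BITS] & (1ULL << (pid % PAGE_BITS)))
			return (int)pid;
	return -1;
}

int main(void)
{
	pidmap[0] |= 1ULL << 1;                   /* pretend pid 1 exists */
	printf("%d\n", next_pid(0));              /* -> 1 */
	printf("%d\n", next_pid(0x7fffffffu));    /* -> -1, no out-of-bounds access */
	return 0;
}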
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 4603f08dc47b..6de9a8fc3417 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
| @@ -18,9 +18,13 @@ config SUSPEND_FREEZER | |||
| 18 | 18 | ||
| 19 | Turning OFF this setting is NOT recommended! If in doubt, say Y. | 19 | Turning OFF this setting is NOT recommended! If in doubt, say Y. |
| 20 | 20 | ||
| 21 | config HIBERNATE_CALLBACKS | ||
| 22 | bool | ||
| 23 | |||
| 21 | config HIBERNATION | 24 | config HIBERNATION |
| 22 | bool "Hibernation (aka 'suspend to disk')" | 25 | bool "Hibernation (aka 'suspend to disk')" |
| 23 | depends on SWAP && ARCH_HIBERNATION_POSSIBLE | 26 | depends on SWAP && ARCH_HIBERNATION_POSSIBLE |
| 27 | select HIBERNATE_CALLBACKS | ||
| 24 | select LZO_COMPRESS | 28 | select LZO_COMPRESS |
| 25 | select LZO_DECOMPRESS | 29 | select LZO_DECOMPRESS |
| 26 | ---help--- | 30 | ---help--- |
| @@ -85,7 +89,7 @@ config PM_STD_PARTITION | |||
| 85 | 89 | ||
| 86 | config PM_SLEEP | 90 | config PM_SLEEP |
| 87 | def_bool y | 91 | def_bool y |
| 88 | depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE | 92 | depends on SUSPEND || HIBERNATE_CALLBACKS |
| 89 | 93 | ||
| 90 | config PM_SLEEP_SMP | 94 | config PM_SLEEP_SMP |
| 91 | def_bool y | 95 | def_bool y |
diff --git a/kernel/sched.c b/kernel/sched.c index 48013633d792..312f8b95c2d4 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -4111,20 +4111,20 @@ need_resched: | |||
| 4111 | try_to_wake_up_local(to_wakeup); | 4111 | try_to_wake_up_local(to_wakeup); |
| 4112 | } | 4112 | } |
| 4113 | deactivate_task(rq, prev, DEQUEUE_SLEEP); | 4113 | deactivate_task(rq, prev, DEQUEUE_SLEEP); |
| 4114 | |||
| 4115 | /* | ||
| 4116 | * If we are going to sleep and we have plugged IO queued, make | ||
| 4117 | * sure to submit it to avoid deadlocks. | ||
| 4118 | */ | ||
| 4119 | if (blk_needs_flush_plug(prev)) { | ||
| 4120 | raw_spin_unlock(&rq->lock); | ||
| 4121 | blk_schedule_flush_plug(prev); | ||
| 4122 | raw_spin_lock(&rq->lock); | ||
| 4123 | } | ||
| 4114 | } | 4124 | } |
| 4115 | switch_count = &prev->nvcsw; | 4125 | switch_count = &prev->nvcsw; |
| 4116 | } | 4126 | } |
| 4117 | 4127 | ||
| 4118 | /* | ||
| 4119 | * If we are going to sleep and we have plugged IO queued, make | ||
| 4120 | * sure to submit it to avoid deadlocks. | ||
| 4121 | */ | ||
| 4122 | if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) { | ||
| 4123 | raw_spin_unlock(&rq->lock); | ||
| 4124 | blk_flush_plug(prev); | ||
| 4125 | raw_spin_lock(&rq->lock); | ||
| 4126 | } | ||
| 4127 | |||
| 4128 | pre_schedule(rq, prev); | 4128 | pre_schedule(rq, prev); |
| 4129 | 4129 | ||
| 4130 | if (unlikely(!rq->nr_running)) | 4130 | if (unlikely(!rq->nr_running)) |
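
The scheduler hunk moves the plug flush inside the branch where the task is actually being deactivated, and switches it to blk_schedule_flush_plug(); the runqueue lock is still dropped around the flush so the sleeper's plugged I/O gets submitted before it can block waiting on it. A loose userspace sketch of that unlock/flush/relock pattern, using a pthread mutex as a stand-in for the runqueue lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
	bool going_to_sleep;
	int  plugged_io;          /* queued-but-unsubmitted requests */
};

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

static void flush_plug(struct task *t)
{
	printf("submitting %d plugged requests before sleeping\n", t->plugged_io);
	t->plugged_io = 0;
}

static void schedule_sketch(struct task *prev)
{
	pthread_mutex_lock(&rq_lock);
	if (prev->going_to_sleep) {
		/* deactivate_task(prev) would happen here */
		if (prev->plugged_io) {
			/* The sleeper may be waiting on exactly this I/O, so
			 * submit it now; drop the lock across the submission. */
			pthread_mutex_unlock(&rq_lock);
			flush_plug(prev);
			pthread_mutex_lock(&rq_lock);
		}
	}
	/* pick the next task ... */
	pthread_mutex_unlock(&rq_lock);
}

int main(void)
{
	struct task t = { .going_to_sleep = true, .plugged_io = 3 };
	schedule_sketch(&t);
	return 0;
}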
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 7f00772e57c9..6fa833ab2cb8 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
| @@ -2104,21 +2104,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
| 2104 | enum cpu_idle_type idle, int *all_pinned, | 2104 | enum cpu_idle_type idle, int *all_pinned, |
| 2105 | int *this_best_prio, struct cfs_rq *busiest_cfs_rq) | 2105 | int *this_best_prio, struct cfs_rq *busiest_cfs_rq) |
| 2106 | { | 2106 | { |
| 2107 | int loops = 0, pulled = 0, pinned = 0; | 2107 | int loops = 0, pulled = 0; |
| 2108 | long rem_load_move = max_load_move; | 2108 | long rem_load_move = max_load_move; |
| 2109 | struct task_struct *p, *n; | 2109 | struct task_struct *p, *n; |
| 2110 | 2110 | ||
| 2111 | if (max_load_move == 0) | 2111 | if (max_load_move == 0) |
| 2112 | goto out; | 2112 | goto out; |
| 2113 | 2113 | ||
| 2114 | pinned = 1; | ||
| 2115 | |||
| 2116 | list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) { | 2114 | list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) { |
| 2117 | if (loops++ > sysctl_sched_nr_migrate) | 2115 | if (loops++ > sysctl_sched_nr_migrate) |
| 2118 | break; | 2116 | break; |
| 2119 | 2117 | ||
| 2120 | if ((p->se.load.weight >> 1) > rem_load_move || | 2118 | if ((p->se.load.weight >> 1) > rem_load_move || |
| 2121 | !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) | 2119 | !can_migrate_task(p, busiest, this_cpu, sd, idle, |
| 2120 | all_pinned)) | ||
| 2122 | continue; | 2121 | continue; |
| 2123 | 2122 | ||
| 2124 | pull_task(busiest, p, this_rq, this_cpu); | 2123 | pull_task(busiest, p, this_rq, this_cpu); |
| @@ -2153,9 +2152,6 @@ out: | |||
| 2153 | */ | 2152 | */ |
| 2154 | schedstat_add(sd, lb_gained[idle], pulled); | 2153 | schedstat_add(sd, lb_gained[idle], pulled); |
| 2155 | 2154 | ||
| 2156 | if (all_pinned) | ||
| 2157 | *all_pinned = pinned; | ||
| 2158 | |||
| 2159 | return max_load_move - rem_load_move; | 2155 | return max_load_move - rem_load_move; |
| 2160 | } | 2156 | } |
| 2161 | 2157 | ||
| @@ -3127,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 3127 | if (!sds.busiest || sds.busiest_nr_running == 0) | 3123 | if (!sds.busiest || sds.busiest_nr_running == 0) |
| 3128 | goto out_balanced; | 3124 | goto out_balanced; |
| 3129 | 3125 | ||
| 3126 | sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr; | ||
| 3127 | |||
| 3130 | /* | 3128 | /* |
| 3131 | * If the busiest group is imbalanced the below checks don't | 3129 | * If the busiest group is imbalanced the below checks don't |
| 3132 | * work because they assumes all things are equal, which typically | 3130 | * work because they assumes all things are equal, which typically |
| @@ -3151,7 +3149,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 3151 | * Don't pull any tasks if this group is already above the domain | 3149 | * Don't pull any tasks if this group is already above the domain |
| 3152 | * average load. | 3150 | * average load. |
| 3153 | */ | 3151 | */ |
| 3154 | sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr; | ||
| 3155 | if (sds.this_load >= sds.avg_load) | 3152 | if (sds.this_load >= sds.avg_load) |
| 3156 | goto out_balanced; | 3153 | goto out_balanced; |
| 3157 | 3154 | ||
| @@ -3340,6 +3337,7 @@ redo: | |||
| 3340 | * still unbalanced. ld_moved simply stays zero, so it is | 3337 | * still unbalanced. ld_moved simply stays zero, so it is |
| 3341 | * correctly treated as an imbalance. | 3338 | * correctly treated as an imbalance. |
| 3342 | */ | 3339 | */ |
| 3340 | all_pinned = 1; | ||
| 3343 | local_irq_save(flags); | 3341 | local_irq_save(flags); |
| 3344 | double_rq_lock(this_rq, busiest); | 3342 | double_rq_lock(this_rq, busiest); |
| 3345 | ld_moved = move_tasks(this_rq, this_cpu, busiest, | 3343 | ld_moved = move_tasks(this_rq, this_cpu, busiest, |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7aa40f8e182d..6957aa298dfa 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
| @@ -850,29 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q) | |||
| 850 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); | 850 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); |
| 851 | } | 851 | } |
| 852 | 852 | ||
| 853 | static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q) | 853 | static void blk_add_trace_unplug(void *ignore, struct request_queue *q, |
| 854 | unsigned int depth, bool explicit) | ||
| 854 | { | 855 | { |
| 855 | struct blk_trace *bt = q->blk_trace; | 856 | struct blk_trace *bt = q->blk_trace; |
| 856 | 857 | ||
| 857 | if (bt) { | 858 | if (bt) { |
| 858 | unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; | 859 | __be64 rpdu = cpu_to_be64(depth); |
| 859 | __be64 rpdu = cpu_to_be64(pdu); | 860 | u32 what; |
| 860 | 861 | ||
| 861 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0, | 862 | if (explicit) |
| 862 | sizeof(rpdu), &rpdu); | 863 | what = BLK_TA_UNPLUG_IO; |
| 863 | } | 864 | else |
| 864 | } | 865 | what = BLK_TA_UNPLUG_TIMER; |
| 865 | |||
| 866 | static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q) | ||
| 867 | { | ||
| 868 | struct blk_trace *bt = q->blk_trace; | ||
| 869 | |||
| 870 | if (bt) { | ||
| 871 | unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; | ||
| 872 | __be64 rpdu = cpu_to_be64(pdu); | ||
| 873 | 866 | ||
| 874 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0, | 867 | __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu); |
| 875 | sizeof(rpdu), &rpdu); | ||
| 876 | } | 868 | } |
| 877 | } | 869 | } |
| 878 | 870 | ||
| @@ -1015,9 +1007,7 @@ static void blk_register_tracepoints(void) | |||
| 1015 | WARN_ON(ret); | 1007 | WARN_ON(ret); |
| 1016 | ret = register_trace_block_plug(blk_add_trace_plug, NULL); | 1008 | ret = register_trace_block_plug(blk_add_trace_plug, NULL); |
| 1017 | WARN_ON(ret); | 1009 | WARN_ON(ret); |
| 1018 | ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); | 1010 | ret = register_trace_block_unplug(blk_add_trace_unplug, NULL); |
| 1019 | WARN_ON(ret); | ||
| 1020 | ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); | ||
| 1021 | WARN_ON(ret); | 1011 | WARN_ON(ret); |
| 1022 | ret = register_trace_block_split(blk_add_trace_split, NULL); | 1012 | ret = register_trace_block_split(blk_add_trace_split, NULL); |
| 1023 | WARN_ON(ret); | 1013 | WARN_ON(ret); |
| @@ -1032,8 +1022,7 @@ static void blk_unregister_tracepoints(void) | |||
| 1032 | unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); | 1022 | unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); |
| 1033 | unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); | 1023 | unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); |
| 1034 | unregister_trace_block_split(blk_add_trace_split, NULL); | 1024 | unregister_trace_block_split(blk_add_trace_split, NULL); |
| 1035 | unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); | 1025 | unregister_trace_block_unplug(blk_add_trace_unplug, NULL); |
| 1036 | unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); | ||
| 1037 | unregister_trace_block_plug(blk_add_trace_plug, NULL); | 1026 | unregister_trace_block_plug(blk_add_trace_plug, NULL); |
| 1038 | unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); | 1027 | unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); |
| 1039 | unregister_trace_block_getrq(blk_add_trace_getrq, NULL); | 1028 | unregister_trace_block_getrq(blk_add_trace_getrq, NULL); |
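
With the tracepoint class change earlier in this diff, the two unplug handlers collapse into one: the 'explicit' flag selects which legacy action code to emit and the depth argument replaces the direct read of q->rq.count[]. A small userspace sketch of that dispatch; the action values here are made up, the real BLK_TA_* codes live in the blktrace headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { TA_UNPLUG_IO = 1, TA_UNPLUG_TIMER = 2 };   /* illustrative codes only */

/* One handler replaces the former _io and _timer variants. */
static void trace_unplug(unsigned int depth, bool explicit_unplug)
{
	uint32_t what = explicit_unplug ? TA_UNPLUG_IO : TA_UNPLUG_TIMER;
	uint64_t payload = depth;     /* cpu_to_be64(depth) in the kernel */

	printf("action=%u depth=%llu\n", what, (unsigned long long)payload);
}

int main(void)
{
	trace_unplug(4, true);    /* explicit unplug, e.g. from blk_finish_plug() */
	trace_unplug(7, false);   /* unplug driven from schedule() */
	return 0;
}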
diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 05672e819f8c..a235f3cc471c 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c | |||
| @@ -49,12 +49,9 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) | |||
| 49 | val = *s - '0'; | 49 | val = *s - '0'; |
| 50 | else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') | 50 | else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') |
| 51 | val = _tolower(*s) - 'a' + 10; | 51 | val = _tolower(*s) - 'a' + 10; |
| 52 | else if (*s == '\n') { | 52 | else if (*s == '\n' && *(s + 1) == '\0') |
| 53 | if (*(s + 1) == '\0') | 53 | break; |
| 54 | break; | 54 | else |
| 55 | else | ||
| 56 | return -EINVAL; | ||
| 57 | } else | ||
| 58 | return -EINVAL; | 55 | return -EINVAL; |
| 59 | 56 | ||
| 60 | if (val >= base) | 57 | if (val >= base) |
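
The rewritten condition in _kstrtoull() accepts a single '\n' only when it is the last character of the string; a newline anywhere else still fails with -EINVAL. A runnable userspace sketch of that rule (overflow handling omitted, so this is not a drop-in for the kernel helper):

#include <ctype.h>
#include <stdio.h>

static int parse_u64(const char *s, unsigned int base, unsigned long long *res)
{
	unsigned long long val = 0;

	for (; *s; s++) {
		unsigned int d;

		if (*s >= '0' && *s <= '9')
			d = *s - '0';
		else if (tolower(*s) >= 'a' && tolower(*s) <= 'f')
			d = tolower(*s) - 'a' + 10;
		else if (*s == '\n' && *(s + 1) == '\0')
			break;                   /* newline allowed only at the end */
		else
			return -1;               /* -EINVAL in the kernel */

		if (d >= base)
			return -1;
		val = val * base + d;            /* overflow check omitted */
	}
	*res = val;
	return 0;
}

int main(void)
{
	unsigned long long v;
	printf("%d\n", parse_u64("123\n", 10, &v));   /* 0: trailing newline accepted */
	printf("%d\n", parse_u64("12\n3", 10, &v));   /* -1: embedded newline rejected */
	return 0;
}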
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c index 325c2f9ecebd..d55769d63cb8 100644 --- a/lib/test-kstrtox.c +++ b/lib/test-kstrtox.c | |||
| @@ -315,12 +315,12 @@ static void __init test_kstrtou64_ok(void) | |||
| 315 | {"65537", 10, 65537}, | 315 | {"65537", 10, 65537}, |
| 316 | {"2147483646", 10, 2147483646}, | 316 | {"2147483646", 10, 2147483646}, |
| 317 | {"2147483647", 10, 2147483647}, | 317 | {"2147483647", 10, 2147483647}, |
| 318 | {"2147483648", 10, 2147483648}, | 318 | {"2147483648", 10, 2147483648ULL}, |
| 319 | {"2147483649", 10, 2147483649}, | 319 | {"2147483649", 10, 2147483649ULL}, |
| 320 | {"4294967294", 10, 4294967294}, | 320 | {"4294967294", 10, 4294967294ULL}, |
| 321 | {"4294967295", 10, 4294967295}, | 321 | {"4294967295", 10, 4294967295ULL}, |
| 322 | {"4294967296", 10, 4294967296}, | 322 | {"4294967296", 10, 4294967296ULL}, |
| 323 | {"4294967297", 10, 4294967297}, | 323 | {"4294967297", 10, 4294967297ULL}, |
| 324 | {"9223372036854775806", 10, 9223372036854775806ULL}, | 324 | {"9223372036854775806", 10, 9223372036854775806ULL}, |
| 325 | {"9223372036854775807", 10, 9223372036854775807ULL}, | 325 | {"9223372036854775807", 10, 9223372036854775807ULL}, |
| 326 | {"9223372036854775808", 10, 9223372036854775808ULL}, | 326 | {"9223372036854775808", 10, 9223372036854775808ULL}, |
| @@ -369,12 +369,12 @@ static void __init test_kstrtos64_ok(void) | |||
| 369 | {"65537", 10, 65537}, | 369 | {"65537", 10, 65537}, |
| 370 | {"2147483646", 10, 2147483646}, | 370 | {"2147483646", 10, 2147483646}, |
| 371 | {"2147483647", 10, 2147483647}, | 371 | {"2147483647", 10, 2147483647}, |
| 372 | {"2147483648", 10, 2147483648}, | 372 | {"2147483648", 10, 2147483648LL}, |
| 373 | {"2147483649", 10, 2147483649}, | 373 | {"2147483649", 10, 2147483649LL}, |
| 374 | {"4294967294", 10, 4294967294}, | 374 | {"4294967294", 10, 4294967294LL}, |
| 375 | {"4294967295", 10, 4294967295}, | 375 | {"4294967295", 10, 4294967295LL}, |
| 376 | {"4294967296", 10, 4294967296}, | 376 | {"4294967296", 10, 4294967296LL}, |
| 377 | {"4294967297", 10, 4294967297}, | 377 | {"4294967297", 10, 4294967297LL}, |
| 378 | {"9223372036854775806", 10, 9223372036854775806LL}, | 378 | {"9223372036854775806", 10, 9223372036854775806LL}, |
| 379 | {"9223372036854775807", 10, 9223372036854775807LL}, | 379 | {"9223372036854775807", 10, 9223372036854775807LL}, |
| 380 | }; | 380 | }; |
| @@ -418,10 +418,10 @@ static void __init test_kstrtou32_ok(void) | |||
| 418 | {"65537", 10, 65537}, | 418 | {"65537", 10, 65537}, |
| 419 | {"2147483646", 10, 2147483646}, | 419 | {"2147483646", 10, 2147483646}, |
| 420 | {"2147483647", 10, 2147483647}, | 420 | {"2147483647", 10, 2147483647}, |
| 421 | {"2147483648", 10, 2147483648}, | 421 | {"2147483648", 10, 2147483648U}, |
| 422 | {"2147483649", 10, 2147483649}, | 422 | {"2147483649", 10, 2147483649U}, |
| 423 | {"4294967294", 10, 4294967294}, | 423 | {"4294967294", 10, 4294967294U}, |
| 424 | {"4294967295", 10, 4294967295}, | 424 | {"4294967295", 10, 4294967295U}, |
| 425 | }; | 425 | }; |
| 426 | TEST_OK(kstrtou32, u32, "%u", test_u32_ok); | 426 | TEST_OK(kstrtou32, u32, "%u", test_u32_ok); |
| 427 | } | 427 | } |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 0a619e0e2e0b..470dcda10add 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -244,24 +244,28 @@ static ssize_t single_flag_show(struct kobject *kobj, | |||
| 244 | struct kobj_attribute *attr, char *buf, | 244 | struct kobj_attribute *attr, char *buf, |
| 245 | enum transparent_hugepage_flag flag) | 245 | enum transparent_hugepage_flag flag) |
| 246 | { | 246 | { |
| 247 | if (test_bit(flag, &transparent_hugepage_flags)) | 247 | return sprintf(buf, "%d\n", |
| 248 | return sprintf(buf, "[yes] no\n"); | 248 | !!test_bit(flag, &transparent_hugepage_flags)); |
| 249 | else | ||
| 250 | return sprintf(buf, "yes [no]\n"); | ||
| 251 | } | 249 | } |
| 250 | |||
| 252 | static ssize_t single_flag_store(struct kobject *kobj, | 251 | static ssize_t single_flag_store(struct kobject *kobj, |
| 253 | struct kobj_attribute *attr, | 252 | struct kobj_attribute *attr, |
| 254 | const char *buf, size_t count, | 253 | const char *buf, size_t count, |
| 255 | enum transparent_hugepage_flag flag) | 254 | enum transparent_hugepage_flag flag) |
| 256 | { | 255 | { |
| 257 | if (!memcmp("yes", buf, | 256 | unsigned long value; |
| 258 | min(sizeof("yes")-1, count))) { | 257 | int ret; |
| 258 | |||
| 259 | ret = kstrtoul(buf, 10, &value); | ||
| 260 | if (ret < 0) | ||
| 261 | return ret; | ||
| 262 | if (value > 1) | ||
| 263 | return -EINVAL; | ||
| 264 | |||
| 265 | if (value) | ||
| 259 | set_bit(flag, &transparent_hugepage_flags); | 266 | set_bit(flag, &transparent_hugepage_flags); |
| 260 | } else if (!memcmp("no", buf, | 267 | else |
| 261 | min(sizeof("no")-1, count))) { | ||
| 262 | clear_bit(flag, &transparent_hugepage_flags); | 268 | clear_bit(flag, &transparent_hugepage_flags); |
| 263 | } else | ||
| 264 | return -EINVAL; | ||
| 265 | 269 | ||
| 266 | return count; | 270 | return count; |
| 267 | } | 271 | } |
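
The sysfs store above switches from matching "yes"/"no" to parsing a 0/1 value with kstrtoul, and the show side prints 0 or 1 to match. A userspace sketch of the same parsing logic using strtoul in place of kstrtoul; the flag variable is a stand-in for transparent_hugepage_flags.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned long hugepage_flags;

/* Parse a 0/1 sysfs-style write; mirrors the kstrtoul-based store above. */
static int flag_store(const char *buf, int flag_bit)
{
	char *end;
	unsigned long value;

	errno = 0;
	value = strtoul(buf, &end, 10);
	if (errno || end == buf || (*end && *end != '\n'))
		return -EINVAL;
	if (value > 1)
		return -EINVAL;

	if (value)
		hugepage_flags |= 1UL << flag_bit;
	else
		hugepage_flags &= ~(1UL << flag_bit);
	return 0;
}

int main(void)
{
	printf("%d flags=%lx\n", flag_store("1\n", 0), hugepage_flags);
	printf("%d flags=%lx\n", flag_store("0\n", 0), hugepage_flags);
	printf("%d flags=%lx\n", flag_store("yes\n", 0), hugepage_flags);  /* now rejected */
	return 0;
}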
| @@ -680,8 +684,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 680 | return VM_FAULT_OOM; | 684 | return VM_FAULT_OOM; |
| 681 | page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), | 685 | page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), |
| 682 | vma, haddr, numa_node_id(), 0); | 686 | vma, haddr, numa_node_id(), 0); |
| 683 | if (unlikely(!page)) | 687 | if (unlikely(!page)) { |
| 688 | count_vm_event(THP_FAULT_FALLBACK); | ||
| 684 | goto out; | 689 | goto out; |
| 690 | } | ||
| 691 | count_vm_event(THP_FAULT_ALLOC); | ||
| 685 | if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { | 692 | if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { |
| 686 | put_page(page); | 693 | put_page(page); |
| 687 | goto out; | 694 | goto out; |
| @@ -909,11 +916,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 909 | new_page = NULL; | 916 | new_page = NULL; |
| 910 | 917 | ||
| 911 | if (unlikely(!new_page)) { | 918 | if (unlikely(!new_page)) { |
| 919 | count_vm_event(THP_FAULT_FALLBACK); | ||
| 912 | ret = do_huge_pmd_wp_page_fallback(mm, vma, address, | 920 | ret = do_huge_pmd_wp_page_fallback(mm, vma, address, |
| 913 | pmd, orig_pmd, page, haddr); | 921 | pmd, orig_pmd, page, haddr); |
| 914 | put_page(page); | 922 | put_page(page); |
| 915 | goto out; | 923 | goto out; |
| 916 | } | 924 | } |
| 925 | count_vm_event(THP_FAULT_ALLOC); | ||
| 917 | 926 | ||
| 918 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { | 927 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { |
| 919 | put_page(new_page); | 928 | put_page(new_page); |
| @@ -1390,6 +1399,7 @@ int split_huge_page(struct page *page) | |||
| 1390 | 1399 | ||
| 1391 | BUG_ON(!PageSwapBacked(page)); | 1400 | BUG_ON(!PageSwapBacked(page)); |
| 1392 | __split_huge_page(page, anon_vma); | 1401 | __split_huge_page(page, anon_vma); |
| 1402 | count_vm_event(THP_SPLIT); | ||
| 1393 | 1403 | ||
| 1394 | BUG_ON(PageCompound(page)); | 1404 | BUG_ON(PageCompound(page)); |
| 1395 | out_unlock: | 1405 | out_unlock: |
| @@ -1784,9 +1794,11 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
| 1784 | node, __GFP_OTHER_NODE); | 1794 | node, __GFP_OTHER_NODE); |
| 1785 | if (unlikely(!new_page)) { | 1795 | if (unlikely(!new_page)) { |
| 1786 | up_read(&mm->mmap_sem); | 1796 | up_read(&mm->mmap_sem); |
| 1797 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | ||
| 1787 | *hpage = ERR_PTR(-ENOMEM); | 1798 | *hpage = ERR_PTR(-ENOMEM); |
| 1788 | return; | 1799 | return; |
| 1789 | } | 1800 | } |
| 1801 | count_vm_event(THP_COLLAPSE_ALLOC); | ||
| 1790 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { | 1802 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { |
| 1791 | up_read(&mm->mmap_sem); | 1803 | up_read(&mm->mmap_sem); |
| 1792 | put_page(new_page); | 1804 | put_page(new_page); |
| @@ -2151,8 +2163,11 @@ static void khugepaged_do_scan(struct page **hpage) | |||
| 2151 | #ifndef CONFIG_NUMA | 2163 | #ifndef CONFIG_NUMA |
| 2152 | if (!*hpage) { | 2164 | if (!*hpage) { |
| 2153 | *hpage = alloc_hugepage(khugepaged_defrag()); | 2165 | *hpage = alloc_hugepage(khugepaged_defrag()); |
| 2154 | if (unlikely(!*hpage)) | 2166 | if (unlikely(!*hpage)) { |
| 2167 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | ||
| 2155 | break; | 2168 | break; |
| 2169 | } | ||
| 2170 | count_vm_event(THP_COLLAPSE_ALLOC); | ||
| 2156 | } | 2171 | } |
| 2157 | #else | 2172 | #else |
| 2158 | if (IS_ERR(*hpage)) | 2173 | if (IS_ERR(*hpage)) |
| @@ -2192,8 +2207,11 @@ static struct page *khugepaged_alloc_hugepage(void) | |||
| 2192 | 2207 | ||
| 2193 | do { | 2208 | do { |
| 2194 | hpage = alloc_hugepage(khugepaged_defrag()); | 2209 | hpage = alloc_hugepage(khugepaged_defrag()); |
| 2195 | if (!hpage) | 2210 | if (!hpage) { |
| 2211 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | ||
| 2196 | khugepaged_alloc_sleep(); | 2212 | khugepaged_alloc_sleep(); |
| 2213 | } else | ||
| 2214 | count_vm_event(THP_COLLAPSE_ALLOC); | ||
| 2197 | } while (unlikely(!hpage) && | 2215 | } while (unlikely(!hpage) && |
| 2198 | likely(khugepaged_enabled())); | 2216 | likely(khugepaged_enabled())); |
| 2199 | return hpage; | 2217 | return hpage; |
| @@ -2210,8 +2228,11 @@ static void khugepaged_loop(void) | |||
| 2210 | while (likely(khugepaged_enabled())) { | 2228 | while (likely(khugepaged_enabled())) { |
| 2211 | #ifndef CONFIG_NUMA | 2229 | #ifndef CONFIG_NUMA |
| 2212 | hpage = khugepaged_alloc_hugepage(); | 2230 | hpage = khugepaged_alloc_hugepage(); |
| 2213 | if (unlikely(!hpage)) | 2231 | if (unlikely(!hpage)) { |
| 2232 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | ||
| 2214 | break; | 2233 | break; |
| 2234 | } | ||
| 2235 | count_vm_event(THP_COLLAPSE_ALLOC); | ||
| 2215 | #else | 2236 | #else |
| 2216 | if (IS_ERR(hpage)) { | 2237 | if (IS_ERR(hpage)) { |
| 2217 | khugepaged_alloc_sleep(); | 2238 | khugepaged_alloc_sleep(); |
diff --git a/mm/memory.c b/mm/memory.c index 9da8cab1b1b0..ce22a250926f 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -1410,6 +1410,13 @@ no_page_table: | |||
| 1410 | return page; | 1410 | return page; |
| 1411 | } | 1411 | } |
| 1412 | 1412 | ||
| 1413 | static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) | ||
| 1414 | { | ||
| 1415 | return (vma->vm_flags & VM_GROWSDOWN) && | ||
| 1416 | (vma->vm_start == addr) && | ||
| 1417 | !vma_stack_continue(vma->vm_prev, addr); | ||
| 1418 | } | ||
| 1419 | |||
| 1413 | /** | 1420 | /** |
| 1414 | * __get_user_pages() - pin user pages in memory | 1421 | * __get_user_pages() - pin user pages in memory |
| 1415 | * @tsk: task_struct of target task | 1422 | * @tsk: task_struct of target task |
| @@ -1488,7 +1495,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1488 | vma = find_extend_vma(mm, start); | 1495 | vma = find_extend_vma(mm, start); |
| 1489 | if (!vma && in_gate_area(mm, start)) { | 1496 | if (!vma && in_gate_area(mm, start)) { |
| 1490 | unsigned long pg = start & PAGE_MASK; | 1497 | unsigned long pg = start & PAGE_MASK; |
| 1491 | struct vm_area_struct *gate_vma = get_gate_vma(mm); | ||
| 1492 | pgd_t *pgd; | 1498 | pgd_t *pgd; |
| 1493 | pud_t *pud; | 1499 | pud_t *pud; |
| 1494 | pmd_t *pmd; | 1500 | pmd_t *pmd; |
| @@ -1513,10 +1519,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1513 | pte_unmap(pte); | 1519 | pte_unmap(pte); |
| 1514 | return i ? : -EFAULT; | 1520 | return i ? : -EFAULT; |
| 1515 | } | 1521 | } |
| 1522 | vma = get_gate_vma(mm); | ||
| 1516 | if (pages) { | 1523 | if (pages) { |
| 1517 | struct page *page; | 1524 | struct page *page; |
| 1518 | 1525 | ||
| 1519 | page = vm_normal_page(gate_vma, start, *pte); | 1526 | page = vm_normal_page(vma, start, *pte); |
| 1520 | if (!page) { | 1527 | if (!page) { |
| 1521 | if (!(gup_flags & FOLL_DUMP) && | 1528 | if (!(gup_flags & FOLL_DUMP) && |
| 1522 | is_zero_pfn(pte_pfn(*pte))) | 1529 | is_zero_pfn(pte_pfn(*pte))) |
| @@ -1530,12 +1537,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1530 | get_page(page); | 1537 | get_page(page); |
| 1531 | } | 1538 | } |
| 1532 | pte_unmap(pte); | 1539 | pte_unmap(pte); |
| 1533 | if (vmas) | 1540 | goto next_page; |
| 1534 | vmas[i] = gate_vma; | ||
| 1535 | i++; | ||
| 1536 | start += PAGE_SIZE; | ||
| 1537 | nr_pages--; | ||
| 1538 | continue; | ||
| 1539 | } | 1541 | } |
| 1540 | 1542 | ||
| 1541 | if (!vma || | 1543 | if (!vma || |
| @@ -1549,6 +1551,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1549 | continue; | 1551 | continue; |
| 1550 | } | 1552 | } |
| 1551 | 1553 | ||
| 1554 | /* | ||
| 1555 | * If we don't actually want the page itself, | ||
| 1556 | * and it's the stack guard page, just skip it. | ||
| 1557 | */ | ||
| 1558 | if (!pages && stack_guard_page(vma, start)) | ||
| 1559 | goto next_page; | ||
| 1560 | |||
| 1552 | do { | 1561 | do { |
| 1553 | struct page *page; | 1562 | struct page *page; |
| 1554 | unsigned int foll_flags = gup_flags; | 1563 | unsigned int foll_flags = gup_flags; |
| @@ -1631,6 +1640,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1631 | flush_anon_page(vma, page, start); | 1640 | flush_anon_page(vma, page, start); |
| 1632 | flush_dcache_page(page); | 1641 | flush_dcache_page(page); |
| 1633 | } | 1642 | } |
| 1643 | next_page: | ||
| 1634 | if (vmas) | 1644 | if (vmas) |
| 1635 | vmas[i] = vma; | 1645 | vmas[i] = vma; |
| 1636 | i++; | 1646 | i++; |
| @@ -3678,7 +3688,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | |||
| 3678 | */ | 3688 | */ |
| 3679 | #ifdef CONFIG_HAVE_IOREMAP_PROT | 3689 | #ifdef CONFIG_HAVE_IOREMAP_PROT |
| 3680 | vma = find_vma(mm, addr); | 3690 | vma = find_vma(mm, addr); |
| 3681 | if (!vma) | 3691 | if (!vma || vma->vm_start > addr) |
| 3682 | break; | 3692 | break; |
| 3683 | if (vma->vm_ops && vma->vm_ops->access) | 3693 | if (vma->vm_ops && vma->vm_ops->access) |
| 3684 | ret = vma->vm_ops->access(vma, addr, buf, | 3694 | ret = vma->vm_ops->access(vma, addr, buf, |
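
The mm/memory.c hunks pull stack_guard_page() over from mlock.c and have __get_user_pages() skip the guard page (via the new next_page label) when the caller does not actually want the page contents. A simplified, runnable sketch of the guard-page test; the structs and flag value are stand-ins, not the real mm types.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE    4096UL
#define VM_GROWSDOWN 0x1UL

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
	struct vma *vm_prev;
};

/* The previous vma "continues" into this one if it ends right at addr. */
static bool vma_stack_continue(const struct vma *prev, unsigned long addr)
{
	return prev && prev->vm_end == addr;
}

/* addr is the guard page iff it is the first page of a grows-down stack vma
 * and nothing is mapped immediately below it. */
static bool stack_guard_page(const struct vma *vma, unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
	       vma->vm_start == addr &&
	       !vma_stack_continue(vma->vm_prev, addr);
}

int main(void)
{
	struct vma stack = { .vm_start = 0x100000, .vm_end = 0x200000,
			     .vm_flags = VM_GROWSDOWN, .vm_prev = NULL };
	printf("%d\n", stack_guard_page(&stack, stack.vm_start));             /* 1 */
	printf("%d\n", stack_guard_page(&stack, stack.vm_start + PAGE_SIZE)); /* 0 */
	return 0;
}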
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a2acaf820fe5..9ca1d604f7cd 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
| @@ -375,7 +375,7 @@ void online_page(struct page *page) | |||
| 375 | #endif | 375 | #endif |
| 376 | 376 | ||
| 377 | #ifdef CONFIG_FLATMEM | 377 | #ifdef CONFIG_FLATMEM |
| 378 | max_mapnr = max(page_to_pfn(page), max_mapnr); | 378 | max_mapnr = max(pfn, max_mapnr); |
| 379 | #endif | 379 | #endif |
| 380 | 380 | ||
| 381 | ClearPageReserved(page); | 381 | ClearPageReserved(page); |
diff --git a/mm/mlock.c b/mm/mlock.c index 2689a08c79af..6b55e3efe0df 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
| @@ -135,13 +135,6 @@ void munlock_vma_page(struct page *page) | |||
| 135 | } | 135 | } |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) | ||
| 139 | { | ||
| 140 | return (vma->vm_flags & VM_GROWSDOWN) && | ||
| 141 | (vma->vm_start == addr) && | ||
| 142 | !vma_stack_continue(vma->vm_prev, addr); | ||
| 143 | } | ||
| 144 | |||
| 145 | /** | 138 | /** |
| 146 | * __mlock_vma_pages_range() - mlock a range of pages in the vma. | 139 | * __mlock_vma_pages_range() - mlock a range of pages in the vma. |
| 147 | * @vma: target vma | 140 | * @vma: target vma |
| @@ -188,12 +181,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, | |||
| 188 | if (vma->vm_flags & VM_LOCKED) | 181 | if (vma->vm_flags & VM_LOCKED) |
| 189 | gup_flags |= FOLL_MLOCK; | 182 | gup_flags |= FOLL_MLOCK; |
| 190 | 183 | ||
| 191 | /* We don't try to access the guard page of a stack vma */ | ||
| 192 | if (stack_guard_page(vma, start)) { | ||
| 193 | addr += PAGE_SIZE; | ||
| 194 | nr_pages--; | ||
| 195 | } | ||
| 196 | |||
| 197 | return __get_user_pages(current, mm, addr, nr_pages, gup_flags, | 184 | return __get_user_pages(current, mm, addr, nr_pages, gup_flags, |
| 198 | NULL, NULL, nonblocking); | 185 | NULL, NULL, nonblocking); |
| 199 | } | 186 | } |
| @@ -259,7 +259,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) | |||
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
| @@ -259,7 +259,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) | |||
| 259 | * randomize_va_space to 2, which will still cause mm->start_brk | 259 | * randomize_va_space to 2, which will still cause mm->start_brk |
| 260 | * to be arbitrarily shifted | 260 | * to be arbitrarily shifted |
| 261 | */ | 261 | */ |
| 262 | if (mm->start_brk > PAGE_ALIGN(mm->end_data)) | 262 | if (current->brk_randomized) |
| 263 | min_brk = mm->start_brk; | 263 | min_brk = mm->start_brk; |
| 264 | else | 264 | else |
| 265 | min_brk = mm->end_data; | 265 | min_brk = mm->end_data; |
| @@ -1814,11 +1814,14 @@ static int expand_downwards(struct vm_area_struct *vma, | |||
| 1814 | size = vma->vm_end - address; | 1814 | size = vma->vm_end - address; |
| 1815 | grow = (vma->vm_start - address) >> PAGE_SHIFT; | 1815 | grow = (vma->vm_start - address) >> PAGE_SHIFT; |
| 1816 | 1816 | ||
| 1817 | error = acct_stack_growth(vma, size, grow); | 1817 | error = -ENOMEM; |
| 1818 | if (!error) { | 1818 | if (grow <= vma->vm_pgoff) { |
| 1819 | vma->vm_start = address; | 1819 | error = acct_stack_growth(vma, size, grow); |
| 1820 | vma->vm_pgoff -= grow; | 1820 | if (!error) { |
| 1821 | perf_event_mmap(vma); | 1821 | vma->vm_start = address; |
| 1822 | vma->vm_pgoff -= grow; | ||
| 1823 | perf_event_mmap(vma); | ||
| 1824 | } | ||
| 1822 | } | 1825 | } |
| 1823 | } | 1826 | } |
| 1824 | vma_unlock_anon_vma(vma); | 1827 | vma_unlock_anon_vma(vma); |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 6a819d1b2c7d..83fb72c108b7 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
| @@ -84,24 +84,6 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk, | |||
| 84 | #endif /* CONFIG_NUMA */ | 84 | #endif /* CONFIG_NUMA */ |
| 85 | 85 | ||
| 86 | /* | 86 | /* |
| 87 | * If this is a system OOM (not a memcg OOM) and the task selected to be | ||
| 88 | * killed is not already running at high (RT) priorities, speed up the | ||
| 89 | * recovery by boosting the dying task to the lowest FIFO priority. | ||
| 90 | * That helps with the recovery and avoids interfering with RT tasks. | ||
| 91 | */ | ||
| 92 | static void boost_dying_task_prio(struct task_struct *p, | ||
| 93 | struct mem_cgroup *mem) | ||
| 94 | { | ||
| 95 | struct sched_param param = { .sched_priority = 1 }; | ||
| 96 | |||
| 97 | if (mem) | ||
| 98 | return; | ||
| 99 | |||
| 100 | if (!rt_task(p)) | ||
| 101 | sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); | ||
| 102 | } | ||
| 103 | |||
| 104 | /* | ||
| 105 | * The process p may have detached its own ->mm while exiting or through | 87 | * The process p may have detached its own ->mm while exiting or through |
| 106 | * use_mm(), but one or more of its subthreads may still have a valid | 88 | * use_mm(), but one or more of its subthreads may still have a valid |
| 107 | * pointer. Return p, or any of its subthreads with a valid ->mm, with | 89 | * pointer. Return p, or any of its subthreads with a valid ->mm, with |
| @@ -452,13 +434,6 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) | |||
| 452 | set_tsk_thread_flag(p, TIF_MEMDIE); | 434 | set_tsk_thread_flag(p, TIF_MEMDIE); |
| 453 | force_sig(SIGKILL, p); | 435 | force_sig(SIGKILL, p); |
| 454 | 436 | ||
| 455 | /* | ||
| 456 | * We give our sacrificial lamb high priority and access to | ||
| 457 | * all the memory it needs. That way it should be able to | ||
| 458 | * exit() and clear out its resources quickly... | ||
| 459 | */ | ||
| 460 | boost_dying_task_prio(p, mem); | ||
| 461 | |||
| 462 | return 0; | 437 | return 0; |
| 463 | } | 438 | } |
| 464 | #undef K | 439 | #undef K |
| @@ -482,7 +457,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | |||
| 482 | */ | 457 | */ |
| 483 | if (p->flags & PF_EXITING) { | 458 | if (p->flags & PF_EXITING) { |
| 484 | set_tsk_thread_flag(p, TIF_MEMDIE); | 459 | set_tsk_thread_flag(p, TIF_MEMDIE); |
| 485 | boost_dying_task_prio(p, mem); | ||
| 486 | return 0; | 460 | return 0; |
| 487 | } | 461 | } |
| 488 | 462 | ||
| @@ -556,7 +530,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask) | |||
| 556 | */ | 530 | */ |
| 557 | if (fatal_signal_pending(current)) { | 531 | if (fatal_signal_pending(current)) { |
| 558 | set_thread_flag(TIF_MEMDIE); | 532 | set_thread_flag(TIF_MEMDIE); |
| 559 | boost_dying_task_prio(current, NULL); | ||
| 560 | return; | 533 | return; |
| 561 | } | 534 | } |
| 562 | 535 | ||
| @@ -712,7 +685,6 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | |||
| 712 | */ | 685 | */ |
| 713 | if (fatal_signal_pending(current)) { | 686 | if (fatal_signal_pending(current)) { |
| 714 | set_thread_flag(TIF_MEMDIE); | 687 | set_thread_flag(TIF_MEMDIE); |
| 715 | boost_dying_task_prio(current, NULL); | ||
| 716 | return; | 688 | return; |
| 717 | } | 689 | } |
| 718 | 690 | ||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2747f5e5abc1..9f8a97b9a350 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -3176,7 +3176,7 @@ static __init_refok int __build_all_zonelists(void *data) | |||
| 3176 | * Called with zonelists_mutex held always | 3176 | * Called with zonelists_mutex held always |
| 3177 | * unless system_state == SYSTEM_BOOTING. | 3177 | * unless system_state == SYSTEM_BOOTING. |
| 3178 | */ | 3178 | */ |
| 3179 | void build_all_zonelists(void *data) | 3179 | void __ref build_all_zonelists(void *data) |
| 3180 | { | 3180 | { |
| 3181 | set_zonelist_order(); | 3181 | set_zonelist_order(); |
| 3182 | 3182 | ||
diff --git a/mm/shmem.c b/mm/shmem.c index 58da7c150ba6..8fa27e4e582a 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
| @@ -421,7 +421,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long | |||
| 421 | * a waste to allocate index if we cannot allocate data. | 421 | * a waste to allocate index if we cannot allocate data. |
| 422 | */ | 422 | */ |
| 423 | if (sbinfo->max_blocks) { | 423 | if (sbinfo->max_blocks) { |
| 424 | if (percpu_counter_compare(&sbinfo->used_blocks, (sbinfo->max_blocks - 1)) > 0) | 424 | if (percpu_counter_compare(&sbinfo->used_blocks, |
| 425 | sbinfo->max_blocks - 1) >= 0) | ||
| 425 | return ERR_PTR(-ENOSPC); | 426 | return ERR_PTR(-ENOSPC); |
| 426 | percpu_counter_inc(&sbinfo->used_blocks); | 427 | percpu_counter_inc(&sbinfo->used_blocks); |
| 427 | spin_lock(&inode->i_lock); | 428 | spin_lock(&inode->i_lock); |
| @@ -1397,7 +1398,8 @@ repeat: | |||
| 1397 | shmem_swp_unmap(entry); | 1398 | shmem_swp_unmap(entry); |
| 1398 | sbinfo = SHMEM_SB(inode->i_sb); | 1399 | sbinfo = SHMEM_SB(inode->i_sb); |
| 1399 | if (sbinfo->max_blocks) { | 1400 | if (sbinfo->max_blocks) { |
| 1400 | if ((percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks) > 0) || | 1401 | if (percpu_counter_compare(&sbinfo->used_blocks, |
| 1402 | sbinfo->max_blocks) >= 0 || | ||
| 1401 | shmem_acct_block(info->flags)) { | 1403 | shmem_acct_block(info->flags)) { |
| 1402 | spin_unlock(&info->lock); | 1404 | spin_unlock(&info->lock); |
| 1403 | error = -ENOSPC; | 1405 | error = -ENOSPC; |
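
In both shmem hunks the comparison tightens from '>' to '>=': percpu_counter_compare() returns 0 when the counter equals the right-hand side, so the old test treated "exactly at the limit" as still having room, while the new one refuses at the boundary. A tiny sketch of that boundary difference with an exact counter (the kernel's percpu version is batched, but the comparison question is the same):

#include <stdio.h>

/* Returns <0, 0, >0 as count is below, at, or above rhs. */
static int counter_compare(long long count, long long rhs)
{
	return (count > rhs) - (count < rhs);
}

int main(void)
{
	long long used = 99, limit = 99;

	/* Old test: refuse only when strictly above the limit. */
	printf("'>'  refuses: %d\n", counter_compare(used, limit) > 0);   /* 0: allowed */
	/* New test: refuse once the limit has been reached. */
	printf("'>=' refuses: %d\n", counter_compare(used, limit) >= 0);  /* 1: refused */
	return 0;
}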
diff --git a/mm/vmscan.c b/mm/vmscan.c index c7f5a6d4b75b..f6b435c80079 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -41,6 +41,7 @@ | |||
| 41 | #include <linux/memcontrol.h> | 41 | #include <linux/memcontrol.h> |
| 42 | #include <linux/delayacct.h> | 42 | #include <linux/delayacct.h> |
| 43 | #include <linux/sysctl.h> | 43 | #include <linux/sysctl.h> |
| 44 | #include <linux/oom.h> | ||
| 44 | 45 | ||
| 45 | #include <asm/tlbflush.h> | 46 | #include <asm/tlbflush.h> |
| 46 | #include <asm/div64.h> | 47 | #include <asm/div64.h> |
| @@ -1988,17 +1989,12 @@ static bool zone_reclaimable(struct zone *zone) | |||
| 1988 | return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; | 1989 | return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; |
| 1989 | } | 1990 | } |
| 1990 | 1991 | ||
| 1991 | /* | 1992 | /* All zones in zonelist are unreclaimable? */ |
| 1992 | * As hibernation is going on, kswapd is freezed so that it can't mark | ||
| 1993 | * the zone into all_unreclaimable. It can't handle OOM during hibernation. | ||
| 1994 | * So let's check zone's unreclaimable in direct reclaim as well as kswapd. | ||
| 1995 | */ | ||
| 1996 | static bool all_unreclaimable(struct zonelist *zonelist, | 1993 | static bool all_unreclaimable(struct zonelist *zonelist, |
| 1997 | struct scan_control *sc) | 1994 | struct scan_control *sc) |
| 1998 | { | 1995 | { |
| 1999 | struct zoneref *z; | 1996 | struct zoneref *z; |
| 2000 | struct zone *zone; | 1997 | struct zone *zone; |
| 2001 | bool all_unreclaimable = true; | ||
| 2002 | 1998 | ||
| 2003 | for_each_zone_zonelist_nodemask(zone, z, zonelist, | 1999 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
| 2004 | gfp_zone(sc->gfp_mask), sc->nodemask) { | 2000 | gfp_zone(sc->gfp_mask), sc->nodemask) { |
| @@ -2006,13 +2002,11 @@ static bool all_unreclaimable(struct zonelist *zonelist, | |||
| 2006 | continue; | 2002 | continue; |
| 2007 | if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) | 2003 | if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) |
| 2008 | continue; | 2004 | continue; |
| 2009 | if (zone_reclaimable(zone)) { | 2005 | if (!zone->all_unreclaimable) |
| 2010 | all_unreclaimable = false; | 2006 | return false; |
| 2011 | break; | ||
| 2012 | } | ||
| 2013 | } | 2007 | } |
| 2014 | 2008 | ||
| 2015 | return all_unreclaimable; | 2009 | return true; |
| 2016 | } | 2010 | } |
| 2017 | 2011 | ||
| 2018 | /* | 2012 | /* |
| @@ -2108,6 +2102,14 @@ out: | |||
| 2108 | if (sc->nr_reclaimed) | 2102 | if (sc->nr_reclaimed) |
| 2109 | return sc->nr_reclaimed; | 2103 | return sc->nr_reclaimed; |
| 2110 | 2104 | ||
| 2105 | /* | ||
| 2106 | * As hibernation is going on, kswapd is frozen so that it can't mark | ||
| 2107 | * the zone into all_unreclaimable. Thus bypassing all_unreclaimable | ||
| 2108 | * check. | ||
| 2109 | */ | ||
| 2110 | if (oom_killer_disabled) | ||
| 2111 | return 0; | ||
| 2112 | |||
| 2111 | /* top priority shrink_zones still had more to do? don't OOM, then */ | 2113 | /* top priority shrink_zones still had more to do? don't OOM, then */ |
| 2112 | if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) | 2114 | if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) |
| 2113 | return 1; | 2115 | return 1; |
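After this change all_unreclaimable() simply trusts the zone->all_unreclaimable flag that kswapd maintains and answers false as soon as one eligible zone is still reclaimable, while the hibernation case moves up into do_try_to_free_pages(): with kswapd frozen the flag cannot be updated, so when oom_killer_disabled is set the function returns 0 and skips the check entirely. A stand-alone sketch of the resulting predicate, using an invented zone type rather than the kernel's struct zone:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct zone_model {
        bool populated;
        bool allowed;              /* cpuset_zone_allowed_hardwall() stand-in */
        bool all_unreclaimable;    /* flag normally maintained by kswapd */
    };

    static bool all_unreclaimable(const struct zone_model *zones, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            if (!zones[i].populated || !zones[i].allowed)
                continue;          /* skipped zones do not veto the answer */
            if (!zones[i].all_unreclaimable)
                return false;      /* one live zone is enough */
        }
        return true;
    }

    int main(void)
    {
        struct zone_model zones[] = {
            { true, true,  true  },
            { true, false, false },   /* not allowed by the cpuset: ignored */
            { true, true,  false },   /* still reclaimable */
        };

        printf("%s\n", all_unreclaimable(zones, 3) ? "consider OOM" : "keep reclaiming");
        return 0;
    }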
diff --git a/mm/vmstat.c b/mm/vmstat.c index 772b39b87d95..897ea9e88238 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
| @@ -321,9 +321,12 @@ static inline void mod_state(struct zone *zone, | |||
| 321 | /* | 321 | /* |
| 322 | * The fetching of the stat_threshold is racy. We may apply | 322 | * The fetching of the stat_threshold is racy. We may apply |
| 323 | * a counter threshold to the wrong the cpu if we get | 323 | * a counter threshold to the wrong the cpu if we get |
| 324 | * rescheduled while executing here. However, the following | 324 | * rescheduled while executing here. However, the next |
| 325 | * will apply the threshold again and therefore bring the | 325 | * counter update will apply the threshold again and |
| 326 | * counter under the threshold. | 326 | * therefore bring the counter under the threshold again. |
| 327 | * | ||
| 328 | * Most of the time the thresholds are the same anyway | ||
| 329 | * for all cpus in a zone. | ||
| 327 | */ | 330 | */ |
| 328 | t = this_cpu_read(pcp->stat_threshold); | 331 | t = this_cpu_read(pcp->stat_threshold); |
| 329 | 332 | ||
| @@ -945,7 +948,16 @@ static const char * const vmstat_text[] = { | |||
| 945 | "unevictable_pgs_cleared", | 948 | "unevictable_pgs_cleared", |
| 946 | "unevictable_pgs_stranded", | 949 | "unevictable_pgs_stranded", |
| 947 | "unevictable_pgs_mlockfreed", | 950 | "unevictable_pgs_mlockfreed", |
| 951 | |||
| 952 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
| 953 | "thp_fault_alloc", | ||
| 954 | "thp_fault_fallback", | ||
| 955 | "thp_collapse_alloc", | ||
| 956 | "thp_collapse_alloc_failed", | ||
| 957 | "thp_split", | ||
| 948 | #endif | 958 | #endif |
| 959 | |||
| 960 | #endif /* CONFIG_VM_EVENTS_COUNTERS */ | ||
| 949 | }; | 961 | }; |
| 950 | 962 | ||
| 951 | static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, | 963 | static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, |
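The reworded comment describes the per-cpu counter scheme behind mod_state(): each CPU accumulates small changes in a local delta and folds it into the zone-wide counter once the delta crosses stat_threshold, so reading a slightly stale threshold after a reschedule merely delays the fold until the next update. The second hunk only adds names for the new transparent-hugepage event counters. A simplified, single-threaded model of that bounded drift (invented types, not the kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    struct pcp_model {
        long global;        /* zone-wide counter */
        long diff;          /* this CPU's batched delta */
        long threshold;     /* stat_threshold stand-in */
    };

    static void mod_state(struct pcp_model *p, long delta)
    {
        long t = p->threshold;      /* the racy read the comment talks about */

        p->diff += delta;
        if (labs(p->diff) > t) {    /* fold once the delta gets large enough */
            p->global += p->diff;
            p->diff = 0;
        }
    }

    int main(void)
    {
        struct pcp_model p = { 0, 0, 8 };

        for (int i = 0; i < 100; i++)
            mod_state(&p, 1);
        printf("global=%ld diff=%ld (true value %ld)\n",
               p.global, p.diff, p.global + p.diff);
        return 0;
    }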
diff --git a/net/9p/client.c b/net/9p/client.c index 48b8e084e710..77367745be9b 100644 --- a/net/9p/client.c +++ b/net/9p/client.c | |||
| @@ -929,15 +929,15 @@ error: | |||
| 929 | } | 929 | } |
| 930 | EXPORT_SYMBOL(p9_client_attach); | 930 | EXPORT_SYMBOL(p9_client_attach); |
| 931 | 931 | ||
| 932 | struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, | 932 | struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, |
| 933 | int clone) | 933 | char **wnames, int clone) |
| 934 | { | 934 | { |
| 935 | int err; | 935 | int err; |
| 936 | struct p9_client *clnt; | 936 | struct p9_client *clnt; |
| 937 | struct p9_fid *fid; | 937 | struct p9_fid *fid; |
| 938 | struct p9_qid *wqids; | 938 | struct p9_qid *wqids; |
| 939 | struct p9_req_t *req; | 939 | struct p9_req_t *req; |
| 940 | int16_t nwqids, count; | 940 | uint16_t nwqids, count; |
| 941 | 941 | ||
| 942 | err = 0; | 942 | err = 0; |
| 943 | wqids = NULL; | 943 | wqids = NULL; |
| @@ -955,7 +955,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, | |||
| 955 | fid = oldfid; | 955 | fid = oldfid; |
| 956 | 956 | ||
| 957 | 957 | ||
| 958 | P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %d wname[0] %s\n", | 958 | P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n", |
| 959 | oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL); | 959 | oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL); |
| 960 | 960 | ||
| 961 | req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid, | 961 | req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid, |
| @@ -1220,27 +1220,6 @@ error: | |||
| 1220 | } | 1220 | } |
| 1221 | EXPORT_SYMBOL(p9_client_fsync); | 1221 | EXPORT_SYMBOL(p9_client_fsync); |
| 1222 | 1222 | ||
| 1223 | int p9_client_sync_fs(struct p9_fid *fid) | ||
| 1224 | { | ||
| 1225 | int err = 0; | ||
| 1226 | struct p9_req_t *req; | ||
| 1227 | struct p9_client *clnt; | ||
| 1228 | |||
| 1229 | P9_DPRINTK(P9_DEBUG_9P, ">>> TSYNC_FS fid %d\n", fid->fid); | ||
| 1230 | |||
| 1231 | clnt = fid->clnt; | ||
| 1232 | req = p9_client_rpc(clnt, P9_TSYNCFS, "d", fid->fid); | ||
| 1233 | if (IS_ERR(req)) { | ||
| 1234 | err = PTR_ERR(req); | ||
| 1235 | goto error; | ||
| 1236 | } | ||
| 1237 | P9_DPRINTK(P9_DEBUG_9P, "<<< RSYNCFS fid %d\n", fid->fid); | ||
| 1238 | p9_free_req(clnt, req); | ||
| 1239 | error: | ||
| 1240 | return err; | ||
| 1241 | } | ||
| 1242 | EXPORT_SYMBOL(p9_client_sync_fs); | ||
| 1243 | |||
| 1244 | int p9_client_clunk(struct p9_fid *fid) | 1223 | int p9_client_clunk(struct p9_fid *fid) |
| 1245 | { | 1224 | { |
| 1246 | int err; | 1225 | int err; |
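The walk changes make nwname and nwqids unsigned 16-bit quantities, which is what the 9P wire format actually carries for these counts (2-byte little-endian, unsigned); p9_client_sync_fs(), which issued the P9_TSYNCFS request, is dropped outright. A tiny decoder showing why a signed 16-bit variable is the wrong model for such a field (stand-alone example, not the 9p code):

    #include <stdint.h>
    #include <stdio.h>

    /* read a 9P-style 2-byte little-endian unsigned field */
    static uint16_t get_le16(const unsigned char *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        unsigned char wire[2] = { 0x00, 0x80 };     /* count 0x8000 = 32768 */
        int16_t  as_signed   = (int16_t)get_le16(wire);
        uint16_t as_unsigned = get_le16(wire);

        /* the signed reading goes negative, the unsigned one matches the wire */
        printf("signed: %d, unsigned: %u\n", as_signed, as_unsigned);
        return 0;
    }

In practice nwname stays small, but keeping the in-memory type identical to the wire type avoids sign-related surprises.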
diff --git a/net/9p/protocol.c b/net/9p/protocol.c index 8a4084fa8b5a..b58a501cf3d1 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c | |||
| @@ -265,7 +265,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt, | |||
| 265 | } | 265 | } |
| 266 | break; | 266 | break; |
| 267 | case 'T':{ | 267 | case 'T':{ |
| 268 | int16_t *nwname = va_arg(ap, int16_t *); | 268 | uint16_t *nwname = va_arg(ap, uint16_t *); |
| 269 | char ***wnames = va_arg(ap, char ***); | 269 | char ***wnames = va_arg(ap, char ***); |
| 270 | 270 | ||
| 271 | errcode = p9pdu_readf(pdu, proto_version, | 271 | errcode = p9pdu_readf(pdu, proto_version, |
| @@ -468,7 +468,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, | |||
| 468 | case 'E':{ | 468 | case 'E':{ |
| 469 | int32_t cnt = va_arg(ap, int32_t); | 469 | int32_t cnt = va_arg(ap, int32_t); |
| 470 | const char *k = va_arg(ap, const void *); | 470 | const char *k = va_arg(ap, const void *); |
| 471 | const char *u = va_arg(ap, const void *); | 471 | const char __user *u = va_arg(ap, |
| 472 | const void __user *); | ||
| 472 | errcode = p9pdu_writef(pdu, proto_version, "d", | 473 | errcode = p9pdu_writef(pdu, proto_version, "d", |
| 473 | cnt); | 474 | cnt); |
| 474 | if (!errcode && pdu_write_urw(pdu, k, u, cnt)) | 475 | if (!errcode && pdu_write_urw(pdu, k, u, cnt)) |
| @@ -495,7 +496,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, | |||
| 495 | } | 496 | } |
| 496 | break; | 497 | break; |
| 497 | case 'T':{ | 498 | case 'T':{ |
| 498 | int16_t nwname = va_arg(ap, int); | 499 | uint16_t nwname = va_arg(ap, int); |
| 499 | const char **wnames = va_arg(ap, const char **); | 500 | const char **wnames = va_arg(ap, const char **); |
| 500 | 501 | ||
| 501 | errcode = p9pdu_writef(pdu, proto_version, "w", | 502 | errcode = p9pdu_writef(pdu, proto_version, "w", |
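On the decode side the 'T' case now stores through a uint16_t *, and the 'E' case keeps the __user annotation on the pointer it pulls from the va_list. Note that the encode side still fetches the count with va_arg(ap, int) even though the parameter is declared uint16_t: arguments narrower than int are widened by default argument promotion before they reach a variadic call, so int is the only portable thing to ask va_arg() for, as in this small sketch (invented function, not the 9p code):

    #include <stdarg.h>
    #include <stdint.h>
    #include <stdio.h>

    static void emit_count(int dummy, ...)
    {
        va_list ap;
        uint16_t nwname;

        va_start(ap, dummy);
        /* fetch as int and narrow; va_arg(ap, uint16_t) is not portable */
        nwname = (uint16_t)va_arg(ap, int);
        va_end(ap);

        printf("count on the wire: %u\n", nwname);
    }

    int main(void)
    {
        uint16_t n = 3;

        emit_count(0, n);   /* n is promoted to int at the call site */
        return 0;
    }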
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c index d47880e971dd..e883172f9aa2 100644 --- a/net/9p/trans_common.c +++ b/net/9p/trans_common.c | |||
| @@ -66,7 +66,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len, | |||
| 66 | uint32_t pdata_mapped_pages; | 66 | uint32_t pdata_mapped_pages; |
| 67 | struct trans_rpage_info *rpinfo; | 67 | struct trans_rpage_info *rpinfo; |
| 68 | 68 | ||
| 69 | *pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1); | 69 | *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1); |
| 70 | 70 | ||
| 71 | if (*pdata_off) | 71 | if (*pdata_off) |
| 72 | first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off), | 72 | first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off), |
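The __force in this hunk only strips the sparse __user address-space tag so the pointer value can be treated as a number; all the transport needs from it is where the buffer starts inside its first page. A small stand-alone illustration of that arithmetic (made-up address, user-space build):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        uintptr_t ubuf = 0x12345678UL;                     /* made-up buffer address */
        size_t off   = (size_t)ubuf & (PAGE_SIZE - 1);     /* offset within its page */
        size_t first = off ? PAGE_SIZE - off : PAGE_SIZE;  /* bytes usable in page 1 */

        printf("offset in page: %zu, bytes left in first page: %zu\n", off, first);
        return 0;
    }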
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index e8f046b07182..244e70742183 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
| @@ -326,8 +326,11 @@ req_retry_pinned: | |||
| 326 | outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, | 326 | outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, |
| 327 | pdata_off, rpinfo->rp_data, pdata_len); | 327 | pdata_off, rpinfo->rp_data, pdata_len); |
| 328 | } else { | 328 | } else { |
| 329 | char *pbuf = req->tc->pubuf ? req->tc->pubuf : | 329 | char *pbuf; |
| 330 | req->tc->pkbuf; | 330 | if (req->tc->pubuf) |
| 331 | pbuf = (__force char *) req->tc->pubuf; | ||
| 332 | else | ||
| 333 | pbuf = req->tc->pkbuf; | ||
| 331 | outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf, | 334 | outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf, |
| 332 | req->tc->pbuf_size); | 335 | req->tc->pbuf_size); |
| 333 | } | 336 | } |
| @@ -352,8 +355,12 @@ req_retry_pinned: | |||
| 352 | in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM, | 355 | in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM, |
| 353 | pdata_off, rpinfo->rp_data, pdata_len); | 356 | pdata_off, rpinfo->rp_data, pdata_len); |
| 354 | } else { | 357 | } else { |
| 355 | char *pbuf = req->tc->pubuf ? req->tc->pubuf : | 358 | char *pbuf; |
| 356 | req->tc->pkbuf; | 359 | if (req->tc->pubuf) |
| 360 | pbuf = (__force char *) req->tc->pubuf; | ||
| 361 | else | ||
| 362 | pbuf = req->tc->pkbuf; | ||
| 363 | |||
| 357 | in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM, | 364 | in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM, |
| 358 | pbuf, req->tc->pbuf_size); | 365 | pbuf, req->tc->pbuf_size); |
| 359 | } | 366 | } |
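Same idea in the virtio transport: pubuf carries the sparse __user tag, so assigning it to a plain char * takes an explicit (__force char *) cast to say the address-space mix is deliberate, and spelling the choice as if/else keeps that cast visible rather than hiding it inside a ternary. A compact sketch of the annotation idiom assumed here; outside a sparse run the macros collapse to nothing:

    #include <stdio.h>

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    struct fake_fcall {
        char __user *pubuf;     /* caller-supplied user-space buffer, if any */
        char *pkbuf;            /* kernel-space buffer otherwise */
    };

    static char *pick_buffer(struct fake_fcall *tc)
    {
        if (tc->pubuf)
            return (__force char *)tc->pubuf;   /* deliberate space change */
        return tc->pkbuf;
    }

    int main(void)
    {
        char kbuf[] = "kernel buffer";
        struct fake_fcall tc = { NULL, kbuf };

        printf("%s\n", pick_buffer(&tc));
        return 0;
    }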
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 50af02737a3d..5a80f41c0cba 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
| @@ -579,9 +579,15 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc, | |||
| 579 | 579 | ||
| 580 | list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, | 580 | list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, |
| 581 | r_linger_osd) { | 581 | r_linger_osd) { |
| 582 | __unregister_linger_request(osdc, req); | 582 | /* |
| 583 | * reregister request prior to unregistering linger so | ||
| 584 | * that r_osd is preserved. | ||
| 585 | */ | ||
| 586 | BUG_ON(!list_empty(&req->r_req_lru_item)); | ||
| 583 | __register_request(osdc, req); | 587 | __register_request(osdc, req); |
| 584 | list_move(&req->r_req_lru_item, &osdc->req_unsent); | 588 | list_add(&req->r_req_lru_item, &osdc->req_unsent); |
| 589 | list_add(&req->r_osd_item, &req->r_osd->o_requests); | ||
| 590 | __unregister_linger_request(osdc, req); | ||
| 585 | dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, | 591 | dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, |
| 586 | osd->o_osd); | 592 | osd->o_osd); |
| 587 | } | 593 | } |
| @@ -798,7 +804,7 @@ static void __register_request(struct ceph_osd_client *osdc, | |||
| 798 | req->r_request->hdr.tid = cpu_to_le64(req->r_tid); | 804 | req->r_request->hdr.tid = cpu_to_le64(req->r_tid); |
| 799 | INIT_LIST_HEAD(&req->r_req_lru_item); | 805 | INIT_LIST_HEAD(&req->r_req_lru_item); |
| 800 | 806 | ||
| 801 | dout("register_request %p tid %lld\n", req, req->r_tid); | 807 | dout("__register_request %p tid %lld\n", req, req->r_tid); |
| 802 | __insert_request(osdc, req); | 808 | __insert_request(osdc, req); |
| 803 | ceph_osdc_get_request(req); | 809 | ceph_osdc_get_request(req); |
| 804 | osdc->num_requests++; | 810 | osdc->num_requests++; |
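The reordering in __kick_osd_requests() follows directly from the comment the patch adds: tearing down the linger registration can drop req->r_osd, and the re-registration (plus the req_unsent and o_requests list adds) still needs it, so registration now happens first. A toy model of that ordering constraint, with invented types:

    #include <assert.h>
    #include <stdio.h>

    struct toy_osd { int id; };

    struct toy_req {
        int tid;
        struct toy_osd *osd;    /* dropped when the linger registration goes */
    };

    static void register_req(struct toy_req *req)
    {
        assert(req->osd != NULL);   /* queuing needs a valid osd */
        printf("queued tid %d on osd%d\n", req->tid, req->osd->id);
    }

    static void unregister_linger(struct toy_req *req)
    {
        req->osd = NULL;            /* losing the linger ref loses the osd */
    }

    int main(void)
    {
        struct toy_osd osd = { 3 };
        struct toy_req req = { 42, &osd };

        /* patched order: register while req.osd is still valid ... */
        register_req(&req);
        /* ... then drop the linger registration; the reverse order would
         * trip the assert above */
        unregister_linger(&req);
        return 0;
    }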
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c index 9fea75535221..96bee5c46008 100644 --- a/tools/perf/util/cgroup.c +++ b/tools/perf/util/cgroup.c | |||
| @@ -13,7 +13,7 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen) | |||
| 13 | { | 13 | { |
| 14 | FILE *fp; | 14 | FILE *fp; |
| 15 | char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1]; | 15 | char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1]; |
| 16 | char *token, *saved_ptr; | 16 | char *token, *saved_ptr = NULL; |
| 17 | int found = 0; | 17 | int found = 0; |
| 18 | 18 | ||
| 19 | fp = fopen("/proc/mounts", "r"); | 19 | fp = fopen("/proc/mounts", "r"); |
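Initializing saved_ptr quiets a may-be-used-uninitialized warning around the tokenizing loop later in this function (the variable names suggest the usual strtok_r() pattern); strtok_r() stores its position into the caller-supplied save pointer on the first call, so starting it at NULL is harmless. A minimal usage example with ordinary libc and a made-up /proc/mounts line:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[] = "cgroup /sys/fs/cgroup/cpu cgroup rw,cpu 0 0";
        char *token, *saved_ptr = NULL;

        for (token = strtok_r(line, " ", &saved_ptr);
             token != NULL;
             token = strtok_r(NULL, " ", &saved_ptr))
            printf("field: %s\n", token);
        return 0;
    }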
