author     Thomas Gleixner <tglx@linutronix.de>    2011-05-20 14:06:24 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2011-05-20 14:08:05 -0400
commit     250f972d85effad5b6e10da4bbd877e6a4b503b6 (patch)
tree       007393a6fc6439af7e0121dd99a6f9f9fb8405bc /include
parent     7372b0b122af0f6675f3ab65bfd91c8a438e0480 (diff)
parent     bbe7b8bef48c567f5ff3f6041c1fb011292e8f12 (diff)
Merge branch 'timers/urgent' into timers/core
Reason: Get upstream fixes and kfree_rcu which is necessary for a
follow up patch.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include')
71 files changed, 1064 insertions, 477 deletions
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bd297a20ab98..077c00d94f6e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -170,6 +170,10 @@
 STRUCT_ALIGN(); \
 *(__tracepoints) \
 /* implement dynamic printk debug */ \
+. = ALIGN(8); \
+VMLINUX_SYMBOL(__start___jump_table) = .; \
+*(__jump_table) \
+VMLINUX_SYMBOL(__stop___jump_table) = .; \
 . = ALIGN(8); \
 VMLINUX_SYMBOL(__start___verbose) = .; \
 *(__verbose) \
@@ -228,8 +232,6 @@
 \
 BUG_TABLE \
 \
-JUMP_TABLE \
-\
 /* PCI quirks */ \
 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
@@ -274,70 +276,70 @@
 /* Kernel symbol table: Normal symbols */ \
 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab) = .; \
-*(__ksymtab) \
+*(SORT(___ksymtab+*)) \
 VMLINUX_SYMBOL(__stop___ksymtab) = .; \
 } \
 \
 /* Kernel symbol table: GPL-only symbols */ \
 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
-*(__ksymtab_gpl) \
+*(SORT(___ksymtab_gpl+*)) \
 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
 } \
 \
 /* Kernel symbol table: Normal unused symbols */ \
 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
-*(__ksymtab_unused) \
+*(SORT(___ksymtab_unused+*)) \
 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
 } \
 \
 /* Kernel symbol table: GPL-only unused symbols */ \
 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
-*(__ksymtab_unused_gpl) \
+*(SORT(___ksymtab_unused_gpl+*)) \
 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
 } \
 \
 /* Kernel symbol table: GPL-future-only symbols */ \
 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
-*(__ksymtab_gpl_future) \
+*(SORT(___ksymtab_gpl_future+*)) \
 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
 } \
 \
 /* Kernel symbol table: Normal symbols */ \
 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab) = .; \
-*(__kcrctab) \
+*(SORT(___kcrctab+*)) \
 VMLINUX_SYMBOL(__stop___kcrctab) = .; \
 } \
 \
 /* Kernel symbol table: GPL-only symbols */ \
 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
-*(__kcrctab_gpl) \
+*(SORT(___kcrctab_gpl+*)) \
 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
 } \
 \
 /* Kernel symbol table: Normal unused symbols */ \
 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
-*(__kcrctab_unused) \
+*(SORT(___kcrctab_unused+*)) \
 VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
 } \
 \
 /* Kernel symbol table: GPL-only unused symbols */ \
 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
-*(__kcrctab_unused_gpl) \
+*(SORT(___kcrctab_unused_gpl+*)) \
 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
 } \
 \
 /* Kernel symbol table: GPL-future-only symbols */ \
 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
-*(__kcrctab_gpl_future) \
+*(SORT(___kcrctab_gpl_future+*)) \
 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
 } \
 \
@@ -589,14 +591,6 @@
 #define BUG_TABLE
 #endif
 
-#define JUMP_TABLE \
-. = ALIGN(8); \
-__jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \
-VMLINUX_SYMBOL(__start___jump_table) = .; \
-*(__jump_table) \
-VMLINUX_SYMBOL(__stop___jump_table) = .; \
-}
-
 #ifdef CONFIG_PM_TRACE
 #define TRACEDATA \
 . = ALIGN(4); \
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index ade09d7b4271..c99c3d3e7811 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -127,7 +127,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
-bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
 bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
 int drm_fb_helper_debug_enter(struct fb_info *info);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index c2f93a8ae2e1..564b14aa7e16 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -86,7 +86,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
 }
 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
 &(mm)->head_node.node_list, \
-node_list);
+node_list)
 #define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
 for (entry = (mm)->prev_scanned_node, \
 next = entry ? list_entry(entry->node_list.next, \
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 816e30cbd968..f04b2a3b0f49 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -155,6 +155,7 @@
 {0x1002, 0x6719, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x671c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x671d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x671f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
@@ -167,6 +168,7 @@
 {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x673e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -199,6 +201,7 @@
 {0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x689b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
@@ -209,7 +212,9 @@
 {0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x68ba, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x68bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 7aa5dddb2098..787f7b6fd622 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -910,6 +910,7 @@ struct drm_radeon_cs {
 #define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
 #define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */
 #define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */
+#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */
 
 struct drm_radeon_info {
 uint32_t request;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index b8613e806aa9..01eca1794e14 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -111,6 +111,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_node(pgdat, x) \
 __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_node_nopanic(pgdat, x) \
+__alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
 __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node_nopanic(pgdat, x) \
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h
new file mode 100644
index 000000000000..90b1aa867224
--- /dev/null
+++ b/include/linux/bsearch.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_BSEARCH_H
+#define _LINUX_BSEARCH_H
+
+#include <linux/types.h>
+
+void *bsearch(const void *key, const void *base, size_t num, size_t size,
+	      int (*cmp)(const void *key, const void *elt));
+
+#endif /* _LINUX_BSEARCH_H */
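
The new header only declares the helper; the implementation lives outside include/ and is not shown in this diffstat. As a hedged illustration of how a caller might use the prototype above (struct sym_entry, cmp_addr() and find_sym() are hypothetical names invented for this sketch, not part of the commit), a lookup over an array already sorted by address could look like this:

```c
#include <linux/bsearch.h>
#include <linux/types.h>

struct sym_entry {
	unsigned long addr;
	const char *name;
};

/* Comparator: first argument is the search key, second the array element. */
static int cmp_addr(const void *key, const void *elt)
{
	unsigned long addr = *(const unsigned long *)key;
	const struct sym_entry *se = elt;

	if (addr < se->addr)
		return -1;
	if (addr > se->addr)
		return 1;
	return 0;
}

/* 'table' must already be sorted in ascending ->addr order. */
static const struct sym_entry *find_sym(const struct sym_entry *table,
					size_t nr, unsigned long addr)
{
	/* Returns a pointer to the matching entry, or NULL if none. */
	return bsearch(&addr, table, nr, sizeof(*table), cmp_addr);
}
```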
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 7cb23eae693d..4554db0cde86 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -551,18 +551,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap);
 extern bool capable(int cap);
 extern bool ns_capable(struct user_namespace *ns, int cap);
 extern bool task_ns_capable(struct task_struct *t, int cap);
-
-/**
-* nsown_capable - Check superior capability to one's own user_ns
-* @cap: The capability in question
-*
-* Return true if the current task has the given superior capability
-* targeted at its own user namespace.
-*/
-static inline bool nsown_capable(int cap)
-{
-return ns_capable(current_user_ns(), cap);
-}
+extern bool nsown_capable(int cap);
 
 /* audit system wants to get cap info from files as well */
 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index fc53492b6ad7..d6733e27af34 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -56,46 +56,52 @@ enum clock_event_nofitiers {
 
 /**
 * struct clock_event_device - clock event device descriptor
-* @name: ptr to clock event name
-* @features: features
+* @event_handler: Assigned by the framework to be called by the low
+* level handler of the event source
+* @set_next_event: set next event function
+* @next_event: local storage for the next event in oneshot mode
 * @max_delta_ns: maximum delta value in ns
 * @min_delta_ns: minimum delta value in ns
 * @mult: nanosecond to cycles multiplier
 * @shift: nanoseconds to cycles divisor (power of two)
+* @mode: operating mode assigned by the management code
+* @features: features
+* @retries: number of forced programming retries
+* @set_mode: set mode function
+* @broadcast: function to broadcast events
+* @min_delta_ticks: minimum delta value in ticks stored for reconfiguration
+* @max_delta_ticks: maximum delta value in ticks stored for reconfiguration
+* @name: ptr to clock event name
 * @rating: variable to rate clock event devices
 * @irq: IRQ number (only for non CPU local devices)
 * @cpumask: cpumask to indicate for which CPUs this device works
-* @set_next_event: set next event function
-* @set_mode: set mode function
-* @event_handler: Assigned by the framework to be called by the low
-* level handler of the event source
-* @broadcast: function to broadcast events
 * @list: list head for the management code
-* @mode: operating mode assigned by the management code
-* @next_event: local storage for the next event in oneshot mode
-* @retries: number of forced programming retries
 */
 struct clock_event_device {
-const char *name;
-unsigned int features;
+void (*event_handler)(struct clock_event_device *);
+int (*set_next_event)(unsigned long evt,
+struct clock_event_device *);
+ktime_t next_event;
 u64 max_delta_ns;
 u64 min_delta_ns;
 u32 mult;
 u32 shift;
+enum clock_event_mode mode;
+unsigned int features;
+unsigned long retries;
+
+void (*broadcast)(const struct cpumask *mask);
+void (*set_mode)(enum clock_event_mode mode,
+struct clock_event_device *);
+unsigned long min_delta_ticks;
+unsigned long max_delta_ticks;
+
+const char *name;
 int rating;
 int irq;
 const struct cpumask *cpumask;
-int (*set_next_event)(unsigned long evt,
-struct clock_event_device *);
-void (*set_mode)(enum clock_event_mode mode,
-struct clock_event_device *);
-void (*event_handler)(struct clock_event_device *);
-void (*broadcast)(const struct cpumask *mask);
 struct list_head list;
-enum clock_event_mode mode;
-ktime_t next_event;
-unsigned long retries;
-};
+} ____cacheline_aligned;
 
 /*
 * Calculate a multiplication factor for scaled math, which is used to convert
@@ -122,6 +128,12 @@ extern u64 clockevent_delta2ns(unsigned long latch,
 struct clock_event_device *evt);
 extern void clockevents_register_device(struct clock_event_device *dev);
 
+extern void clockevents_config_and_register(struct clock_event_device *dev,
+u32 freq, unsigned long min_delta,
+unsigned long max_delta);
+
+extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
+
 extern void clockevents_exchange_device(struct clock_event_device *old,
 struct clock_event_device *new);
 extern void clockevents_set_mode(struct clock_event_device *dev,
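
For orientation only: the declarations added above let a clock event driver hand the core its clock rate and tick limits instead of precomputing mult/shift and the nanosecond deltas itself. The sketch below is not from the commit; my_clockevent, the my_timer_* callbacks, TIMER_RATE_HZ and the chosen tick limits are all hypothetical, and only the struct layout and clockevents_config_and_register() prototype come from this diff:

```c
#include <linux/clockchips.h>
#include <linux/cpumask.h>

#define TIMER_RATE_HZ	1000000	/* assumed 1 MHz timer clock */

static int my_timer_set_next_event(unsigned long delta,
				   struct clock_event_device *evt)
{
	/* program the hardware comparator 'delta' ticks into the future */
	return 0;
}

static void my_timer_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	/* switch the hardware between periodic/oneshot/shutdown */
}

static struct clock_event_device my_clockevent = {
	.name		= "my-timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
	.set_next_event	= my_timer_set_next_event,
	.set_mode	= my_timer_set_mode,
};

static void my_timer_init(void)
{
	my_clockevent.cpumask = cpumask_of(0);

	/* Instead of filling in mult/shift and min/max_delta_ns by hand,
	 * let the core derive them from the clock rate and tick limits. */
	clockevents_config_and_register(&my_clockevent, TIMER_RATE_HZ,
					0xf, 0xffffffff);
}
```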
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index c37b21ad5a3b..c918fbd33ee5 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -159,42 +159,38 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
 */
 struct clocksource {
 /*
-* First part of structure is read mostly
+* Hotpath data, fits in a single cache line when the
+* clocksource itself is cacheline aligned.
 */
-char *name;
-struct list_head list;
-int rating;
 cycle_t (*read)(struct clocksource *cs);
-int (*enable)(struct clocksource *cs);
-void (*disable)(struct clocksource *cs);
+cycle_t cycle_last;
 cycle_t mask;
 u32 mult;
 u32 shift;
 u64 max_idle_ns;
-unsigned long flags;
-cycle_t (*vread)(void);
-void (*suspend)(struct clocksource *cs);
-void (*resume)(struct clocksource *cs);
+
 #ifdef CONFIG_IA64
 void *fsys_mmio; /* used by fsyscall asm code */
 #define CLKSRC_FSYS_MMIO_SET(mmio, addr) ((mmio) = (addr))
 #else
 #define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0)
 #endif
-
-/*
-* Second part is written at each timer interrupt
-* Keep it in a different cache line to dirty no
-* more than one cache line.
-*/
-cycle_t cycle_last ____cacheline_aligned_in_smp;
+const char *name;
+struct list_head list;
+int rating;
+cycle_t (*vread)(void);
+int (*enable)(struct clocksource *cs);
+void (*disable)(struct clocksource *cs);
+unsigned long flags;
+void (*suspend)(struct clocksource *cs);
+void (*resume)(struct clocksource *cs);
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 /* Watchdog related data, used by the framework */
 struct list_head wd_list;
 cycle_t wd_last;
 #endif
-};
+} ____cacheline_aligned;
 
 /*
 * Clock source flags bits::
@@ -341,4 +337,6 @@ static inline void update_vsyscall_tz(void)
 
 extern void timekeeping_notify(struct clocksource *clock);
 
+extern int clocksource_i8253_init(void);
+
 #endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 9343dd3de858..11be48e0d168 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -3,7 +3,7 @@
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
@@ -56,9 +56,9 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
 #define CPUFREQ_POLICY_POWERSAVE (1)
 #define CPUFREQ_POLICY_PERFORMANCE (2)
 
 /* Frequency values here are CPU kHz so that hardware which doesn't run
 * with some frequencies can complain without having to guess what per
 * cent / per mille means.
 * Maximum transition latency is in nanoseconds - if it's unknown,
 * CPUFREQ_ETERNAL shall be used.
 */
@@ -72,13 +72,15 @@ extern struct kobject *cpufreq_global_kobject;
 struct cpufreq_cpuinfo {
 unsigned int max_freq;
 unsigned int min_freq;
-unsigned int transition_latency; /* in 10^(-9) s = nanoseconds */
+
+/* in 10^(-9) s = nanoseconds */
+unsigned int transition_latency;
 };
 
 struct cpufreq_real_policy {
 unsigned int min; /* in kHz */
 unsigned int max; /* in kHz */
 unsigned int policy; /* see above */
 struct cpufreq_governor *governor; /* see below */
 };
 
@@ -94,7 +96,7 @@ struct cpufreq_policy {
 unsigned int max; /* in kHz */
 unsigned int cur; /* in kHz, only needed if cpufreq
 * governors are used */
 unsigned int policy; /* see above */
 struct cpufreq_governor *governor; /* see below */
 
 struct work_struct update; /* if update_policy() needs to be
@@ -167,11 +169,11 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu
 
 struct cpufreq_governor {
 char name[CPUFREQ_NAME_LEN];
 int (*governor) (struct cpufreq_policy *policy,
 unsigned int event);
 ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
 char *buf);
 int (*store_setspeed) (struct cpufreq_policy *policy,
 unsigned int freq);
 unsigned int max_transition_latency; /* HW must be able to switch to
 next freq faster than this value in nano secs or we
@@ -180,7 +182,8 @@ struct cpufreq_governor {
 struct module *owner;
 };
 
-/* pass a target to the cpufreq driver
+/*
+* Pass a target to the cpufreq driver.
 */
 extern int cpufreq_driver_target(struct cpufreq_policy *policy,
 unsigned int target_freq,
@@ -237,9 +240,9 @@ struct cpufreq_driver {
 
 /* flags */
 
 #define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
 * all ->init() calls failed */
 #define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
 * "constants" aren't affected by
 * frequency transitions */
 #define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed
@@ -252,7 +255,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state);
 
 
 static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max)
 {
 if (policy->min < min)
 policy->min = min;
@@ -386,34 +389,15 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 /* the following 3 funtions are for cpufreq core use only */
 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
-void cpufreq_cpu_put (struct cpufreq_policy *data);
+void cpufreq_cpu_put(struct cpufreq_policy *data);
 
 /* the following are really really optional */
 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
 
 void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
 unsigned int cpu);
 
 void cpufreq_frequency_table_put_attr(unsigned int cpu);
 
 
-/*********************************************************************
- * UNIFIED DEBUG HELPERS *
- *********************************************************************/
-
-#define CPUFREQ_DEBUG_CORE 1
-#define CPUFREQ_DEBUG_DRIVER 2
-#define CPUFREQ_DEBUG_GOVERNOR 4
-
-#ifdef CONFIG_CPU_FREQ_DEBUG
-
-extern void cpufreq_debug_printk(unsigned int type, const char *prefix,
-const char *fmt, ...);
-
-#else
-
-#define cpufreq_debug_printk(msg...) do { } while(0)
-
-#endif /* CONFIG_CPU_FREQ_DEBUG */
-
 #endif /* _LINUX_CPUFREQ_H */
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 9aeeb0ba2003..be16b61283cc 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -146,6 +146,7 @@ struct cred {
 void *security; /* subjective LSM security */
 #endif
 struct user_struct *user; /* real user ID subscription */
+struct user_namespace *user_ns; /* cached user->user_ns */
 struct group_info *group_info; /* supplementary groups for euid/fsgid */
 struct rcu_head rcu; /* RCU deletion hook */
 };
@@ -354,10 +355,15 @@ static inline void put_cred(const struct cred *_cred)
 #define current_fsgid() (current_cred_xxx(fsgid))
 #define current_cap() (current_cred_xxx(cap_effective))
 #define current_user() (current_cred_xxx(user))
-#define _current_user_ns() (current_cred_xxx(user)->user_ns)
 #define current_security() (current_cred_xxx(security))
 
-extern struct user_namespace *current_user_ns(void);
+#ifdef CONFIG_USER_NS
+#define current_user_ns() (current_cred_xxx(user_ns))
+#else
+extern struct user_namespace init_user_ns;
+#define current_user_ns() (&init_user_ns)
+#endif
+
 
 #define current_uid_gid(_uid, _gid) \
 do { \
diff --git a/include/linux/device.h b/include/linux/device.h
index ab8dfc095709..c66111affca9 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -47,6 +47,38 @@ extern int __must_check bus_create_file(struct bus_type *,
 struct bus_attribute *);
 extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
 
+/**
+* struct bus_type - The bus type of the device
+*
+* @name: The name of the bus.
+* @bus_attrs: Default attributes of the bus.
+* @dev_attrs: Default attributes of the devices on the bus.
+* @drv_attrs: Default attributes of the device drivers on the bus.
+* @match: Called, perhaps multiple times, whenever a new device or driver
+* is added for this bus. It should return a nonzero value if the
+* given device can be handled by the given driver.
+* @uevent: Called when a device is added, removed, or a few other things
+* that generate uevents to add the environment variables.
+* @probe: Called when a new device or driver add to this bus, and callback
+* the specific driver's probe to initial the matched device.
+* @remove: Called when a device removed from this bus.
+* @shutdown: Called at shut-down time to quiesce the device.
+* @suspend: Called when a device on this bus wants to go to sleep mode.
+* @resume: Called to bring a device on this bus out of sleep mode.
+* @pm: Power management operations of this bus, callback the specific
+* device driver's pm-ops.
+* @p: The private data of the driver core, only the driver core can
+* touch this.
+*
+* A bus is a channel between the processor and one or more devices. For the
+* purposes of the device model, all devices are connected via a bus, even if
+* it is an internal, virtual, "platform" bus. Buses can plug into each other.
+* A USB controller is usually a PCI device, for example. The device model
+* represents the actual connections between buses and the devices they control.
+* A bus is represented by the bus_type structure. It contains the name, the
+* default attributes, the bus' methods, PM operations, and the driver core's
+* private data.
+*/
 struct bus_type {
 const char *name;
 struct bus_attribute *bus_attrs;
@@ -119,6 +151,37 @@ extern int bus_unregister_notifier(struct bus_type *bus,
 extern struct kset *bus_get_kset(struct bus_type *bus);
 extern struct klist *bus_get_device_klist(struct bus_type *bus);
 
+/**
+* struct device_driver - The basic device driver structure
+* @name: Name of the device driver.
+* @bus: The bus which the device of this driver belongs to.
+* @owner: The module owner.
+* @mod_name: Used for built-in modules.
+* @suppress_bind_attrs: Disables bind/unbind via sysfs.
+* @of_match_table: The open firmware table.
+* @probe: Called to query the existence of a specific device,
+* whether this driver can work with it, and bind the driver
+* to a specific device.
+* @remove: Called when the device is removed from the system to
+* unbind a device from this driver.
+* @shutdown: Called at shut-down time to quiesce the device.
+* @suspend: Called to put the device to sleep mode. Usually to a
+* low power state.
+* @resume: Called to bring a device from sleep mode.
+* @groups: Default attributes that get created by the driver core
+* automatically.
+* @pm: Power management operations of the device which matched
+* this driver.
+* @p: Driver core's private data, no one other than the driver
+* core can touch this.
+*
+* The device driver-model tracks all of the drivers known to the system.
+* The main reason for this tracking is to enable the driver core to match
+* up drivers with new devices. Once drivers are known objects within the
+* system, however, a number of other things become possible. Device drivers
+* can export information and configuration variables that are independent
+* of any specific device.
+*/
 struct device_driver {
 const char *name;
 struct bus_type *bus;
@@ -185,8 +248,34 @@ struct device *driver_find_device(struct device_driver *drv,
 struct device *start, void *data,
 int (*match)(struct device *dev, void *data));
 
-/*
-* device classes
+/**
+* struct class - device classes
+* @name: Name of the class.
+* @owner: The module owner.
+* @class_attrs: Default attributes of this class.
+* @dev_attrs: Default attributes of the devices belong to the class.
+* @dev_bin_attrs: Default binary attributes of the devices belong to the class.
+* @dev_kobj: The kobject that represents this class and links it into the hierarchy.
+* @dev_uevent: Called when a device is added, removed from this class, or a
+* few other things that generate uevents to add the environment
+* variables.
+* @devnode: Callback to provide the devtmpfs.
+* @class_release: Called to release this class.
+* @dev_release: Called to release the device.
+* @suspend: Used to put the device to sleep mode, usually to a low power
+* state.
+* @resume: Used to bring the device from the sleep mode.
+* @ns_type: Callbacks so sysfs can detemine namespaces.
+* @namespace: Namespace of the device belongs to this class.
+* @pm: The default device power management operations of this class.
+* @p: The private data of the driver core, no one other than the
+* driver core can touch this.
+*
+* A class is a higher-level view of a device that abstracts out low-level
+* implementation details. Drivers may see a SCSI disk or an ATA disk, but,
+* at the class level, they are all simply disks. Classes allow user space
+* to work with devices based on what they do, rather than how they are
+* connected or how they work.
 */
 struct class {
 const char *name;
@@ -401,6 +490,65 @@ struct device_dma_parameters {
 unsigned long segment_boundary_mask;
 };
 
+/**
+* struct device - The basic device structure
+* @parent: The device's "parent" device, the device to which it is attached.
+* In most cases, a parent device is some sort of bus or host
+* controller. If parent is NULL, the device, is a top-level device,
+* which is not usually what you want.
+* @p: Holds the private data of the driver core portions of the device.
+* See the comment of the struct device_private for detail.
+* @kobj: A top-level, abstract class from which other classes are derived.
+* @init_name: Initial name of the device.
+* @type: The type of device.
+* This identifies the device type and carries type-specific
+* information.
+* @mutex: Mutex to synchronize calls to its driver.
+* @bus: Type of bus device is on.
+* @driver: Which driver has allocated this
+* @platform_data: Platform data specific to the device.
+* Example: For devices on custom boards, as typical of embedded
+* and SOC based hardware, Linux often uses platform_data to point
+* to board-specific structures describing devices and how they
+* are wired. That can include what ports are available, chip
+* variants, which GPIO pins act in what additional roles, and so
+* on. This shrinks the "Board Support Packages" (BSPs) and
+* minimizes board-specific #ifdefs in drivers.
+* @power: For device power management.
+* See Documentation/power/devices.txt for details.
+* @pwr_domain: Provide callbacks that are executed during system suspend,
+* hibernation, system resume and during runtime PM transitions
+* along with subsystem-level and driver-level callbacks.
+* @numa_node: NUMA node this device is close to.
+* @dma_mask: Dma mask (if dma'ble device).
+* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
+* hardware supports 64-bit addresses for consistent allocations
+* such descriptors.
+* @dma_parms: A low level driver may set these to teach IOMMU code about
+* segment limitations.
+* @dma_pools: Dma pools (if dma'ble device).
+* @dma_mem: Internal for coherent mem override.
+* @archdata: For arch-specific additions.
+* @of_node: Associated device tree node.
+* @of_match: Matching of_device_id from driver.
+* @devt: For creating the sysfs "dev".
+* @devres_lock: Spinlock to protect the resource of the device.
+* @devres_head: The resources list of the device.
+* @knode_class: The node used to add the device to the class list.
+* @class: The class of the device.
+* @groups: Optional attribute groups.
+* @release: Callback to free the device after all references have
+* gone away. This should be set by the allocator of the
+* device (i.e. the bus driver that discovered the device).
+*
+* At the lowest level, every device in a Linux system is represented by an
+* instance of struct device. The device structure contains the information
+* that the device model core needs to model the system. Most subsystems,
+* however, track additional information about the devices they host. As a
+* result, it is rare for devices to be represented by bare device structures;
+* instead, that structure, like kobject structures, is usually embedded within
+* a higher-level representation of the device.
+*/
 struct device {
 struct device *parent;
 
@@ -408,7 +556,7 @@ struct device {
 
 struct kobject kobj;
 const char *init_name; /* initial name of the device */
-struct device_type *type;
+const struct device_type *type;
 
 struct mutex mutex; /* mutex to synchronize calls to
 * its driver.
@@ -442,7 +590,6 @@ struct device {
 struct dev_archdata archdata;
 
 struct device_node *of_node; /* associated device tree node */
-const struct of_device_id *of_match; /* matching of_device_id from driver */
 
 dev_t devt; /* dev_t, creates the sysfs "dev" */
 
@@ -557,7 +704,7 @@ extern int device_move(struct device *dev, struct device *new_parent,
 extern const char *device_get_devnode(struct device *dev,
 mode_t *mode, const char **tmp);
 extern void *dev_get_drvdata(const struct device *dev);
-extern void dev_set_drvdata(struct device *dev, void *data);
+extern int dev_set_drvdata(struct device *dev, void *data);
 
 /*
 * Root device objects for grouping under /sys/devices
@@ -611,7 +758,7 @@ extern int (*platform_notify)(struct device *dev);
 extern int (*platform_notify_remove)(struct device *dev);
 
 
-/**
+/*
 * get_device - atomically increment the reference count for the device.
 *
 */
@@ -633,13 +780,6 @@ static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
 /* drivers/base/power/shutdown.c */
 extern void device_shutdown(void);
 
-#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
-/* drivers/base/sys.c */
-extern void sysdev_shutdown(void);
-#else
-static inline void sysdev_shutdown(void) { }
-#endif
-
 /* debugging and troubleshooting/diagnostic helpers. */
 extern const char *dev_driver_string(const struct device *dev);
 
@@ -742,13 +882,17 @@ do { \
 #endif
 
 /*
-* dev_WARN() acts like dev_printk(), but with the key difference
+* dev_WARN*() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
 #define dev_WARN(dev, format, arg...) \
 WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg);
 
+#define dev_WARN_ONCE(dev, condition, format, arg...) \
+WARN_ONCE(condition, "Device %s\n" format, \
+dev_driver_string(dev), ## arg)
+
 /* Create alias, so I can be autoloaded. */
 #define MODULE_ALIAS_CHARDEV(major,minor) \
 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
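
A quick, hedged illustration of the dev_WARN_ONCE() macro added above (my_driver_check_status() and MY_STATUS_RESERVED_BITS are hypothetical names for this sketch only): a driver can flag an unexpected-but-survivable condition a single time rather than on every occurrence.

```c
#include <linux/device.h>

#define MY_STATUS_RESERVED_BITS	0xffff0000	/* hypothetical reserved bits */

static void my_driver_check_status(struct device *dev, u32 status)
{
	/* warn only once, with device name and backtrace, if reserved bits show up */
	dev_WARN_ONCE(dev, status & MY_STATUS_RESERVED_BITS,
		      "unexpected status bits 0x%08x\n", status);
}
```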
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 0c9653f11c18..e747ecd48e1c 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -1,8 +1,6 @@
 #ifndef _DYNAMIC_DEBUG_H
 #define _DYNAMIC_DEBUG_H
 
-#include <linux/jump_label.h>
-
 /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
 * use independent hash functions, to reduce the chance of false positives.
diff --git a/include/linux/fb.h b/include/linux/fb.h
index df728c1c29ed..6a8274877171 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -832,6 +832,7 @@ struct fb_tile_ops {
 #define FBINFO_CAN_FORCE_OUTPUT 0x200000
 
 struct fb_info {
+atomic_t count;
 int node;
 int flags;
 struct mutex lock; /* Lock for open/release/ioctl funcs */
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
index 70e4efabe0fb..ebeb2f3ad068 100644
--- a/include/linux/flex_array.h
+++ b/include/linux/flex_array.h
@@ -61,7 +61,7 @@ struct flex_array {
 struct flex_array *flex_array_alloc(int element_size, unsigned int total,
 gfp_t flags);
 int flex_array_prealloc(struct flex_array *fa, unsigned int start,
-unsigned int end, gfp_t flags);
+unsigned int nr_elements, gfp_t flags);
 void flex_array_free(struct flex_array *fa);
 void flex_array_free_parts(struct flex_array *fa);
 int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
diff --git a/include/linux/fs.h b/include/linux/fs.h index dbd860af0804..cdf9495df204 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -358,7 +358,6 @@ struct inodes_stat_t { | |||
358 | #define FS_EXTENT_FL 0x00080000 /* Extents */ | 358 | #define FS_EXTENT_FL 0x00080000 /* Extents */ |
359 | #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ | 359 | #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ |
360 | #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ | 360 | #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ |
361 | #define FS_COW_FL 0x02000000 /* Cow file */ | ||
362 | #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ | 361 | #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ |
363 | 362 | ||
364 | #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ | 363 | #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index ca29e03c1fac..9d88e1cb5dbb 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -29,9 +29,22 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
29 | 29 | ||
30 | typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); | 30 | typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); |
31 | 31 | ||
32 | struct ftrace_hash; | ||
33 | |||
34 | enum { | ||
35 | FTRACE_OPS_FL_ENABLED = 1 << 0, | ||
36 | FTRACE_OPS_FL_GLOBAL = 1 << 1, | ||
37 | FTRACE_OPS_FL_DYNAMIC = 1 << 2, | ||
38 | }; | ||
39 | |||
32 | struct ftrace_ops { | 40 | struct ftrace_ops { |
33 | ftrace_func_t func; | 41 | ftrace_func_t func; |
34 | struct ftrace_ops *next; | 42 | struct ftrace_ops *next; |
43 | unsigned long flags; | ||
44 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
45 | struct ftrace_hash *notrace_hash; | ||
46 | struct ftrace_hash *filter_hash; | ||
47 | #endif | ||
35 | }; | 48 | }; |
36 | 49 | ||
37 | extern int function_trace_stop; | 50 | extern int function_trace_stop; |
@@ -146,14 +159,13 @@ extern void unregister_ftrace_function_probe_all(char *glob); | |||
146 | extern int ftrace_text_reserved(void *start, void *end); | 159 | extern int ftrace_text_reserved(void *start, void *end); |
147 | 160 | ||
148 | enum { | 161 | enum { |
149 | FTRACE_FL_FREE = (1 << 0), | 162 | FTRACE_FL_ENABLED = (1 << 30), |
150 | FTRACE_FL_FAILED = (1 << 1), | 163 | FTRACE_FL_FREE = (1 << 31), |
151 | FTRACE_FL_FILTER = (1 << 2), | ||
152 | FTRACE_FL_ENABLED = (1 << 3), | ||
153 | FTRACE_FL_NOTRACE = (1 << 4), | ||
154 | FTRACE_FL_CONVERTED = (1 << 5), | ||
155 | }; | 164 | }; |
156 | 165 | ||
166 | #define FTRACE_FL_MASK (0x3UL << 30) | ||
167 | #define FTRACE_REF_MAX ((1 << 30) - 1) | ||
168 | |||
157 | struct dyn_ftrace { | 169 | struct dyn_ftrace { |
158 | union { | 170 | union { |
159 | unsigned long ip; /* address of mcount call-site */ | 171 | unsigned long ip; /* address of mcount call-site */ |
@@ -167,7 +179,12 @@ struct dyn_ftrace { | |||
167 | }; | 179 | }; |
168 | 180 | ||
169 | int ftrace_force_update(void); | 181 | int ftrace_force_update(void); |
170 | void ftrace_set_filter(unsigned char *buf, int len, int reset); | 182 | void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
183 | int len, int reset); | ||
184 | void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, | ||
185 | int len, int reset); | ||
186 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset); | ||
187 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); | ||
171 | 188 | ||
172 | int register_ftrace_command(struct ftrace_func_command *cmd); | 189 | int register_ftrace_command(struct ftrace_func_command *cmd); |
173 | int unregister_ftrace_command(struct ftrace_func_command *cmd); | 190 | int unregister_ftrace_command(struct ftrace_func_command *cmd); |
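ftrace_set_filter() and ftrace_set_notrace() now operate on a specific ftrace_ops, with ftrace_set_global_filter()/ftrace_set_global_notrace() covering the old global behaviour. A hedged sketch of a caller using a private filter, assuming CONFIG_FUNCTION_TRACER and CONFIG_DYNAMIC_FTRACE; the callback and filter string are hypothetical:

#include <linux/ftrace.h>

static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
        /* Invoked only for functions matched by this ops' own filter. */
}

static struct ftrace_ops my_ops = {
        .func = my_trace_func,
        /* No FTRACE_OPS_FL_GLOBAL, so the ops keeps private filter hashes. */
};

static unsigned char my_filter[] = "vfs_read";

static int my_tracer_init(void)
{
        /* Restricts my_ops only; the global filter is left untouched. */
        ftrace_set_filter(&my_ops, my_filter, sizeof(my_filter) - 1, 1);
        return register_ftrace_function(&my_ops);
}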
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 22b32af1b5ec..b5a550a39a70 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -37,6 +37,7 @@ struct trace_entry { | |||
37 | unsigned char flags; | 37 | unsigned char flags; |
38 | unsigned char preempt_count; | 38 | unsigned char preempt_count; |
39 | int pid; | 39 | int pid; |
40 | int padding; | ||
40 | }; | 41 | }; |
41 | 42 | ||
42 | #define FTRACE_MAX_EVENT \ | 43 | #define FTRACE_MAX_EVENT \ |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index bfb8f934521e..56d8fc87fbbc 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -353,6 +353,8 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask); | |||
353 | 353 | ||
354 | void *alloc_pages_exact(size_t size, gfp_t gfp_mask); | 354 | void *alloc_pages_exact(size_t size, gfp_t gfp_mask); |
355 | void free_pages_exact(void *virt, size_t size); | 355 | void free_pages_exact(void *virt, size_t size); |
356 | /* This is different from alloc_pages_exact_node !!! */ | ||
357 | void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); | ||
356 | 358 | ||
357 | #define __get_free_page(gfp_mask) \ | 359 | #define __get_free_page(gfp_mask) \ |
358 | __get_free_pages((gfp_mask), 0) | 360 | __get_free_pages((gfp_mask), 0) |
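alloc_pages_exact_nid() behaves like alloc_pages_exact() but allocates from a given node, returning a kernel virtual address for an exact byte count (unlike alloc_pages_exact_node(), which returns a struct page). A brief sketch; the size and node are arbitrary:

#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative only: a 12 KiB physically contiguous buffer on node 0. */
static int node_buf_demo(void)
{
        void *buf = alloc_pages_exact_nid(0, 12 * 1024, GFP_KERNEL | __GFP_ZERO);

        if (!buf)
                return -ENOMEM;
        free_pages_exact(buf, 12 * 1024);
        return 0;
}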
diff --git a/include/linux/init.h b/include/linux/init.h index 577671c55153..9146f39cdddf 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
@@ -79,29 +79,29 @@ | |||
79 | #define __exitused __used | 79 | #define __exitused __used |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | #define __exit __section(.exit.text) __exitused __cold | 82 | #define __exit __section(.exit.text) __exitused __cold notrace |
83 | 83 | ||
84 | /* Used for HOTPLUG */ | 84 | /* Used for HOTPLUG */ |
85 | #define __devinit __section(.devinit.text) __cold | 85 | #define __devinit __section(.devinit.text) __cold notrace |
86 | #define __devinitdata __section(.devinit.data) | 86 | #define __devinitdata __section(.devinit.data) |
87 | #define __devinitconst __section(.devinit.rodata) | 87 | #define __devinitconst __section(.devinit.rodata) |
88 | #define __devexit __section(.devexit.text) __exitused __cold | 88 | #define __devexit __section(.devexit.text) __exitused __cold notrace |
89 | #define __devexitdata __section(.devexit.data) | 89 | #define __devexitdata __section(.devexit.data) |
90 | #define __devexitconst __section(.devexit.rodata) | 90 | #define __devexitconst __section(.devexit.rodata) |
91 | 91 | ||
92 | /* Used for HOTPLUG_CPU */ | 92 | /* Used for HOTPLUG_CPU */ |
93 | #define __cpuinit __section(.cpuinit.text) __cold | 93 | #define __cpuinit __section(.cpuinit.text) __cold notrace |
94 | #define __cpuinitdata __section(.cpuinit.data) | 94 | #define __cpuinitdata __section(.cpuinit.data) |
95 | #define __cpuinitconst __section(.cpuinit.rodata) | 95 | #define __cpuinitconst __section(.cpuinit.rodata) |
96 | #define __cpuexit __section(.cpuexit.text) __exitused __cold | 96 | #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace |
97 | #define __cpuexitdata __section(.cpuexit.data) | 97 | #define __cpuexitdata __section(.cpuexit.data) |
98 | #define __cpuexitconst __section(.cpuexit.rodata) | 98 | #define __cpuexitconst __section(.cpuexit.rodata) |
99 | 99 | ||
100 | /* Used for MEMORY_HOTPLUG */ | 100 | /* Used for MEMORY_HOTPLUG */ |
101 | #define __meminit __section(.meminit.text) __cold | 101 | #define __meminit __section(.meminit.text) __cold notrace |
102 | #define __meminitdata __section(.meminit.data) | 102 | #define __meminitdata __section(.meminit.data) |
103 | #define __meminitconst __section(.meminit.rodata) | 103 | #define __meminitconst __section(.meminit.rodata) |
104 | #define __memexit __section(.memexit.text) __exitused __cold | 104 | #define __memexit __section(.memexit.text) __exitused __cold notrace |
105 | #define __memexitdata __section(.memexit.data) | 105 | #define __memexitdata __section(.memexit.data) |
106 | #define __memexitconst __section(.memexit.rodata) | 106 | #define __memexitconst __section(.memexit.rodata) |
107 | 107 | ||
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index caa151fbebb7..689496bb6654 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -134,7 +134,6 @@ extern struct cred init_cred; | |||
134 | .stack = &init_thread_info, \ | 134 | .stack = &init_thread_info, \ |
135 | .usage = ATOMIC_INIT(2), \ | 135 | .usage = ATOMIC_INIT(2), \ |
136 | .flags = PF_KTHREAD, \ | 136 | .flags = PF_KTHREAD, \ |
137 | .lock_depth = -1, \ | ||
138 | .prio = MAX_PRIO-20, \ | 137 | .prio = MAX_PRIO-20, \ |
139 | .static_prio = MAX_PRIO-20, \ | 138 | .static_prio = MAX_PRIO-20, \ |
140 | .normal_prio = MAX_PRIO-20, \ | 139 | .normal_prio = MAX_PRIO-20, \ |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index bea0ac750712..6c12989839d9 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -414,7 +414,6 @@ enum | |||
414 | TASKLET_SOFTIRQ, | 414 | TASKLET_SOFTIRQ, |
415 | SCHED_SOFTIRQ, | 415 | SCHED_SOFTIRQ, |
416 | HRTIMER_SOFTIRQ, | 416 | HRTIMER_SOFTIRQ, |
417 | RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ | ||
418 | 417 | ||
419 | NR_SOFTIRQS | 418 | NR_SOFTIRQS |
420 | }; | 419 | }; |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 09a308072f56..8b4538446636 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -53,12 +53,13 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data); | |||
53 | * Bits which can be modified via irq_set/clear/modify_status_flags() | 53 | * Bits which can be modified via irq_set/clear/modify_status_flags() |
54 | * IRQ_LEVEL - Interrupt is level type. Will be also | 54 | * IRQ_LEVEL - Interrupt is level type. Will be also |
55 | * updated in the code when the above trigger | 55 | * updated in the code when the above trigger |
56 | * bits are modified via set_irq_type() | 56 | * bits are modified via irq_set_irq_type() |
57 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect | 57 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect |
58 | * it from affinity setting | 58 | * it from affinity setting |
59 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing | 59 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing |
60 | * IRQ_NOREQUEST - Interrupt cannot be requested via | 60 | * IRQ_NOREQUEST - Interrupt cannot be requested via |
61 | * request_irq() | 61 | * request_irq() |
62 | * IRQ_NOTHREAD - Interrupt cannot be threaded | ||
62 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in | 63 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in |
63 | * request/setup_irq() | 64 | * request/setup_irq() |
64 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) | 65 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) |
@@ -85,6 +86,7 @@ enum { | |||
85 | IRQ_NO_BALANCING = (1 << 13), | 86 | IRQ_NO_BALANCING = (1 << 13), |
86 | IRQ_MOVE_PCNTXT = (1 << 14), | 87 | IRQ_MOVE_PCNTXT = (1 << 14), |
87 | IRQ_NESTED_THREAD = (1 << 15), | 88 | IRQ_NESTED_THREAD = (1 << 15), |
89 | IRQ_NOTHREAD = (1 << 16), | ||
88 | }; | 90 | }; |
89 | 91 | ||
90 | #define IRQF_MODIFY_MASK \ | 92 | #define IRQF_MODIFY_MASK \ |
@@ -261,23 +263,6 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) | |||
261 | * struct irq_chip - hardware interrupt chip descriptor | 263 | * struct irq_chip - hardware interrupt chip descriptor |
262 | * | 264 | * |
263 | * @name: name for /proc/interrupts | 265 | * @name: name for /proc/interrupts |
264 | * @startup: deprecated, replaced by irq_startup | ||
265 | * @shutdown: deprecated, replaced by irq_shutdown | ||
266 | * @enable: deprecated, replaced by irq_enable | ||
267 | * @disable: deprecated, replaced by irq_disable | ||
268 | * @ack: deprecated, replaced by irq_ack | ||
269 | * @mask: deprecated, replaced by irq_mask | ||
270 | * @mask_ack: deprecated, replaced by irq_mask_ack | ||
271 | * @unmask: deprecated, replaced by irq_unmask | ||
272 | * @eoi: deprecated, replaced by irq_eoi | ||
273 | * @end: deprecated, will go away with __do_IRQ() | ||
274 | * @set_affinity: deprecated, replaced by irq_set_affinity | ||
275 | * @retrigger: deprecated, replaced by irq_retrigger | ||
276 | * @set_type: deprecated, replaced by irq_set_type | ||
277 | * @set_wake: deprecated, replaced by irq_wake | ||
278 | * @bus_lock: deprecated, replaced by irq_bus_lock | ||
279 | * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock | ||
280 | * | ||
281 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) | 266 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) |
282 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) | 267 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) |
283 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) | 268 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) |
@@ -295,6 +280,9 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) | |||
295 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips | 280 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips |
296 | * @irq_cpu_online: configure an interrupt source for a secondary CPU | 281 | * @irq_cpu_online: configure an interrupt source for a secondary CPU |
297 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU | 282 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU |
283 | * @irq_suspend: function called from core code on suspend once per chip | ||
284 | * @irq_resume: function called from core code on resume once per chip | ||
285 | * @irq_pm_shutdown: function called from core code on shutdown once per chip | ||
298 | * @irq_print_chip: optional to print special chip info in show_interrupts | 286 | * @irq_print_chip: optional to print special chip info in show_interrupts |
299 | * @flags: chip specific flags | 287 | * @flags: chip specific flags |
300 | * | 288 | * |
@@ -324,6 +312,10 @@ struct irq_chip { | |||
324 | void (*irq_cpu_online)(struct irq_data *data); | 312 | void (*irq_cpu_online)(struct irq_data *data); |
325 | void (*irq_cpu_offline)(struct irq_data *data); | 313 | void (*irq_cpu_offline)(struct irq_data *data); |
326 | 314 | ||
315 | void (*irq_suspend)(struct irq_data *data); | ||
316 | void (*irq_resume)(struct irq_data *data); | ||
317 | void (*irq_pm_shutdown)(struct irq_data *data); | ||
318 | |||
327 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); | 319 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); |
328 | 320 | ||
329 | unsigned long flags; | 321 | unsigned long flags; |
@@ -439,7 +431,7 @@ irq_set_handler(unsigned int irq, irq_flow_handler_t handle) | |||
439 | /* | 431 | /* |
440 | * Set a highlevel chained flow handler for a given IRQ. | 432 | * Set a highlevel chained flow handler for a given IRQ. |
441 | * (a chained handler is automatically enabled and set to | 433 | * (a chained handler is automatically enabled and set to |
442 | * IRQ_NOREQUEST and IRQ_NOPROBE) | 434 | * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) |
443 | */ | 435 | */ |
444 | static inline void | 436 | static inline void |
445 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) | 437 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) |
@@ -469,6 +461,16 @@ static inline void irq_set_probe(unsigned int irq) | |||
469 | irq_modify_status(irq, IRQ_NOPROBE, 0); | 461 | irq_modify_status(irq, IRQ_NOPROBE, 0); |
470 | } | 462 | } |
471 | 463 | ||
464 | static inline void irq_set_nothread(unsigned int irq) | ||
465 | { | ||
466 | irq_modify_status(irq, 0, IRQ_NOTHREAD); | ||
467 | } | ||
468 | |||
469 | static inline void irq_set_thread(unsigned int irq) | ||
470 | { | ||
471 | irq_modify_status(irq, IRQ_NOTHREAD, 0); | ||
472 | } | ||
473 | |||
472 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) | 474 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) |
473 | { | 475 | { |
474 | if (nest) | 476 | if (nest) |
@@ -573,6 +575,145 @@ static inline int irq_reserve_irq(unsigned int irq) | |||
573 | return irq_reserve_irqs(irq, 1); | 575 | return irq_reserve_irqs(irq, 1); |
574 | } | 576 | } |
575 | 577 | ||
578 | #ifndef irq_reg_writel | ||
579 | # define irq_reg_writel(val, addr) writel(val, addr) | ||
580 | #endif | ||
581 | #ifndef irq_reg_readl | ||
582 | # define irq_reg_readl(addr) readl(addr) | ||
583 | #endif | ||
584 | |||
585 | /** | ||
586 | * struct irq_chip_regs - register offsets for struct irq_chip_generic | ||
587 | * @enable: Enable register offset to reg_base | ||
588 | * @disable: Disable register offset to reg_base | ||
589 | * @mask: Mask register offset to reg_base | ||
590 | * @ack: Ack register offset to reg_base | ||
591 | * @eoi: Eoi register offset to reg_base | ||
592 | * @type: Type configuration register offset to reg_base | ||
593 | * @polarity: Polarity configuration register offset to reg_base | ||
594 | */ | ||
595 | struct irq_chip_regs { | ||
596 | unsigned long enable; | ||
597 | unsigned long disable; | ||
598 | unsigned long mask; | ||
599 | unsigned long ack; | ||
600 | unsigned long eoi; | ||
601 | unsigned long type; | ||
602 | unsigned long polarity; | ||
603 | }; | ||
604 | |||
605 | /** | ||
606 | * struct irq_chip_type - Generic interrupt chip instance for a flow type | ||
607 | * @chip: The real interrupt chip which provides the callbacks | ||
608 | * @regs: Register offsets for this chip | ||
609 | * @handler: Flow handler associated with this chip | ||
610 | * @type: Chip can handle these flow types | ||
611 | * | ||
612 | * An irq_chip_generic can have several instances of irq_chip_type when | ||
613 | * it requires different functions and register offsets for different | ||
614 | * flow types. | ||
615 | */ | ||
616 | struct irq_chip_type { | ||
617 | struct irq_chip chip; | ||
618 | struct irq_chip_regs regs; | ||
619 | irq_flow_handler_t handler; | ||
620 | u32 type; | ||
621 | }; | ||
622 | |||
623 | /** | ||
624 | * struct irq_chip_generic - Generic irq chip data structure | ||
625 | * @lock: Lock to protect register and cache data access | ||
626 | * @reg_base: Register base address (virtual) | ||
627 | * @irq_base: Interrupt base nr for this chip | ||
628 | * @irq_cnt: Number of interrupts handled by this chip | ||
629 | * @mask_cache: Cached mask register | ||
630 | * @type_cache: Cached type register | ||
631 | * @polarity_cache: Cached polarity register | ||
632 | * @wake_enabled: Interrupt can wakeup from suspend | ||
633 | * @wake_active: Interrupt is marked as a wakeup from suspend source | ||
634 | * @num_ct: Number of available irq_chip_type instances (usually 1) | ||
635 | * @private: Private data for non generic chip callbacks | ||
636 | * @list: List head for keeping track of instances | ||
637 | * @chip_types: Array of interrupt irq_chip_types | ||
638 | * | ||
639 | * Note, that irq_chip_generic can have multiple irq_chip_type | ||
640 | * implementations which can be associated to a particular irq line of | ||
641 | * an irq_chip_generic instance. That allows sharing and protecting | ||
642 | * state in an irq_chip_generic instance when we need to implement | ||
643 | * different flow mechanisms (level/edge) for it. | ||
644 | */ | ||
645 | struct irq_chip_generic { | ||
646 | raw_spinlock_t lock; | ||
647 | void __iomem *reg_base; | ||
648 | unsigned int irq_base; | ||
649 | unsigned int irq_cnt; | ||
650 | u32 mask_cache; | ||
651 | u32 type_cache; | ||
652 | u32 polarity_cache; | ||
653 | u32 wake_enabled; | ||
654 | u32 wake_active; | ||
655 | unsigned int num_ct; | ||
656 | void *private; | ||
657 | struct list_head list; | ||
658 | struct irq_chip_type chip_types[0]; | ||
659 | }; | ||
660 | |||
661 | /** | ||
662 | * enum irq_gc_flags - Initialization flags for generic irq chips | ||
663 | * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg | ||
664 | * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for | ||
665 | * irq chips which need to call irq_set_wake() on | ||
666 | * the parent irq. Usually GPIO implementations | ||
667 | */ | ||
668 | enum irq_gc_flags { | ||
669 | IRQ_GC_INIT_MASK_CACHE = 1 << 0, | ||
670 | IRQ_GC_INIT_NESTED_LOCK = 1 << 1, | ||
671 | }; | ||
672 | |||
673 | /* Generic chip callback functions */ | ||
674 | void irq_gc_noop(struct irq_data *d); | ||
675 | void irq_gc_mask_disable_reg(struct irq_data *d); | ||
676 | void irq_gc_mask_set_bit(struct irq_data *d); | ||
677 | void irq_gc_mask_clr_bit(struct irq_data *d); | ||
678 | void irq_gc_unmask_enable_reg(struct irq_data *d); | ||
679 | void irq_gc_ack(struct irq_data *d); | ||
680 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); | ||
681 | void irq_gc_eoi(struct irq_data *d); | ||
682 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); | ||
683 | |||
684 | /* Setup functions for irq_chip_generic */ | ||
685 | struct irq_chip_generic * | ||
686 | irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, | ||
687 | void __iomem *reg_base, irq_flow_handler_t handler); | ||
688 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, | ||
689 | enum irq_gc_flags flags, unsigned int clr, | ||
690 | unsigned int set); | ||
691 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type); | ||
692 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, | ||
693 | unsigned int clr, unsigned int set); | ||
694 | |||
695 | static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) | ||
696 | { | ||
697 | return container_of(d->chip, struct irq_chip_type, chip); | ||
698 | } | ||
699 | |||
700 | #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) | ||
701 | |||
702 | #ifdef CONFIG_SMP | ||
703 | static inline void irq_gc_lock(struct irq_chip_generic *gc) | ||
704 | { | ||
705 | raw_spin_lock(&gc->lock); | ||
706 | } | ||
707 | |||
708 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) | ||
709 | { | ||
710 | raw_spin_unlock(&gc->lock); | ||
711 | } | ||
712 | #else | ||
713 | static inline void irq_gc_lock(struct irq_chip_generic *gc) { } | ||
714 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } | ||
715 | #endif | ||
716 | |||
576 | #endif /* CONFIG_GENERIC_HARDIRQS */ | 717 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
577 | 718 | ||
578 | #endif /* !CONFIG_S390 */ | 719 | #endif /* !CONFIG_S390 */ |
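The new generic irq chip helpers let simple memory-mapped interrupt controllers reuse common mask/ack/unmask callbacks instead of open-coding an irq_chip. A hedged sketch of a typical setup; the controller name, register offsets and IRQ numbers are invented for the example:

#include <linux/init.h>
#include <linux/irq.h>

/* Illustrative only: one irq_chip_type handling 32 level-triggered lines. */
static void __init foo_irq_init(void __iomem *reg_base, unsigned int irq_base)
{
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;

        gc = irq_alloc_generic_chip("foo-intc", 1, irq_base, reg_base,
                                    handle_level_irq);
        if (!gc)
                return;

        ct = gc->chip_types;
        ct->regs.mask = 0x04;                   /* hypothetical offsets */
        ct->regs.ack = 0x08;
        ct->chip.irq_mask = irq_gc_mask_set_bit;
        ct->chip.irq_unmask = irq_gc_mask_clr_bit;
        ct->chip.irq_ack = irq_gc_ack;

        /* Install the chip for all 32 lines and seed mask_cache from hardware. */
        irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
                               IRQ_NOREQUEST, 0);
}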
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index a082905b5ebe..2d921b35212c 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
@@ -16,16 +16,18 @@ struct timer_rand_state; | |||
16 | * @irq_data: per irq and chip data passed down to chip functions | 16 | * @irq_data: per irq and chip data passed down to chip functions |
17 | * @timer_rand_state: pointer to timer rand state struct | 17 | * @timer_rand_state: pointer to timer rand state struct |
18 | * @kstat_irqs: irq stats per cpu | 18 | * @kstat_irqs: irq stats per cpu |
19 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] | 19 | * @handle_irq: highlevel irq-events handler |
20 | * @preflow_handler: handler called before the flow handler (currently used by sparc) | ||
20 | * @action: the irq action chain | 21 | * @action: the irq action chain |
21 | * @status: status information | 22 | * @status: status information |
22 | * @core_internal_state__do_not_mess_with_it: core internal status information | 23 | * @core_internal_state__do_not_mess_with_it: core internal status information |
23 | * @depth: disable-depth, for nested irq_disable() calls | 24 | * @depth: disable-depth, for nested irq_disable() calls |
24 | * @wake_depth: enable depth, for multiple set_irq_wake() callers | 25 | * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers |
25 | * @irq_count: stats field to detect stalled irqs | 26 | * @irq_count: stats field to detect stalled irqs |
26 | * @last_unhandled: aging timer for unhandled count | 27 | * @last_unhandled: aging timer for unhandled count |
27 | * @irqs_unhandled: stats field for spurious unhandled interrupts | 28 | * @irqs_unhandled: stats field for spurious unhandled interrupts |
28 | * @lock: locking for SMP | 29 | * @lock: locking for SMP |
30 | * @affinity_hint: hint to user space for preferred irq affinity | ||
29 | * @affinity_notify: context for notification of affinity changes | 31 | * @affinity_notify: context for notification of affinity changes |
30 | * @pending_mask: pending rebalanced interrupts | 32 | * @pending_mask: pending rebalanced interrupts |
31 | * @threads_oneshot: bitfield to handle shared oneshot threads | 33 | * @threads_oneshot: bitfield to handle shared oneshot threads |
@@ -109,10 +111,7 @@ static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *de | |||
109 | desc->handle_irq(irq, desc); | 111 | desc->handle_irq(irq, desc); |
110 | } | 112 | } |
111 | 113 | ||
112 | static inline void generic_handle_irq(unsigned int irq) | 114 | int generic_handle_irq(unsigned int irq); |
113 | { | ||
114 | generic_handle_irq_desc(irq, irq_to_desc(irq)); | ||
115 | } | ||
116 | 115 | ||
117 | /* Test to see if a driver has successfully requested an irq */ | 116 | /* Test to see if a driver has successfully requested an irq */ |
118 | static inline int irq_has_action(unsigned int irq) | 117 | static inline int irq_has_action(unsigned int irq) |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 7880f18e4b86..83e745f3ead7 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
@@ -1,20 +1,43 @@ | |||
1 | #ifndef _LINUX_JUMP_LABEL_H | 1 | #ifndef _LINUX_JUMP_LABEL_H |
2 | #define _LINUX_JUMP_LABEL_H | 2 | #define _LINUX_JUMP_LABEL_H |
3 | 3 | ||
4 | #include <linux/types.h> | ||
5 | #include <linux/compiler.h> | ||
6 | |||
4 | #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) | 7 | #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) |
8 | |||
9 | struct jump_label_key { | ||
10 | atomic_t enabled; | ||
11 | struct jump_entry *entries; | ||
12 | #ifdef CONFIG_MODULES | ||
13 | struct jump_label_mod *next; | ||
14 | #endif | ||
15 | }; | ||
16 | |||
5 | # include <asm/jump_label.h> | 17 | # include <asm/jump_label.h> |
6 | # define HAVE_JUMP_LABEL | 18 | # define HAVE_JUMP_LABEL |
7 | #endif | 19 | #endif |
8 | 20 | ||
9 | enum jump_label_type { | 21 | enum jump_label_type { |
22 | JUMP_LABEL_DISABLE = 0, | ||
10 | JUMP_LABEL_ENABLE, | 23 | JUMP_LABEL_ENABLE, |
11 | JUMP_LABEL_DISABLE | ||
12 | }; | 24 | }; |
13 | 25 | ||
14 | struct module; | 26 | struct module; |
15 | 27 | ||
16 | #ifdef HAVE_JUMP_LABEL | 28 | #ifdef HAVE_JUMP_LABEL |
17 | 29 | ||
30 | #ifdef CONFIG_MODULES | ||
31 | #define JUMP_LABEL_INIT {{ 0 }, NULL, NULL} | ||
32 | #else | ||
33 | #define JUMP_LABEL_INIT {{ 0 }, NULL} | ||
34 | #endif | ||
35 | |||
36 | static __always_inline bool static_branch(struct jump_label_key *key) | ||
37 | { | ||
38 | return arch_static_branch(key); | ||
39 | } | ||
40 | |||
18 | extern struct jump_entry __start___jump_table[]; | 41 | extern struct jump_entry __start___jump_table[]; |
19 | extern struct jump_entry __stop___jump_table[]; | 42 | extern struct jump_entry __stop___jump_table[]; |
20 | 43 | ||
@@ -23,37 +46,37 @@ extern void jump_label_unlock(void); | |||
23 | extern void arch_jump_label_transform(struct jump_entry *entry, | 46 | extern void arch_jump_label_transform(struct jump_entry *entry, |
24 | enum jump_label_type type); | 47 | enum jump_label_type type); |
25 | extern void arch_jump_label_text_poke_early(jump_label_t addr); | 48 | extern void arch_jump_label_text_poke_early(jump_label_t addr); |
26 | extern void jump_label_update(unsigned long key, enum jump_label_type type); | ||
27 | extern void jump_label_apply_nops(struct module *mod); | ||
28 | extern int jump_label_text_reserved(void *start, void *end); | 49 | extern int jump_label_text_reserved(void *start, void *end); |
50 | extern void jump_label_inc(struct jump_label_key *key); | ||
51 | extern void jump_label_dec(struct jump_label_key *key); | ||
52 | extern bool jump_label_enabled(struct jump_label_key *key); | ||
53 | extern void jump_label_apply_nops(struct module *mod); | ||
29 | 54 | ||
30 | #define jump_label_enable(key) \ | 55 | #else |
31 | jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); | ||
32 | 56 | ||
33 | #define jump_label_disable(key) \ | 57 | #include <asm/atomic.h> |
34 | jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE); | ||
35 | 58 | ||
36 | #else | 59 | #define JUMP_LABEL_INIT {ATOMIC_INIT(0)} |
37 | 60 | ||
38 | #define JUMP_LABEL(key, label) \ | 61 | struct jump_label_key { |
39 | do { \ | 62 | atomic_t enabled; |
40 | if (unlikely(*key)) \ | 63 | }; |
41 | goto label; \ | ||
42 | } while (0) | ||
43 | 64 | ||
44 | #define jump_label_enable(cond_var) \ | 65 | static __always_inline bool static_branch(struct jump_label_key *key) |
45 | do { \ | 66 | { |
46 | *(cond_var) = 1; \ | 67 | if (unlikely(atomic_read(&key->enabled))) |
47 | } while (0) | 68 | return true; |
69 | return false; | ||
70 | } | ||
48 | 71 | ||
49 | #define jump_label_disable(cond_var) \ | 72 | static inline void jump_label_inc(struct jump_label_key *key) |
50 | do { \ | 73 | { |
51 | *(cond_var) = 0; \ | 74 | atomic_inc(&key->enabled); |
52 | } while (0) | 75 | } |
53 | 76 | ||
54 | static inline int jump_label_apply_nops(struct module *mod) | 77 | static inline void jump_label_dec(struct jump_label_key *key) |
55 | { | 78 | { |
56 | return 0; | 79 | atomic_dec(&key->enabled); |
57 | } | 80 | } |
58 | 81 | ||
59 | static inline int jump_label_text_reserved(void *start, void *end) | 82 | static inline int jump_label_text_reserved(void *start, void *end) |
@@ -64,16 +87,16 @@ static inline int jump_label_text_reserved(void *start, void *end) | |||
64 | static inline void jump_label_lock(void) {} | 87 | static inline void jump_label_lock(void) {} |
65 | static inline void jump_label_unlock(void) {} | 88 | static inline void jump_label_unlock(void) {} |
66 | 89 | ||
67 | #endif | 90 | static inline bool jump_label_enabled(struct jump_label_key *key) |
91 | { | ||
92 | return !!atomic_read(&key->enabled); | ||
93 | } | ||
68 | 94 | ||
69 | #define COND_STMT(key, stmt) \ | 95 | static inline int jump_label_apply_nops(struct module *mod) |
70 | do { \ | 96 | { |
71 | __label__ jl_enabled; \ | 97 | return 0; |
72 | JUMP_LABEL(key, jl_enabled); \ | 98 | } |
73 | if (0) { \ | 99 | |
74 | jl_enabled: \ | 100 | #endif |
75 | stmt; \ | ||
76 | } \ | ||
77 | } while (0) | ||
78 | 101 | ||
79 | #endif | 102 | #endif |
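jump_label keys are now reference counted and queried with static_branch(); the old jump_label_enable()/jump_label_disable() macros and the jump_label_ref.h wrappers (removed below) go away. A hedged sketch of the new usage pattern; the key and functions are hypothetical:

#include <linux/jump_label.h>
#include <linux/kernel.h>

/* Illustrative only: a feature flag that becomes a patched no-op branch when
 * HAVE_JUMP_LABEL is available, and an atomic_read() fallback otherwise. */
static struct jump_label_key my_feature_key;

static void fast_path(void)
{
        if (static_branch(&my_feature_key))
                pr_info("feature enabled\n");
}

static void feature_get(void)
{
        jump_label_inc(&my_feature_key);        /* nestable enable */
}

static void feature_put(void)
{
        jump_label_dec(&my_feature_key);
}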
diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h deleted file mode 100644 index e5d012ad92c6..000000000000 --- a/include/linux/jump_label_ref.h +++ /dev/null | |||
@@ -1,44 +0,0 @@ | |||
1 | #ifndef _LINUX_JUMP_LABEL_REF_H | ||
2 | #define _LINUX_JUMP_LABEL_REF_H | ||
3 | |||
4 | #include <linux/jump_label.h> | ||
5 | #include <asm/atomic.h> | ||
6 | |||
7 | #ifdef HAVE_JUMP_LABEL | ||
8 | |||
9 | static inline void jump_label_inc(atomic_t *key) | ||
10 | { | ||
11 | if (atomic_add_return(1, key) == 1) | ||
12 | jump_label_enable(key); | ||
13 | } | ||
14 | |||
15 | static inline void jump_label_dec(atomic_t *key) | ||
16 | { | ||
17 | if (atomic_dec_and_test(key)) | ||
18 | jump_label_disable(key); | ||
19 | } | ||
20 | |||
21 | #else /* !HAVE_JUMP_LABEL */ | ||
22 | |||
23 | static inline void jump_label_inc(atomic_t *key) | ||
24 | { | ||
25 | atomic_inc(key); | ||
26 | } | ||
27 | |||
28 | static inline void jump_label_dec(atomic_t *key) | ||
29 | { | ||
30 | atomic_dec(key); | ||
31 | } | ||
32 | |||
33 | #undef JUMP_LABEL | ||
34 | #define JUMP_LABEL(key, label) \ | ||
35 | do { \ | ||
36 | if (unlikely(__builtin_choose_expr( \ | ||
37 | __builtin_types_compatible_p(typeof(key), atomic_t *), \ | ||
38 | atomic_read((atomic_t *)(key)), *(key)))) \ | ||
39 | goto label; \ | ||
40 | } while (0) | ||
41 | |||
42 | #endif /* HAVE_JUMP_LABEL */ | ||
43 | |||
44 | #endif /* _LINUX_JUMP_LABEL_REF_H */ | ||
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 00cec4dc0ae2..f37ba716ef8b 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -283,6 +283,7 @@ extern char *get_options(const char *str, int nints, int *ints); | |||
283 | extern unsigned long long memparse(const char *ptr, char **retptr); | 283 | extern unsigned long long memparse(const char *ptr, char **retptr); |
284 | 284 | ||
285 | extern int core_kernel_text(unsigned long addr); | 285 | extern int core_kernel_text(unsigned long addr); |
286 | extern int core_kernel_data(unsigned long addr); | ||
286 | extern int __kernel_text_address(unsigned long addr); | 287 | extern int __kernel_text_address(unsigned long addr); |
287 | extern int kernel_text_address(unsigned long addr); | 288 | extern int kernel_text_address(unsigned long addr); |
288 | extern int func_ptr_is_kernel_text(void *ptr); | 289 | extern int func_ptr_is_kernel_text(void *ptr); |
diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 6efd7a78de6a..310231823852 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h | |||
@@ -113,5 +113,6 @@ extern void usermodehelper_init(void); | |||
113 | 113 | ||
114 | extern int usermodehelper_disable(void); | 114 | extern int usermodehelper_disable(void); |
115 | extern void usermodehelper_enable(void); | 115 | extern void usermodehelper_enable(void); |
116 | extern bool usermodehelper_is_disabled(void); | ||
116 | 117 | ||
117 | #endif /* __LINUX_KMOD_H__ */ | 118 | #endif /* __LINUX_KMOD_H__ */ |
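usermodehelper_is_disabled() lets callers detect the window (e.g. during suspend/resume) when usermode helpers are disabled, instead of queueing work that cannot run. A hedged sketch; the helper path is made up:

#include <linux/errno.h>
#include <linux/kmod.h>

/* Illustrative only: bail out early rather than invoking a helper that
 * cannot run while usermode helpers are disabled. */
static int run_foo_helper(void)
{
        char *argv[] = { "/sbin/foo-helper", NULL };
        char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

        if (usermodehelper_is_disabled())
                return -EBUSY;

        return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
}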
diff --git a/include/linux/list.h b/include/linux/list.h index 3a54266a1e85..cc6d2aa6b415 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
6 | #include <linux/poison.h> | 6 | #include <linux/poison.h> |
7 | #include <linux/prefetch.h> | 7 | #include <linux/const.h> |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * Simple doubly linked list implementation. | 10 | * Simple doubly linked list implementation. |
@@ -367,18 +367,15 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
367 | * @head: the head for your list. | 367 | * @head: the head for your list. |
368 | */ | 368 | */ |
369 | #define list_for_each(pos, head) \ | 369 | #define list_for_each(pos, head) \ |
370 | for (pos = (head)->next; prefetch(pos->next), pos != (head); \ | 370 | for (pos = (head)->next; pos != (head); pos = pos->next) |
371 | pos = pos->next) | ||
372 | 371 | ||
373 | /** | 372 | /** |
374 | * __list_for_each - iterate over a list | 373 | * __list_for_each - iterate over a list |
375 | * @pos: the &struct list_head to use as a loop cursor. | 374 | * @pos: the &struct list_head to use as a loop cursor. |
376 | * @head: the head for your list. | 375 | * @head: the head for your list. |
377 | * | 376 | * |
378 | * This variant differs from list_for_each() in that it's the | 377 | * This variant doesn't differ from list_for_each() any more. |
379 | * simplest possible list iteration code, no prefetching is done. | 378 | * We don't do prefetching in either case. |
380 | * Use this for code that knows the list to be very short (empty | ||
381 | * or 1 entry) most of the time. | ||
382 | */ | 379 | */ |
383 | #define __list_for_each(pos, head) \ | 380 | #define __list_for_each(pos, head) \ |
384 | for (pos = (head)->next; pos != (head); pos = pos->next) | 381 | for (pos = (head)->next; pos != (head); pos = pos->next) |
@@ -389,8 +386,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
389 | * @head: the head for your list. | 386 | * @head: the head for your list. |
390 | */ | 387 | */ |
391 | #define list_for_each_prev(pos, head) \ | 388 | #define list_for_each_prev(pos, head) \ |
392 | for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ | 389 | for (pos = (head)->prev; pos != (head); pos = pos->prev) |
393 | pos = pos->prev) | ||
394 | 390 | ||
395 | /** | 391 | /** |
396 | * list_for_each_safe - iterate over a list safe against removal of list entry | 392 | * list_for_each_safe - iterate over a list safe against removal of list entry |
@@ -410,7 +406,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
410 | */ | 406 | */ |
411 | #define list_for_each_prev_safe(pos, n, head) \ | 407 | #define list_for_each_prev_safe(pos, n, head) \ |
412 | for (pos = (head)->prev, n = pos->prev; \ | 408 | for (pos = (head)->prev, n = pos->prev; \ |
413 | prefetch(pos->prev), pos != (head); \ | 409 | pos != (head); \ |
414 | pos = n, n = pos->prev) | 410 | pos = n, n = pos->prev) |
415 | 411 | ||
416 | /** | 412 | /** |
@@ -421,7 +417,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
421 | */ | 417 | */ |
422 | #define list_for_each_entry(pos, head, member) \ | 418 | #define list_for_each_entry(pos, head, member) \ |
423 | for (pos = list_entry((head)->next, typeof(*pos), member); \ | 419 | for (pos = list_entry((head)->next, typeof(*pos), member); \ |
424 | prefetch(pos->member.next), &pos->member != (head); \ | 420 | &pos->member != (head); \ |
425 | pos = list_entry(pos->member.next, typeof(*pos), member)) | 421 | pos = list_entry(pos->member.next, typeof(*pos), member)) |
426 | 422 | ||
427 | /** | 423 | /** |
@@ -432,7 +428,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
432 | */ | 428 | */ |
433 | #define list_for_each_entry_reverse(pos, head, member) \ | 429 | #define list_for_each_entry_reverse(pos, head, member) \ |
434 | for (pos = list_entry((head)->prev, typeof(*pos), member); \ | 430 | for (pos = list_entry((head)->prev, typeof(*pos), member); \ |
435 | prefetch(pos->member.prev), &pos->member != (head); \ | 431 | &pos->member != (head); \ |
436 | pos = list_entry(pos->member.prev, typeof(*pos), member)) | 432 | pos = list_entry(pos->member.prev, typeof(*pos), member)) |
437 | 433 | ||
438 | /** | 434 | /** |
@@ -457,7 +453,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
457 | */ | 453 | */ |
458 | #define list_for_each_entry_continue(pos, head, member) \ | 454 | #define list_for_each_entry_continue(pos, head, member) \ |
459 | for (pos = list_entry(pos->member.next, typeof(*pos), member); \ | 455 | for (pos = list_entry(pos->member.next, typeof(*pos), member); \ |
460 | prefetch(pos->member.next), &pos->member != (head); \ | 456 | &pos->member != (head); \ |
461 | pos = list_entry(pos->member.next, typeof(*pos), member)) | 457 | pos = list_entry(pos->member.next, typeof(*pos), member)) |
462 | 458 | ||
463 | /** | 459 | /** |
@@ -471,7 +467,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
471 | */ | 467 | */ |
472 | #define list_for_each_entry_continue_reverse(pos, head, member) \ | 468 | #define list_for_each_entry_continue_reverse(pos, head, member) \ |
473 | for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ | 469 | for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ |
474 | prefetch(pos->member.prev), &pos->member != (head); \ | 470 | &pos->member != (head); \ |
475 | pos = list_entry(pos->member.prev, typeof(*pos), member)) | 471 | pos = list_entry(pos->member.prev, typeof(*pos), member)) |
476 | 472 | ||
477 | /** | 473 | /** |
@@ -483,7 +479,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
483 | * Iterate over list of given type, continuing from current position. | 479 | * Iterate over list of given type, continuing from current position. |
484 | */ | 480 | */ |
485 | #define list_for_each_entry_from(pos, head, member) \ | 481 | #define list_for_each_entry_from(pos, head, member) \ |
486 | for (; prefetch(pos->member.next), &pos->member != (head); \ | 482 | for (; &pos->member != (head); \ |
487 | pos = list_entry(pos->member.next, typeof(*pos), member)) | 483 | pos = list_entry(pos->member.next, typeof(*pos), member)) |
488 | 484 | ||
489 | /** | 485 | /** |
@@ -664,8 +660,7 @@ static inline void hlist_move_list(struct hlist_head *old, | |||
664 | #define hlist_entry(ptr, type, member) container_of(ptr,type,member) | 660 | #define hlist_entry(ptr, type, member) container_of(ptr,type,member) |
665 | 661 | ||
666 | #define hlist_for_each(pos, head) \ | 662 | #define hlist_for_each(pos, head) \ |
667 | for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ | 663 | for (pos = (head)->first; pos ; pos = pos->next) |
668 | pos = pos->next) | ||
669 | 664 | ||
670 | #define hlist_for_each_safe(pos, n, head) \ | 665 | #define hlist_for_each_safe(pos, n, head) \ |
671 | for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ | 666 | for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ |
@@ -680,7 +675,7 @@ static inline void hlist_move_list(struct hlist_head *old, | |||
680 | */ | 675 | */ |
681 | #define hlist_for_each_entry(tpos, pos, head, member) \ | 676 | #define hlist_for_each_entry(tpos, pos, head, member) \ |
682 | for (pos = (head)->first; \ | 677 | for (pos = (head)->first; \ |
683 | pos && ({ prefetch(pos->next); 1;}) && \ | 678 | pos && \ |
684 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | 679 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
685 | pos = pos->next) | 680 | pos = pos->next) |
686 | 681 | ||
@@ -692,7 +687,7 @@ static inline void hlist_move_list(struct hlist_head *old, | |||
692 | */ | 687 | */ |
693 | #define hlist_for_each_entry_continue(tpos, pos, member) \ | 688 | #define hlist_for_each_entry_continue(tpos, pos, member) \ |
694 | for (pos = (pos)->next; \ | 689 | for (pos = (pos)->next; \ |
695 | pos && ({ prefetch(pos->next); 1;}) && \ | 690 | pos && \ |
696 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | 691 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
697 | pos = pos->next) | 692 | pos = pos->next) |
698 | 693 | ||
@@ -703,7 +698,7 @@ static inline void hlist_move_list(struct hlist_head *old, | |||
703 | * @member: the name of the hlist_node within the struct. | 698 | * @member: the name of the hlist_node within the struct. |
704 | */ | 699 | */ |
705 | #define hlist_for_each_entry_from(tpos, pos, member) \ | 700 | #define hlist_for_each_entry_from(tpos, pos, member) \ |
706 | for (; pos && ({ prefetch(pos->next); 1;}) && \ | 701 | for (; pos && \ |
707 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | 702 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
708 | pos = pos->next) | 703 | pos = pos->next) |
709 | 704 | ||
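The list iterators drop their speculative prefetch() of the next node, which also removes the <linux/prefetch.h> dependency; callers are unaffected. A short sketch of the unchanged usage, with an invented element type:

#include <linux/list.h>

/* Illustrative only: iteration reads exactly as before, minus the prefetch. */
struct foo_item {
        int value;
        struct list_head node;
};

static int foo_sum(struct list_head *head)
{
        struct foo_item *item;
        int sum = 0;

        list_for_each_entry(item, head, node)
                sum += item->value;
        return sum;
}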
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h index afe4db49402d..632d1567a1b6 100644 --- a/include/linux/mfd/wm831x/pdata.h +++ b/include/linux/mfd/wm831x/pdata.h | |||
@@ -81,7 +81,9 @@ struct wm831x_touch_pdata { | |||
81 | int rpu; /** Pen down sensitivity resistor divider */ | 81 | int rpu; /** Pen down sensitivity resistor divider */ |
82 | int pressure; /** Report pressure (boolean) */ | 82 | int pressure; /** Report pressure (boolean) */ |
83 | unsigned int data_irq; /** Touch data ready IRQ */ | 83 | unsigned int data_irq; /** Touch data ready IRQ */ |
84 | int data_irqf; /** IRQ flags for data ready IRQ */ | ||
84 | unsigned int pd_irq; /** Touch pendown detect IRQ */ | 85 | unsigned int pd_irq; /** Touch pendown detect IRQ */ |
86 | int pd_irqf; /** IRQ flags for pen down IRQ */ | ||
85 | }; | 87 | }; |
86 | 88 | ||
87 | enum wm831x_watchdog_action { | 89 | enum wm831x_watchdog_action { |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 2348db26bc3d..6507dde38b16 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1011,11 +1011,33 @@ int set_page_dirty_lock(struct page *page); | |||
1011 | int clear_page_dirty_for_io(struct page *page); | 1011 | int clear_page_dirty_for_io(struct page *page); |
1012 | 1012 | ||
1013 | /* Is the vma a continuation of the stack vma above it? */ | 1013 | /* Is the vma a continuation of the stack vma above it? */ |
1014 | static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) | 1014 | static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) |
1015 | { | 1015 | { |
1016 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); | 1016 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); |
1017 | } | 1017 | } |
1018 | 1018 | ||
1019 | static inline int stack_guard_page_start(struct vm_area_struct *vma, | ||
1020 | unsigned long addr) | ||
1021 | { | ||
1022 | return (vma->vm_flags & VM_GROWSDOWN) && | ||
1023 | (vma->vm_start == addr) && | ||
1024 | !vma_growsdown(vma->vm_prev, addr); | ||
1025 | } | ||
1026 | |||
1027 | /* Is the vma a continuation of the stack vma below it? */ | ||
1028 | static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) | ||
1029 | { | ||
1030 | return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); | ||
1031 | } | ||
1032 | |||
1033 | static inline int stack_guard_page_end(struct vm_area_struct *vma, | ||
1034 | unsigned long addr) | ||
1035 | { | ||
1036 | return (vma->vm_flags & VM_GROWSUP) && | ||
1037 | (vma->vm_end == addr) && | ||
1038 | !vma_growsup(vma->vm_next, addr); | ||
1039 | } | ||
1040 | |||
1019 | extern unsigned long move_page_tables(struct vm_area_struct *vma, | 1041 | extern unsigned long move_page_tables(struct vm_area_struct *vma, |
1020 | unsigned long old_addr, struct vm_area_struct *new_vma, | 1042 | unsigned long old_addr, struct vm_area_struct *new_vma, |
1021 | unsigned long new_addr, unsigned long len); | 1043 | unsigned long new_addr, unsigned long len); |
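stack_guard_page_start() and stack_guard_page_end() identify the guard page at the growing edge of a stack VMA, for both grows-down and grows-up stacks. A simplified, hedged sketch of the kind of check a page-fault or get_user_pages path performs (not the exact mainline code):

#include <linux/mm.h>

/* Illustrative only: true if a fault address sits on the guard page of the
 * given stack VMA. */
static int faults_on_stack_guard(struct vm_area_struct *vma,
                                 unsigned long address)
{
        unsigned long page_addr = address & PAGE_MASK;

        return stack_guard_page_start(vma, page_addr) ||
               stack_guard_page_end(vma, page_addr + PAGE_SIZE);
}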
diff --git a/include/linux/module.h b/include/linux/module.h index 5de42043dff0..d9ca2d5dc6d0 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -64,6 +64,9 @@ struct module_version_attribute { | |||
64 | const char *version; | 64 | const char *version; |
65 | } __attribute__ ((__aligned__(sizeof(void *)))); | 65 | } __attribute__ ((__aligned__(sizeof(void *)))); |
66 | 66 | ||
67 | extern ssize_t __modver_version_show(struct module_attribute *, | ||
68 | struct module *, char *); | ||
69 | |||
67 | struct module_kobject | 70 | struct module_kobject |
68 | { | 71 | { |
69 | struct kobject kobj; | 72 | struct kobject kobj; |
@@ -172,12 +175,7 @@ extern struct module __this_module; | |||
172 | #define MODULE_VERSION(_version) MODULE_INFO(version, _version) | 175 | #define MODULE_VERSION(_version) MODULE_INFO(version, _version) |
173 | #else | 176 | #else |
174 | #define MODULE_VERSION(_version) \ | 177 | #define MODULE_VERSION(_version) \ |
175 | extern ssize_t __modver_version_show(struct module_attribute *, \ | 178 | static struct module_version_attribute ___modver_attr = { \ |
176 | struct module *, char *); \ | ||
177 | static struct module_version_attribute __modver_version_attr \ | ||
178 | __used \ | ||
179 | __attribute__ ((__section__ ("__modver"),aligned(sizeof(void *)))) \ | ||
180 | = { \ | ||
181 | .mattr = { \ | 179 | .mattr = { \ |
182 | .attr = { \ | 180 | .attr = { \ |
183 | .name = "version", \ | 181 | .name = "version", \ |
@@ -187,7 +185,10 @@ extern struct module __this_module; | |||
187 | }, \ | 185 | }, \ |
188 | .module_name = KBUILD_MODNAME, \ | 186 | .module_name = KBUILD_MODNAME, \ |
189 | .version = _version, \ | 187 | .version = _version, \ |
190 | } | 188 | }; \ |
189 | static const struct module_version_attribute \ | ||
190 | __used __attribute__ ((__section__ ("__modver"))) \ | ||
191 | * __moduleparam_const __modver_attr = &___modver_attr | ||
191 | #endif | 192 | #endif |
192 | 193 | ||
193 | /* Optional firmware file (or files) needed by the module | 194 | /* Optional firmware file (or files) needed by the module |
@@ -223,7 +224,7 @@ struct module_use { | |||
223 | extern void *__crc_##sym __attribute__((weak)); \ | 224 | extern void *__crc_##sym __attribute__((weak)); \ |
224 | static const unsigned long __kcrctab_##sym \ | 225 | static const unsigned long __kcrctab_##sym \ |
225 | __used \ | 226 | __used \ |
226 | __attribute__((section("__kcrctab" sec), unused)) \ | 227 | __attribute__((section("___kcrctab" sec "+" #sym), unused)) \ |
227 | = (unsigned long) &__crc_##sym; | 228 | = (unsigned long) &__crc_##sym; |
228 | #else | 229 | #else |
229 | #define __CRC_SYMBOL(sym, sec) | 230 | #define __CRC_SYMBOL(sym, sec) |
@@ -238,7 +239,7 @@ struct module_use { | |||
238 | = MODULE_SYMBOL_PREFIX #sym; \ | 239 | = MODULE_SYMBOL_PREFIX #sym; \ |
239 | static const struct kernel_symbol __ksymtab_##sym \ | 240 | static const struct kernel_symbol __ksymtab_##sym \ |
240 | __used \ | 241 | __used \ |
241 | __attribute__((section("__ksymtab" sec), unused)) \ | 242 | __attribute__((section("___ksymtab" sec "+" #sym), unused)) \ |
242 | = { (unsigned long)&sym, __kstrtab_##sym } | 243 | = { (unsigned long)&sym, __kstrtab_##sym } |
243 | 244 | ||
244 | #define EXPORT_SYMBOL(sym) \ | 245 | #define EXPORT_SYMBOL(sym) \ |
@@ -367,34 +368,35 @@ struct module | |||
367 | struct module_notes_attrs *notes_attrs; | 368 | struct module_notes_attrs *notes_attrs; |
368 | #endif | 369 | #endif |
369 | 370 | ||
371 | /* The command line arguments (may be mangled). People like | ||
372 | keeping pointers to this stuff */ | ||
373 | char *args; | ||
374 | |||
370 | #ifdef CONFIG_SMP | 375 | #ifdef CONFIG_SMP |
371 | /* Per-cpu data. */ | 376 | /* Per-cpu data. */ |
372 | void __percpu *percpu; | 377 | void __percpu *percpu; |
373 | unsigned int percpu_size; | 378 | unsigned int percpu_size; |
374 | #endif | 379 | #endif |
375 | 380 | ||
376 | /* The command line arguments (may be mangled). People like | ||
377 | keeping pointers to this stuff */ | ||
378 | char *args; | ||
379 | #ifdef CONFIG_TRACEPOINTS | 381 | #ifdef CONFIG_TRACEPOINTS |
380 | struct tracepoint * const *tracepoints_ptrs; | ||
381 | unsigned int num_tracepoints; | 382 | unsigned int num_tracepoints; |
383 | struct tracepoint * const *tracepoints_ptrs; | ||
382 | #endif | 384 | #endif |
383 | #ifdef HAVE_JUMP_LABEL | 385 | #ifdef HAVE_JUMP_LABEL |
384 | struct jump_entry *jump_entries; | 386 | struct jump_entry *jump_entries; |
385 | unsigned int num_jump_entries; | 387 | unsigned int num_jump_entries; |
386 | #endif | 388 | #endif |
387 | #ifdef CONFIG_TRACING | 389 | #ifdef CONFIG_TRACING |
388 | const char **trace_bprintk_fmt_start; | ||
389 | unsigned int num_trace_bprintk_fmt; | 390 | unsigned int num_trace_bprintk_fmt; |
391 | const char **trace_bprintk_fmt_start; | ||
390 | #endif | 392 | #endif |
391 | #ifdef CONFIG_EVENT_TRACING | 393 | #ifdef CONFIG_EVENT_TRACING |
392 | struct ftrace_event_call **trace_events; | 394 | struct ftrace_event_call **trace_events; |
393 | unsigned int num_trace_events; | 395 | unsigned int num_trace_events; |
394 | #endif | 396 | #endif |
395 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 397 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
396 | unsigned long *ftrace_callsites; | ||
397 | unsigned int num_ftrace_callsites; | 398 | unsigned int num_ftrace_callsites; |
399 | unsigned long *ftrace_callsites; | ||
398 | #endif | 400 | #endif |
399 | 401 | ||
400 | #ifdef CONFIG_MODULE_UNLOAD | 402 | #ifdef CONFIG_MODULE_UNLOAD |
@@ -475,8 +477,9 @@ const struct kernel_symbol *find_symbol(const char *name, | |||
475 | bool warn); | 477 | bool warn); |
476 | 478 | ||
477 | /* Walk the exported symbol table */ | 479 | /* Walk the exported symbol table */ |
478 | bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, | 480 | bool each_symbol_section(bool (*fn)(const struct symsearch *arr, |
479 | unsigned int symnum, void *data), void *data); | 481 | struct module *owner, |
482 | void *data), void *data); | ||
480 | 483 | ||
481 | /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if | 484 | /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if |
482 | symnum out of range. */ | 485 | symnum out of range. */ |
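EXPORT_SYMBOL() and MODULE_VERSION() now emit per-symbol, sortable sections (___ksymtab+sym, __modver) instead of one unsorted table, so exported symbols can be looked up more efficiently. Nothing changes for module authors; a brief sketch of the unchanged usage:

#include <linux/module.h>

/* Illustrative only: the macros below now land in the new sorted sections. */
static int foo_answer = 42;

int foo_get_answer(void)
{
        return foo_answer;
}
EXPORT_SYMBOL_GPL(foo_get_answer);

MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");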
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 07b41951e3fa..ddaae98c53f9 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
@@ -67,9 +67,9 @@ struct kparam_string { | |||
67 | struct kparam_array | 67 | struct kparam_array |
68 | { | 68 | { |
69 | unsigned int max; | 69 | unsigned int max; |
70 | unsigned int elemsize; | ||
70 | unsigned int *num; | 71 | unsigned int *num; |
71 | const struct kernel_param_ops *ops; | 72 | const struct kernel_param_ops *ops; |
72 | unsigned int elemsize; | ||
73 | void *elem; | 73 | void *elem; |
74 | }; | 74 | }; |
75 | 75 | ||
@@ -371,8 +371,9 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp); | |||
371 | */ | 371 | */ |
372 | #define module_param_array_named(name, array, type, nump, perm) \ | 372 | #define module_param_array_named(name, array, type, nump, perm) \ |
373 | static const struct kparam_array __param_arr_##name \ | 373 | static const struct kparam_array __param_arr_##name \ |
374 | = { ARRAY_SIZE(array), nump, ¶m_ops_##type, \ | 374 | = { .max = ARRAY_SIZE(array), .num = nump, \ |
375 | sizeof(array[0]), array }; \ | 375 | .ops = ¶m_ops_##type, \ |
376 | .elemsize = sizeof(array[0]), .elem = array }; \ | ||
376 | __module_param_call(MODULE_PARAM_PREFIX, name, \ | 377 | __module_param_call(MODULE_PARAM_PREFIX, name, \ |
377 | ¶m_array_ops, \ | 378 | ¶m_array_ops, \ |
378 | .arr = &__param_arr_##name, \ | 379 | .arr = &__param_arr_##name, \ |
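struct kparam_array changes its field order and module_param_array_named() switches to designated initializers, which keeps the macro correct regardless of layout. Usage is unchanged; a brief sketch with invented parameter names:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Illustrative only: an array parameter, e.g. loaded as foo.chan=1,2,3 */
static int channels[4];
static unsigned int nr_channels;
module_param_array_named(chan, channels, int, &nr_channels, 0444);
MODULE_PARM_DESC(chan, "Up to four channel numbers");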
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 94b48bd40dd7..c75471db576e 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -51,7 +51,7 @@ struct mutex { | |||
51 | spinlock_t wait_lock; | 51 | spinlock_t wait_lock; |
52 | struct list_head wait_list; | 52 | struct list_head wait_list; |
53 | #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) | 53 | #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) |
54 | struct thread_info *owner; | 54 | struct task_struct *owner; |
55 | #endif | 55 | #endif |
56 | #ifdef CONFIG_DEBUG_MUTEXES | 56 | #ifdef CONFIG_DEBUG_MUTEXES |
57 | const char *name; | 57 | const char *name; |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 890dce242639..7e371f7df9c4 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -233,6 +233,7 @@ struct nfs4_layoutget { | |||
233 | struct nfs4_layoutget_args args; | 233 | struct nfs4_layoutget_args args; |
234 | struct nfs4_layoutget_res res; | 234 | struct nfs4_layoutget_res res; |
235 | struct pnfs_layout_segment **lsegpp; | 235 | struct pnfs_layout_segment **lsegpp; |
236 | gfp_t gfp_flags; | ||
236 | }; | 237 | }; |
237 | 238 | ||
238 | struct nfs4_getdeviceinfo_args { | 239 | struct nfs4_getdeviceinfo_args { |
diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 8bfe6c1d4365..ae5638480ef2 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h | |||
@@ -21,8 +21,7 @@ extern void of_device_make_bus_id(struct device *dev); | |||
21 | static inline int of_driver_match_device(struct device *dev, | 21 | static inline int of_driver_match_device(struct device *dev, |
22 | const struct device_driver *drv) | 22 | const struct device_driver *drv) |
23 | { | 23 | { |
24 | dev->of_match = of_match_device(drv->of_match_table, dev); | 24 | return of_match_device(drv->of_match_table, dev) != NULL; |
25 | return dev->of_match != NULL; | ||
26 | } | 25 | } |
27 | 26 | ||
28 | extern struct platform_device *of_dev_get(struct platform_device *dev); | 27 | extern struct platform_device *of_dev_get(struct platform_device *dev); |
@@ -58,6 +57,11 @@ static inline int of_device_uevent(struct device *dev, | |||
58 | 57 | ||
59 | static inline void of_device_node_put(struct device *dev) { } | 58 | static inline void of_device_node_put(struct device *dev) { } |
60 | 59 | ||
60 | static inline const struct of_device_id *of_match_device( | ||
61 | const struct of_device_id *matches, const struct device *dev) | ||
62 | { | ||
63 | return NULL; | ||
64 | } | ||
61 | #endif /* CONFIG_OF_DEVICE */ | 65 | #endif /* CONFIG_OF_DEVICE */ |
62 | 66 | ||
63 | #endif /* _LINUX_OF_DEVICE_H */ | 67 | #endif /* _LINUX_OF_DEVICE_H */ |
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h new file mode 100644 index 000000000000..655824fa4c76 --- /dev/null +++ b/include/linux/pci-ats.h | |||
@@ -0,0 +1,52 @@ | |||
1 | #ifndef LINUX_PCI_ATS_H | ||
2 | #define LINUX_PCI_ATS_H | ||
3 | |||
4 | /* Address Translation Service */ | ||
5 | struct pci_ats { | ||
6 | int pos; /* capability position */ | ||
7 | int stu; /* Smallest Translation Unit */ | ||
8 | int qdep; /* Invalidate Queue Depth */ | ||
9 | int ref_cnt; /* Physical Function reference count */ | ||
10 | unsigned int is_enabled:1; /* Enable bit is set */ | ||
11 | }; | ||
12 | |||
13 | #ifdef CONFIG_PCI_IOV | ||
14 | |||
15 | extern int pci_enable_ats(struct pci_dev *dev, int ps); | ||
16 | extern void pci_disable_ats(struct pci_dev *dev); | ||
17 | extern int pci_ats_queue_depth(struct pci_dev *dev); | ||
18 | /** | ||
19 | * pci_ats_enabled - query the ATS status | ||
20 | * @dev: the PCI device | ||
21 | * | ||
22 | * Returns 1 if ATS capability is enabled, or 0 if not. | ||
23 | */ | ||
24 | static inline int pci_ats_enabled(struct pci_dev *dev) | ||
25 | { | ||
26 | return dev->ats && dev->ats->is_enabled; | ||
27 | } | ||
28 | |||
29 | #else /* CONFIG_PCI_IOV */ | ||
30 | |||
31 | static inline int pci_enable_ats(struct pci_dev *dev, int ps) | ||
32 | { | ||
33 | return -ENODEV; | ||
34 | } | ||
35 | |||
36 | static inline void pci_disable_ats(struct pci_dev *dev) | ||
37 | { | ||
38 | } | ||
39 | |||
40 | static inline int pci_ats_queue_depth(struct pci_dev *dev) | ||
41 | { | ||
42 | return -ENODEV; | ||
43 | } | ||
44 | |||
45 | static inline int pci_ats_enabled(struct pci_dev *dev) | ||
46 | { | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | #endif /* CONFIG_PCI_IOV */ | ||
51 | |||
52 | #endif /* LINUX_PCI_ATS_H*/ | ||
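Context note (not part of the diff): the new pci-ats.h gives IOMMU-type code a small API to enable ATS and query its state. A hedged sketch, assuming a caller named example_setup_ats() and a device pointer pdev:

#include <linux/pci.h>
#include <linux/pci-ats.h>

static int example_setup_ats(struct pci_dev *pdev)
{
	int ret;

	/* ps is the page shift of the smallest translation unit. */
	ret = pci_enable_ats(pdev, PAGE_SHIFT);
	if (ret)
		return ret;	/* e.g. -ENODEV when CONFIG_PCI_IOV is disabled */

	if (pci_ats_enabled(pdev))
		dev_info(&pdev->dev, "ATS on, invalidate queue depth %d\n",
			 pci_ats_queue_depth(pdev));
	return 0;
}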
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 3a5c4449fd36..8b97308e65df 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -948,7 +948,7 @@ do { \ | |||
948 | irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) | 948 | irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) |
949 | # endif | 949 | # endif |
950 | # define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | 950 | # define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
951 | __pcpu_double_call_return_int(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) | 951 | __pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) |
952 | #endif | 952 | #endif |
953 | 953 | ||
954 | #endif /* __LINUX_PERCPU_H */ | 954 | #endif /* __LINUX_PERCPU_H */ |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index ee9f1e782800..3412684ce5d5 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -2,8 +2,8 @@ | |||
2 | * Performance events: | 2 | * Performance events: |
3 | * | 3 | * |
4 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> | 4 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> |
5 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar | 5 | * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar |
6 | * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra | 6 | * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra |
7 | * | 7 | * |
8 | * Data type definitions, declarations, prototypes. | 8 | * Data type definitions, declarations, prototypes. |
9 | * | 9 | * |
@@ -52,6 +52,8 @@ enum perf_hw_id { | |||
52 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, | 52 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, |
53 | PERF_COUNT_HW_BRANCH_MISSES = 5, | 53 | PERF_COUNT_HW_BRANCH_MISSES = 5, |
54 | PERF_COUNT_HW_BUS_CYCLES = 6, | 54 | PERF_COUNT_HW_BUS_CYCLES = 6, |
55 | PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, | ||
56 | PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, | ||
55 | 57 | ||
56 | PERF_COUNT_HW_MAX, /* non-ABI */ | 58 | PERF_COUNT_HW_MAX, /* non-ABI */ |
57 | }; | 59 | }; |
@@ -468,9 +470,9 @@ enum perf_callchain_context { | |||
468 | PERF_CONTEXT_MAX = (__u64)-4095, | 470 | PERF_CONTEXT_MAX = (__u64)-4095, |
469 | }; | 471 | }; |
470 | 472 | ||
471 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) | 473 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) |
472 | #define PERF_FLAG_FD_OUTPUT (1U << 1) | 474 | #define PERF_FLAG_FD_OUTPUT (1U << 1) |
473 | #define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ | 475 | #define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ |
474 | 476 | ||
475 | #ifdef __KERNEL__ | 477 | #ifdef __KERNEL__ |
476 | /* | 478 | /* |
@@ -484,9 +486,9 @@ enum perf_callchain_context { | |||
484 | #endif | 486 | #endif |
485 | 487 | ||
486 | struct perf_guest_info_callbacks { | 488 | struct perf_guest_info_callbacks { |
487 | int (*is_in_guest) (void); | 489 | int (*is_in_guest)(void); |
488 | int (*is_user_mode) (void); | 490 | int (*is_user_mode)(void); |
489 | unsigned long (*get_guest_ip) (void); | 491 | unsigned long (*get_guest_ip)(void); |
490 | }; | 492 | }; |
491 | 493 | ||
492 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 494 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
@@ -505,7 +507,7 @@ struct perf_guest_info_callbacks { | |||
505 | #include <linux/ftrace.h> | 507 | #include <linux/ftrace.h> |
506 | #include <linux/cpu.h> | 508 | #include <linux/cpu.h> |
507 | #include <linux/irq_work.h> | 509 | #include <linux/irq_work.h> |
508 | #include <linux/jump_label_ref.h> | 510 | #include <linux/jump_label.h> |
509 | #include <asm/atomic.h> | 511 | #include <asm/atomic.h> |
510 | #include <asm/local.h> | 512 | #include <asm/local.h> |
511 | 513 | ||
@@ -652,19 +654,19 @@ struct pmu { | |||
652 | * Start the transaction, after this ->add() doesn't need to | 654 | * Start the transaction, after this ->add() doesn't need to |
653 | * do schedulability tests. | 655 | * do schedulability tests. |
654 | */ | 656 | */ |
655 | void (*start_txn) (struct pmu *pmu); /* optional */ | 657 | void (*start_txn) (struct pmu *pmu); /* optional */ |
656 | /* | 658 | /* |
657 | * If ->start_txn() disabled the ->add() schedulability test | 659 | * If ->start_txn() disabled the ->add() schedulability test |
658 | * then ->commit_txn() is required to perform one. On success | 660 | * then ->commit_txn() is required to perform one. On success |
659 | * the transaction is closed. On error the transaction is kept | 661 | * the transaction is closed. On error the transaction is kept |
660 | * open until ->cancel_txn() is called. | 662 | * open until ->cancel_txn() is called. |
661 | */ | 663 | */ |
662 | int (*commit_txn) (struct pmu *pmu); /* optional */ | 664 | int (*commit_txn) (struct pmu *pmu); /* optional */ |
663 | /* | 665 | /* |
664 | * Will cancel the transaction, assumes ->del() is called | 666 | * Will cancel the transaction, assumes ->del() is called |
665 | * for each successful ->add() during the transaction. | 667 | * for each successful ->add() during the transaction. |
666 | */ | 668 | */ |
667 | void (*cancel_txn) (struct pmu *pmu); /* optional */ | 669 | void (*cancel_txn) (struct pmu *pmu); /* optional */ |
668 | }; | 670 | }; |
669 | 671 | ||
670 | /** | 672 | /** |
@@ -712,15 +714,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int, | |||
712 | struct pt_regs *regs); | 714 | struct pt_regs *regs); |
713 | 715 | ||
714 | enum perf_group_flag { | 716 | enum perf_group_flag { |
715 | PERF_GROUP_SOFTWARE = 0x1, | 717 | PERF_GROUP_SOFTWARE = 0x1, |
716 | }; | 718 | }; |
717 | 719 | ||
718 | #define SWEVENT_HLIST_BITS 8 | 720 | #define SWEVENT_HLIST_BITS 8 |
719 | #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) | 721 | #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) |
720 | 722 | ||
721 | struct swevent_hlist { | 723 | struct swevent_hlist { |
722 | struct hlist_head heads[SWEVENT_HLIST_SIZE]; | 724 | struct hlist_head heads[SWEVENT_HLIST_SIZE]; |
723 | struct rcu_head rcu_head; | 725 | struct rcu_head rcu_head; |
724 | }; | 726 | }; |
725 | 727 | ||
726 | #define PERF_ATTACH_CONTEXT 0x01 | 728 | #define PERF_ATTACH_CONTEXT 0x01 |
@@ -733,13 +735,13 @@ struct swevent_hlist { | |||
733 | * This is a per-cpu dynamically allocated data structure. | 735 | * This is a per-cpu dynamically allocated data structure. |
734 | */ | 736 | */ |
735 | struct perf_cgroup_info { | 737 | struct perf_cgroup_info { |
736 | u64 time; | 738 | u64 time; |
737 | u64 timestamp; | 739 | u64 timestamp; |
738 | }; | 740 | }; |
739 | 741 | ||
740 | struct perf_cgroup { | 742 | struct perf_cgroup { |
741 | struct cgroup_subsys_state css; | 743 | struct cgroup_subsys_state css; |
742 | struct perf_cgroup_info *info; /* timing info, one per cpu */ | 744 | struct perf_cgroup_info *info; /* timing info, one per cpu */ |
743 | }; | 745 | }; |
744 | #endif | 746 | #endif |
745 | 747 | ||
@@ -923,7 +925,7 @@ struct perf_event_context { | |||
923 | 925 | ||
924 | /* | 926 | /* |
925 | * Number of contexts where an event can trigger: | 927 | * Number of contexts where an event can trigger: |
926 | * task, softirq, hardirq, nmi. | 928 | * task, softirq, hardirq, nmi. |
927 | */ | 929 | */ |
928 | #define PERF_NR_CONTEXTS 4 | 930 | #define PERF_NR_CONTEXTS 4 |
929 | 931 | ||
@@ -1001,8 +1003,7 @@ struct perf_sample_data { | |||
1001 | struct perf_raw_record *raw; | 1003 | struct perf_raw_record *raw; |
1002 | }; | 1004 | }; |
1003 | 1005 | ||
1004 | static inline | 1006 | static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr) |
1005 | void perf_sample_data_init(struct perf_sample_data *data, u64 addr) | ||
1006 | { | 1007 | { |
1007 | data->addr = addr; | 1008 | data->addr = addr; |
1008 | data->raw = NULL; | 1009 | data->raw = NULL; |
@@ -1034,13 +1035,12 @@ static inline int is_software_event(struct perf_event *event) | |||
1034 | return event->pmu->task_ctx_nr == perf_sw_context; | 1035 | return event->pmu->task_ctx_nr == perf_sw_context; |
1035 | } | 1036 | } |
1036 | 1037 | ||
1037 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 1038 | extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
1038 | 1039 | ||
1039 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); | 1040 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); |
1040 | 1041 | ||
1041 | #ifndef perf_arch_fetch_caller_regs | 1042 | #ifndef perf_arch_fetch_caller_regs |
1042 | static inline void | 1043 | static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } |
1043 | perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } | ||
1044 | #endif | 1044 | #endif |
1045 | 1045 | ||
1046 | /* | 1046 | /* |
@@ -1063,26 +1063,24 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | |||
1063 | { | 1063 | { |
1064 | struct pt_regs hot_regs; | 1064 | struct pt_regs hot_regs; |
1065 | 1065 | ||
1066 | JUMP_LABEL(&perf_swevent_enabled[event_id], have_event); | 1066 | if (static_branch(&perf_swevent_enabled[event_id])) { |
1067 | return; | 1067 | if (!regs) { |
1068 | 1068 | perf_fetch_caller_regs(&hot_regs); | |
1069 | have_event: | 1069 | regs = &hot_regs; |
1070 | if (!regs) { | 1070 | } |
1071 | perf_fetch_caller_regs(&hot_regs); | 1071 | __perf_sw_event(event_id, nr, nmi, regs, addr); |
1072 | regs = &hot_regs; | ||
1073 | } | 1072 | } |
1074 | __perf_sw_event(event_id, nr, nmi, regs, addr); | ||
1075 | } | 1073 | } |
1076 | 1074 | ||
1077 | extern atomic_t perf_sched_events; | 1075 | extern struct jump_label_key perf_sched_events; |
1078 | 1076 | ||
1079 | static inline void perf_event_task_sched_in(struct task_struct *task) | 1077 | static inline void perf_event_task_sched_in(struct task_struct *task) |
1080 | { | 1078 | { |
1081 | COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task)); | 1079 | if (static_branch(&perf_sched_events)) |
1080 | __perf_event_task_sched_in(task); | ||
1082 | } | 1081 | } |
1083 | 1082 | ||
1084 | static inline | 1083 | static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) |
1085 | void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) | ||
1086 | { | 1084 | { |
1087 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); | 1085 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); |
1088 | 1086 | ||
@@ -1100,14 +1098,10 @@ extern void perf_event_fork(struct task_struct *tsk); | |||
1100 | /* Callchains */ | 1098 | /* Callchains */ |
1101 | DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); | 1099 | DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); |
1102 | 1100 | ||
1103 | extern void perf_callchain_user(struct perf_callchain_entry *entry, | 1101 | extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); |
1104 | struct pt_regs *regs); | 1102 | extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); |
1105 | extern void perf_callchain_kernel(struct perf_callchain_entry *entry, | ||
1106 | struct pt_regs *regs); | ||
1107 | |||
1108 | 1103 | ||
1109 | static inline void | 1104 | static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) |
1110 | perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
1111 | { | 1105 | { |
1112 | if (entry->nr < PERF_MAX_STACK_DEPTH) | 1106 | if (entry->nr < PERF_MAX_STACK_DEPTH) |
1113 | entry->ip[entry->nr++] = ip; | 1107 | entry->ip[entry->nr++] = ip; |
@@ -1143,9 +1137,9 @@ extern void perf_tp_event(u64 addr, u64 count, void *record, | |||
1143 | extern void perf_bp_event(struct perf_event *event, void *data); | 1137 | extern void perf_bp_event(struct perf_event *event, void *data); |
1144 | 1138 | ||
1145 | #ifndef perf_misc_flags | 1139 | #ifndef perf_misc_flags |
1146 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ | 1140 | # define perf_misc_flags(regs) \ |
1147 | PERF_RECORD_MISC_KERNEL) | 1141 | (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) |
1148 | #define perf_instruction_pointer(regs) instruction_pointer(regs) | 1142 | # define perf_instruction_pointer(regs) instruction_pointer(regs) |
1149 | #endif | 1143 | #endif |
1150 | 1144 | ||
1151 | extern int perf_output_begin(struct perf_output_handle *handle, | 1145 | extern int perf_output_begin(struct perf_output_handle *handle, |
@@ -1180,9 +1174,9 @@ static inline void | |||
1180 | perf_bp_event(struct perf_event *event, void *data) { } | 1174 | perf_bp_event(struct perf_event *event, void *data) { } |
1181 | 1175 | ||
1182 | static inline int perf_register_guest_info_callbacks | 1176 | static inline int perf_register_guest_info_callbacks |
1183 | (struct perf_guest_info_callbacks *callbacks) { return 0; } | 1177 | (struct perf_guest_info_callbacks *callbacks) { return 0; } |
1184 | static inline int perf_unregister_guest_info_callbacks | 1178 | static inline int perf_unregister_guest_info_callbacks |
1185 | (struct perf_guest_info_callbacks *callbacks) { return 0; } | 1179 | (struct perf_guest_info_callbacks *callbacks) { return 0; } |
1186 | 1180 | ||
1187 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } | 1181 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } |
1188 | static inline void perf_event_comm(struct task_struct *tsk) { } | 1182 | static inline void perf_event_comm(struct task_struct *tsk) { } |
@@ -1195,23 +1189,22 @@ static inline void perf_event_disable(struct perf_event *event) { } | |||
1195 | static inline void perf_event_task_tick(void) { } | 1189 | static inline void perf_event_task_tick(void) { } |
1196 | #endif | 1190 | #endif |
1197 | 1191 | ||
1198 | #define perf_output_put(handle, x) \ | 1192 | #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) |
1199 | perf_output_copy((handle), &(x), sizeof(x)) | ||
1200 | 1193 | ||
1201 | /* | 1194 | /* |
1202 | * This has to have a higher priority than migration_notifier in sched.c. | 1195 | * This has to have a higher priority than migration_notifier in sched.c. |
1203 | */ | 1196 | */ |
1204 | #define perf_cpu_notifier(fn) \ | 1197 | #define perf_cpu_notifier(fn) \ |
1205 | do { \ | 1198 | do { \ |
1206 | static struct notifier_block fn##_nb __cpuinitdata = \ | 1199 | static struct notifier_block fn##_nb __cpuinitdata = \ |
1207 | { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ | 1200 | { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ |
1208 | fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ | 1201 | fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ |
1209 | (void *)(unsigned long)smp_processor_id()); \ | 1202 | (void *)(unsigned long)smp_processor_id()); \ |
1210 | fn(&fn##_nb, (unsigned long)CPU_STARTING, \ | 1203 | fn(&fn##_nb, (unsigned long)CPU_STARTING, \ |
1211 | (void *)(unsigned long)smp_processor_id()); \ | 1204 | (void *)(unsigned long)smp_processor_id()); \ |
1212 | fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ | 1205 | fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ |
1213 | (void *)(unsigned long)smp_processor_id()); \ | 1206 | (void *)(unsigned long)smp_processor_id()); \ |
1214 | register_cpu_notifier(&fn##_nb); \ | 1207 | register_cpu_notifier(&fn##_nb); \ |
1215 | } while (0) | 1208 | } while (0) |
1216 | 1209 | ||
1217 | #endif /* __KERNEL__ */ | 1210 | #endif /* __KERNEL__ */ |
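Context note (not part of the diff): the perf hunks above replace the old JUMP_LABEL()/COND_STMT() pattern with struct jump_label_key plus static_branch(). A minimal sketch of that pattern with made-up names (my_feature_key, my_hot_path, my_expensive_accounting):

#include <linux/jump_label.h>

static struct jump_label_key my_feature_key;	/* starts disabled */

static void my_expensive_accounting(void)
{
	/* placeholder for the rarely-needed slow work */
}

static inline void my_hot_path(void)
{
	/* compiles to a patched no-op branch while the key is disabled */
	if (static_branch(&my_feature_key))
		my_expensive_accounting();
}

/* Slow path toggles the branch by patching the code at runtime. */
void my_feature_enable(void)  { jump_label_inc(&my_feature_key); }
void my_feature_disable(void) { jump_label_dec(&my_feature_key); }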
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 744942c95fec..ede1a80e3358 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -150,9 +150,6 @@ extern struct platform_device *platform_create_bundle(struct platform_driver *dr | |||
150 | struct resource *res, unsigned int n_res, | 150 | struct resource *res, unsigned int n_res, |
151 | const void *data, size_t size); | 151 | const void *data, size_t size); |
152 | 152 | ||
153 | extern const struct dev_pm_ops * platform_bus_get_pm_ops(void); | ||
154 | extern void platform_bus_set_pm_ops(const struct dev_pm_ops *pm); | ||
155 | |||
156 | /* early platform driver interface */ | 153 | /* early platform driver interface */ |
157 | struct early_platform_driver { | 154 | struct early_platform_driver { |
158 | const char *class_str; | 155 | const char *class_str; |
@@ -205,4 +202,64 @@ static inline char *early_platform_driver_setup_func(void) \ | |||
205 | } | 202 | } |
206 | #endif /* MODULE */ | 203 | #endif /* MODULE */ |
207 | 204 | ||
205 | #ifdef CONFIG_PM_SLEEP | ||
206 | extern int platform_pm_prepare(struct device *dev); | ||
207 | extern void platform_pm_complete(struct device *dev); | ||
208 | #else | ||
209 | #define platform_pm_prepare NULL | ||
210 | #define platform_pm_complete NULL | ||
211 | #endif | ||
212 | |||
213 | #ifdef CONFIG_SUSPEND | ||
214 | extern int platform_pm_suspend(struct device *dev); | ||
215 | extern int platform_pm_suspend_noirq(struct device *dev); | ||
216 | extern int platform_pm_resume(struct device *dev); | ||
217 | extern int platform_pm_resume_noirq(struct device *dev); | ||
218 | #else | ||
219 | #define platform_pm_suspend NULL | ||
220 | #define platform_pm_resume NULL | ||
221 | #define platform_pm_suspend_noirq NULL | ||
222 | #define platform_pm_resume_noirq NULL | ||
223 | #endif | ||
224 | |||
225 | #ifdef CONFIG_HIBERNATE_CALLBACKS | ||
226 | extern int platform_pm_freeze(struct device *dev); | ||
227 | extern int platform_pm_freeze_noirq(struct device *dev); | ||
228 | extern int platform_pm_thaw(struct device *dev); | ||
229 | extern int platform_pm_thaw_noirq(struct device *dev); | ||
230 | extern int platform_pm_poweroff(struct device *dev); | ||
231 | extern int platform_pm_poweroff_noirq(struct device *dev); | ||
232 | extern int platform_pm_restore(struct device *dev); | ||
233 | extern int platform_pm_restore_noirq(struct device *dev); | ||
234 | #else | ||
235 | #define platform_pm_freeze NULL | ||
236 | #define platform_pm_thaw NULL | ||
237 | #define platform_pm_poweroff NULL | ||
238 | #define platform_pm_restore NULL | ||
239 | #define platform_pm_freeze_noirq NULL | ||
240 | #define platform_pm_thaw_noirq NULL | ||
241 | #define platform_pm_poweroff_noirq NULL | ||
242 | #define platform_pm_restore_noirq NULL | ||
243 | #endif | ||
244 | |||
245 | #ifdef CONFIG_PM_SLEEP | ||
246 | #define USE_PLATFORM_PM_SLEEP_OPS \ | ||
247 | .prepare = platform_pm_prepare, \ | ||
248 | .complete = platform_pm_complete, \ | ||
249 | .suspend = platform_pm_suspend, \ | ||
250 | .resume = platform_pm_resume, \ | ||
251 | .freeze = platform_pm_freeze, \ | ||
252 | .thaw = platform_pm_thaw, \ | ||
253 | .poweroff = platform_pm_poweroff, \ | ||
254 | .restore = platform_pm_restore, \ | ||
255 | .suspend_noirq = platform_pm_suspend_noirq, \ | ||
256 | .resume_noirq = platform_pm_resume_noirq, \ | ||
257 | .freeze_noirq = platform_pm_freeze_noirq, \ | ||
258 | .thaw_noirq = platform_pm_thaw_noirq, \ | ||
259 | .poweroff_noirq = platform_pm_poweroff_noirq, \ | ||
260 | .restore_noirq = platform_pm_restore_noirq, | ||
261 | #else | ||
262 | #define USE_PLATFORM_PM_SLEEP_OPS | ||
263 | #endif | ||
264 | |||
208 | #endif /* _PLATFORM_DEVICE_H_ */ | 265 | #endif /* _PLATFORM_DEVICE_H_ */ |
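Context note (not part of the diff): USE_PLATFORM_PM_SLEEP_OPS is meant to drop into a dev_pm_ops initializer, expanding to the system-sleep callbacks declared above (or to nothing without CONFIG_PM_SLEEP). A sketch with assumed names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int my_runtime_suspend(struct device *dev) { return 0; }	/* illustrative */
static int my_runtime_resume(struct device *dev)  { return 0; }	/* illustrative */

static const struct dev_pm_ops my_bus_pm_ops = {
	.runtime_suspend = my_runtime_suspend,
	.runtime_resume	 = my_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS	/* .suspend/.resume/.freeze/... or empty */
};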
diff --git a/include/linux/pm.h b/include/linux/pm.h index 512e09177e57..3160648ccdda 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -460,6 +460,7 @@ struct dev_pm_info { | |||
460 | unsigned long active_jiffies; | 460 | unsigned long active_jiffies; |
461 | unsigned long suspended_jiffies; | 461 | unsigned long suspended_jiffies; |
462 | unsigned long accounting_timestamp; | 462 | unsigned long accounting_timestamp; |
463 | void *subsys_data; /* Owned by the subsystem. */ | ||
463 | #endif | 464 | #endif |
464 | }; | 465 | }; |
465 | 466 | ||
@@ -529,21 +530,17 @@ struct dev_power_domain { | |||
529 | */ | 530 | */ |
530 | 531 | ||
531 | #ifdef CONFIG_PM_SLEEP | 532 | #ifdef CONFIG_PM_SLEEP |
532 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | ||
533 | extern int sysdev_suspend(pm_message_t state); | ||
534 | extern int sysdev_resume(void); | ||
535 | #else | ||
536 | static inline int sysdev_suspend(pm_message_t state) { return 0; } | ||
537 | static inline int sysdev_resume(void) { return 0; } | ||
538 | #endif | ||
539 | |||
540 | extern void device_pm_lock(void); | 533 | extern void device_pm_lock(void); |
541 | extern void dpm_resume_noirq(pm_message_t state); | 534 | extern void dpm_resume_noirq(pm_message_t state); |
542 | extern void dpm_resume_end(pm_message_t state); | 535 | extern void dpm_resume_end(pm_message_t state); |
536 | extern void dpm_resume(pm_message_t state); | ||
537 | extern void dpm_complete(pm_message_t state); | ||
543 | 538 | ||
544 | extern void device_pm_unlock(void); | 539 | extern void device_pm_unlock(void); |
545 | extern int dpm_suspend_noirq(pm_message_t state); | 540 | extern int dpm_suspend_noirq(pm_message_t state); |
546 | extern int dpm_suspend_start(pm_message_t state); | 541 | extern int dpm_suspend_start(pm_message_t state); |
542 | extern int dpm_suspend(pm_message_t state); | ||
543 | extern int dpm_prepare(pm_message_t state); | ||
547 | 544 | ||
548 | extern void __suspend_report_result(const char *function, void *fn, int ret); | 545 | extern void __suspend_report_result(const char *function, void *fn, int ret); |
549 | 546 | ||
@@ -553,6 +550,16 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); | |||
553 | } while (0) | 550 | } while (0) |
554 | 551 | ||
555 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); | 552 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); |
553 | |||
554 | extern int pm_generic_prepare(struct device *dev); | ||
555 | extern int pm_generic_suspend(struct device *dev); | ||
556 | extern int pm_generic_resume(struct device *dev); | ||
557 | extern int pm_generic_freeze(struct device *dev); | ||
558 | extern int pm_generic_thaw(struct device *dev); | ||
559 | extern int pm_generic_restore(struct device *dev); | ||
560 | extern int pm_generic_poweroff(struct device *dev); | ||
561 | extern void pm_generic_complete(struct device *dev); | ||
562 | |||
556 | #else /* !CONFIG_PM_SLEEP */ | 563 | #else /* !CONFIG_PM_SLEEP */ |
557 | 564 | ||
558 | #define device_pm_lock() do {} while (0) | 565 | #define device_pm_lock() do {} while (0) |
@@ -569,6 +576,15 @@ static inline int device_pm_wait_for_dev(struct device *a, struct device *b) | |||
569 | { | 576 | { |
570 | return 0; | 577 | return 0; |
571 | } | 578 | } |
579 | |||
580 | #define pm_generic_prepare NULL | ||
581 | #define pm_generic_suspend NULL | ||
582 | #define pm_generic_resume NULL | ||
583 | #define pm_generic_freeze NULL | ||
584 | #define pm_generic_thaw NULL | ||
585 | #define pm_generic_restore NULL | ||
586 | #define pm_generic_poweroff NULL | ||
587 | #define pm_generic_complete NULL | ||
572 | #endif /* !CONFIG_PM_SLEEP */ | 588 | #endif /* !CONFIG_PM_SLEEP */ |
573 | 589 | ||
574 | /* How to reorder dpm_list after device_move() */ | 590 | /* How to reorder dpm_list after device_move() */ |
@@ -579,11 +595,4 @@ enum dpm_order { | |||
579 | DPM_ORDER_DEV_LAST, | 595 | DPM_ORDER_DEV_LAST, |
580 | }; | 596 | }; |
581 | 597 | ||
582 | extern int pm_generic_suspend(struct device *dev); | ||
583 | extern int pm_generic_resume(struct device *dev); | ||
584 | extern int pm_generic_freeze(struct device *dev); | ||
585 | extern int pm_generic_thaw(struct device *dev); | ||
586 | extern int pm_generic_restore(struct device *dev); | ||
587 | extern int pm_generic_poweroff(struct device *dev); | ||
588 | |||
589 | #endif /* _LINUX_PM_H */ | 598 | #endif /* _LINUX_PM_H */ |
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 8de9aa6e7def..878cf84baeb1 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
@@ -245,4 +245,46 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev) | |||
245 | __pm_runtime_use_autosuspend(dev, false); | 245 | __pm_runtime_use_autosuspend(dev, false); |
246 | } | 246 | } |
247 | 247 | ||
248 | struct pm_clk_notifier_block { | ||
249 | struct notifier_block nb; | ||
250 | struct dev_power_domain *pwr_domain; | ||
251 | char *con_ids[]; | ||
252 | }; | ||
253 | |||
254 | #ifdef CONFIG_PM_RUNTIME_CLK | ||
255 | extern int pm_runtime_clk_init(struct device *dev); | ||
256 | extern void pm_runtime_clk_destroy(struct device *dev); | ||
257 | extern int pm_runtime_clk_add(struct device *dev, const char *con_id); | ||
258 | extern void pm_runtime_clk_remove(struct device *dev, const char *con_id); | ||
259 | extern int pm_runtime_clk_suspend(struct device *dev); | ||
260 | extern int pm_runtime_clk_resume(struct device *dev); | ||
261 | #else | ||
262 | static inline int pm_runtime_clk_init(struct device *dev) | ||
263 | { | ||
264 | return -EINVAL; | ||
265 | } | ||
266 | static inline void pm_runtime_clk_destroy(struct device *dev) | ||
267 | { | ||
268 | } | ||
269 | static inline int pm_runtime_clk_add(struct device *dev, const char *con_id) | ||
270 | { | ||
271 | return -EINVAL; | ||
272 | } | ||
273 | static inline void pm_runtime_clk_remove(struct device *dev, const char *con_id) | ||
274 | { | ||
275 | } | ||
276 | #define pm_runtime_clock_suspend NULL | ||
277 | #define pm_runtime_clock_resume NULL | ||
278 | #endif | ||
279 | |||
280 | #ifdef CONFIG_HAVE_CLK | ||
281 | extern void pm_runtime_clk_add_notifier(struct bus_type *bus, | ||
282 | struct pm_clk_notifier_block *clknb); | ||
283 | #else | ||
284 | static inline void pm_runtime_clk_add_notifier(struct bus_type *bus, | ||
285 | struct pm_clk_notifier_block *clknb) | ||
286 | { | ||
287 | } | ||
288 | #endif | ||
289 | |||
248 | #endif | 290 | #endif |
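Context note (not part of the diff): a rough sketch, loosely modeled on SoC code of this period, of registering a pm_clk_notifier_block for the platform bus; my_pwr_domain, my_clk_notifier, and the initcall are assumptions, and it presumes CONFIG_PM_RUNTIME_CLK is set:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static struct dev_power_domain my_pwr_domain = {
	.ops = {
		.runtime_suspend = pm_runtime_clk_suspend,	/* from CONFIG_PM_RUNTIME_CLK */
		.runtime_resume	 = pm_runtime_clk_resume,
	},
};

static struct pm_clk_notifier_block my_clk_notifier = {
	.pwr_domain = &my_pwr_domain,
	.con_ids = { NULL, },	/* NULL-terminated: manage every clock of the device */
};

static int __init my_pm_runtime_init(void)
{
	pm_runtime_clk_add_notifier(&platform_bus_type, &my_clk_notifier);
	return 0;
}
core_initcall(my_pm_runtime_init);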
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 838c1149251a..eaf4350c0f90 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
@@ -208,6 +208,8 @@ static inline struct proc_dir_entry *proc_symlink(const char *name, | |||
208 | struct proc_dir_entry *parent,const char *dest) {return NULL;} | 208 | struct proc_dir_entry *parent,const char *dest) {return NULL;} |
209 | static inline struct proc_dir_entry *proc_mkdir(const char *name, | 209 | static inline struct proc_dir_entry *proc_mkdir(const char *name, |
210 | struct proc_dir_entry *parent) {return NULL;} | 210 | struct proc_dir_entry *parent) {return NULL;} |
211 | static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, | ||
212 | mode_t mode, struct proc_dir_entry *parent) { return NULL; } | ||
211 | 213 | ||
212 | static inline struct proc_dir_entry *create_proc_read_entry(const char *name, | 214 | static inline struct proc_dir_entry *create_proc_read_entry(const char *name, |
213 | mode_t mode, struct proc_dir_entry *base, | 215 | mode_t mode, struct proc_dir_entry *base, |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index a1147e5dd245..9178d5cc0b01 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -189,6 +189,10 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) | |||
189 | child->ptrace = current->ptrace; | 189 | child->ptrace = current->ptrace; |
190 | __ptrace_link(child, current->parent); | 190 | __ptrace_link(child, current->parent); |
191 | } | 191 | } |
192 | |||
193 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
194 | atomic_set(&child->ptrace_bp_refcnt, 1); | ||
195 | #endif | ||
192 | } | 196 | } |
193 | 197 | ||
194 | /** | 198 | /** |
@@ -350,6 +354,13 @@ extern int task_current_syscall(struct task_struct *target, long *callno, | |||
350 | unsigned long args[6], unsigned int maxargs, | 354 | unsigned long args[6], unsigned int maxargs, |
351 | unsigned long *sp, unsigned long *pc); | 355 | unsigned long *sp, unsigned long *pc); |
352 | 356 | ||
353 | #endif | 357 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
358 | extern int ptrace_get_breakpoints(struct task_struct *tsk); | ||
359 | extern void ptrace_put_breakpoints(struct task_struct *tsk); | ||
360 | #else | ||
361 | static inline void ptrace_put_breakpoints(struct task_struct *tsk) { } | ||
362 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | ||
363 | |||
364 | #endif /* __KERNEL */ | ||
354 | 365 | ||
355 | #endif | 366 | #endif |
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 2dea94fc4402..e3beb315517a 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -253,7 +253,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
253 | */ | 253 | */ |
254 | #define list_for_each_entry_rcu(pos, head, member) \ | 254 | #define list_for_each_entry_rcu(pos, head, member) \ |
255 | for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ | 255 | for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ |
256 | prefetch(pos->member.next), &pos->member != (head); \ | 256 | &pos->member != (head); \ |
257 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) | 257 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
258 | 258 | ||
259 | 259 | ||
@@ -270,7 +270,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
270 | */ | 270 | */ |
271 | #define list_for_each_continue_rcu(pos, head) \ | 271 | #define list_for_each_continue_rcu(pos, head) \ |
272 | for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \ | 272 | for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \ |
273 | prefetch((pos)->next), (pos) != (head); \ | 273 | (pos) != (head); \ |
274 | (pos) = rcu_dereference_raw(list_next_rcu(pos))) | 274 | (pos) = rcu_dereference_raw(list_next_rcu(pos))) |
275 | 275 | ||
276 | /** | 276 | /** |
@@ -284,7 +284,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
284 | */ | 284 | */ |
285 | #define list_for_each_entry_continue_rcu(pos, head, member) \ | 285 | #define list_for_each_entry_continue_rcu(pos, head, member) \ |
286 | for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ | 286 | for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ |
287 | prefetch(pos->member.next), &pos->member != (head); \ | 287 | &pos->member != (head); \ |
288 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) | 288 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
289 | 289 | ||
290 | /** | 290 | /** |
@@ -427,7 +427,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
427 | 427 | ||
428 | #define __hlist_for_each_rcu(pos, head) \ | 428 | #define __hlist_for_each_rcu(pos, head) \ |
429 | for (pos = rcu_dereference(hlist_first_rcu(head)); \ | 429 | for (pos = rcu_dereference(hlist_first_rcu(head)); \ |
430 | pos && ({ prefetch(pos->next); 1; }); \ | 430 | pos; \ |
431 | pos = rcu_dereference(hlist_next_rcu(pos))) | 431 | pos = rcu_dereference(hlist_next_rcu(pos))) |
432 | 432 | ||
433 | /** | 433 | /** |
@@ -443,7 +443,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
443 | */ | 443 | */ |
444 | #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ | 444 | #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ |
445 | for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ | 445 | for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ |
446 | pos && ({ prefetch(pos->next); 1; }) && \ | 446 | pos && \ |
447 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 447 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
448 | pos = rcu_dereference_raw(hlist_next_rcu(pos))) | 448 | pos = rcu_dereference_raw(hlist_next_rcu(pos))) |
449 | 449 | ||
@@ -460,7 +460,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
460 | */ | 460 | */ |
461 | #define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ | 461 | #define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ |
462 | for (pos = rcu_dereference_bh((head)->first); \ | 462 | for (pos = rcu_dereference_bh((head)->first); \ |
463 | pos && ({ prefetch(pos->next); 1; }) && \ | 463 | pos && \ |
464 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 464 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
465 | pos = rcu_dereference_bh(pos->next)) | 465 | pos = rcu_dereference_bh(pos->next)) |
466 | 466 | ||
@@ -472,7 +472,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
472 | */ | 472 | */ |
473 | #define hlist_for_each_entry_continue_rcu(tpos, pos, member) \ | 473 | #define hlist_for_each_entry_continue_rcu(tpos, pos, member) \ |
474 | for (pos = rcu_dereference((pos)->next); \ | 474 | for (pos = rcu_dereference((pos)->next); \ |
475 | pos && ({ prefetch(pos->next); 1; }) && \ | 475 | pos && \ |
476 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 476 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
477 | pos = rcu_dereference(pos->next)) | 477 | pos = rcu_dereference(pos->next)) |
478 | 478 | ||
@@ -484,7 +484,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
484 | */ | 484 | */ |
485 | #define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ | 485 | #define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ |
486 | for (pos = rcu_dereference_bh((pos)->next); \ | 486 | for (pos = rcu_dereference_bh((pos)->next); \ |
487 | pos && ({ prefetch(pos->next); 1; }) && \ | 487 | pos && \ |
488 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 488 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
489 | pos = rcu_dereference_bh(pos->next)) | 489 | pos = rcu_dereference_bh(pos->next)) |
490 | 490 | ||
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ff422d2b7f90..99f9aa7c2804 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -47,6 +47,18 @@ | |||
47 | extern int rcutorture_runnable; /* for sysctl */ | 47 | extern int rcutorture_runnable; /* for sysctl */ |
48 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ | 48 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ |
49 | 49 | ||
50 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | ||
51 | extern void rcutorture_record_test_transition(void); | ||
52 | extern void rcutorture_record_progress(unsigned long vernum); | ||
53 | #else | ||
54 | static inline void rcutorture_record_test_transition(void) | ||
55 | { | ||
56 | } | ||
57 | static inline void rcutorture_record_progress(unsigned long vernum) | ||
58 | { | ||
59 | } | ||
60 | #endif | ||
61 | |||
50 | #define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) | 62 | #define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) |
51 | #define UINT_CMP_LT(a, b) (UINT_MAX / 2 < (a) - (b)) | 63 | #define UINT_CMP_LT(a, b) (UINT_MAX / 2 < (a) - (b)) |
52 | #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) | 64 | #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) |
@@ -68,7 +80,6 @@ extern void call_rcu_sched(struct rcu_head *head, | |||
68 | extern void synchronize_sched(void); | 80 | extern void synchronize_sched(void); |
69 | extern void rcu_barrier_bh(void); | 81 | extern void rcu_barrier_bh(void); |
70 | extern void rcu_barrier_sched(void); | 82 | extern void rcu_barrier_sched(void); |
71 | extern int sched_expedited_torture_stats(char *page); | ||
72 | 83 | ||
73 | static inline void __rcu_read_lock_bh(void) | 84 | static inline void __rcu_read_lock_bh(void) |
74 | { | 85 | { |
@@ -774,6 +785,7 @@ extern struct debug_obj_descr rcuhead_debug_descr; | |||
774 | 785 | ||
775 | static inline void debug_rcu_head_queue(struct rcu_head *head) | 786 | static inline void debug_rcu_head_queue(struct rcu_head *head) |
776 | { | 787 | { |
788 | WARN_ON_ONCE((unsigned long)head & 0x3); | ||
777 | debug_object_activate(head, &rcuhead_debug_descr); | 789 | debug_object_activate(head, &rcuhead_debug_descr); |
778 | debug_object_active_state(head, &rcuhead_debug_descr, | 790 | debug_object_active_state(head, &rcuhead_debug_descr, |
779 | STATE_RCU_HEAD_READY, | 791 | STATE_RCU_HEAD_READY, |
@@ -797,4 +809,60 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head) | |||
797 | } | 809 | } |
798 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | 810 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
799 | 811 | ||
812 | static __always_inline bool __is_kfree_rcu_offset(unsigned long offset) | ||
813 | { | ||
814 | return offset < 4096; | ||
815 | } | ||
816 | |||
817 | static __always_inline | ||
818 | void __kfree_rcu(struct rcu_head *head, unsigned long offset) | ||
819 | { | ||
820 | typedef void (*rcu_callback)(struct rcu_head *); | ||
821 | |||
822 | BUILD_BUG_ON(!__builtin_constant_p(offset)); | ||
823 | |||
824 | /* See the kfree_rcu() header comment. */ | ||
825 | BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); | ||
826 | |||
827 | call_rcu(head, (rcu_callback)offset); | ||
828 | } | ||
829 | |||
830 | extern void kfree(const void *); | ||
831 | |||
832 | static inline void __rcu_reclaim(struct rcu_head *head) | ||
833 | { | ||
834 | unsigned long offset = (unsigned long)head->func; | ||
835 | |||
836 | if (__is_kfree_rcu_offset(offset)) | ||
837 | kfree((void *)head - offset); | ||
838 | else | ||
839 | head->func(head); | ||
840 | } | ||
841 | |||
842 | /** | ||
843 | * kfree_rcu() - kfree an object after a grace period. | ||
844 | * @ptr: pointer to kfree | ||
845 | * @rcu_head: the name of the struct rcu_head within the type of @ptr. | ||
846 | * | ||
847 | * Many rcu callbacks functions just call kfree() on the base structure. | ||
848 | * These functions are trivial, but their size adds up, and furthermore | ||
849 | * when they are used in a kernel module, that module must invoke the | ||
850 | * high-latency rcu_barrier() function at module-unload time. | ||
851 | * | ||
852 | * The kfree_rcu() function handles this issue. Rather than encoding a | ||
853 | * function address in the embedded rcu_head structure, kfree_rcu() instead | ||
854 | * encodes the offset of the rcu_head structure within the base structure. | ||
855 | * Because the functions are not allowed in the low-order 4096 bytes of | ||
856 | * kernel virtual memory, offsets up to 4095 bytes can be accommodated. | ||
857 | * If the offset is larger than 4095 bytes, a compile-time error will | ||
858 | * be generated in __kfree_rcu(). If this error is triggered, you can | ||
859 | * either fall back to use of call_rcu() or rearrange the structure to | ||
860 | * position the rcu_head structure into the first 4096 bytes. | ||
861 | * | ||
862 | * Note that the allowable offset might decrease in the future, for example, | ||
863 | * to allow something like kmem_cache_free_rcu(). | ||
864 | */ | ||
865 | #define kfree_rcu(ptr, rcu_head) \ | ||
866 | __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) | ||
867 | |||
800 | #endif /* __LINUX_RCUPDATE_H */ | 868 | #endif /* __LINUX_RCUPDATE_H */ |
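Context note (not part of the diff): a minimal example of the new kfree_rcu() helper documented above, with an illustrative struct foo:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;	/* must sit within the first 4096 bytes */
};

static void foo_release(struct foo *p)
{
	/* Frees p after a grace period, with no per-type callback needed. */
	kfree_rcu(p, rcu);
}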
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 30ebd7c8d874..52b3e0281fd0 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -100,6 +100,14 @@ static inline void rcu_note_context_switch(int cpu) | |||
100 | } | 100 | } |
101 | 101 | ||
102 | /* | 102 | /* |
103 | * Take advantage of the fact that there is only one CPU, which | ||
104 | * allows us to ignore virtualization-based context switches. | ||
105 | */ | ||
106 | static inline void rcu_virt_note_context_switch(int cpu) | ||
107 | { | ||
108 | } | ||
109 | |||
110 | /* | ||
103 | * Return the number of grace periods. | 111 | * Return the number of grace periods. |
104 | */ | 112 | */ |
105 | static inline long rcu_batches_completed(void) | 113 | static inline long rcu_batches_completed(void) |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 3a933482734a..e65d06634dd8 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -35,6 +35,16 @@ extern void rcu_note_context_switch(int cpu); | |||
35 | extern int rcu_needs_cpu(int cpu); | 35 | extern int rcu_needs_cpu(int cpu); |
36 | extern void rcu_cpu_stall_reset(void); | 36 | extern void rcu_cpu_stall_reset(void); |
37 | 37 | ||
38 | /* | ||
39 | * Note a virtualization-based context switch. This is simply a | ||
40 | * wrapper around rcu_note_context_switch(), which allows TINY_RCU | ||
41 | * to save a few bytes. | ||
42 | */ | ||
43 | static inline void rcu_virt_note_context_switch(int cpu) | ||
44 | { | ||
45 | rcu_note_context_switch(cpu); | ||
46 | } | ||
47 | |||
38 | #ifdef CONFIG_TREE_PREEMPT_RCU | 48 | #ifdef CONFIG_TREE_PREEMPT_RCU |
39 | 49 | ||
40 | extern void exit_rcu(void); | 50 | extern void exit_rcu(void); |
@@ -58,9 +68,12 @@ static inline void synchronize_rcu_bh_expedited(void) | |||
58 | 68 | ||
59 | extern void rcu_barrier(void); | 69 | extern void rcu_barrier(void); |
60 | 70 | ||
71 | extern unsigned long rcutorture_testseq; | ||
72 | extern unsigned long rcutorture_vernum; | ||
61 | extern long rcu_batches_completed(void); | 73 | extern long rcu_batches_completed(void); |
62 | extern long rcu_batches_completed_bh(void); | 74 | extern long rcu_batches_completed_bh(void); |
63 | extern long rcu_batches_completed_sched(void); | 75 | extern long rcu_batches_completed_sched(void); |
76 | |||
64 | extern void rcu_force_quiescent_state(void); | 77 | extern void rcu_force_quiescent_state(void); |
65 | extern void rcu_bh_force_quiescent_state(void); | 78 | extern void rcu_bh_force_quiescent_state(void); |
66 | extern void rcu_sched_force_quiescent_state(void); | 79 | extern void rcu_sched_force_quiescent_state(void); |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 18d63cea2848..12211e1666e2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -360,7 +360,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout); | |||
360 | extern signed long schedule_timeout_killable(signed long timeout); | 360 | extern signed long schedule_timeout_killable(signed long timeout); |
361 | extern signed long schedule_timeout_uninterruptible(signed long timeout); | 361 | extern signed long schedule_timeout_uninterruptible(signed long timeout); |
362 | asmlinkage void schedule(void); | 362 | asmlinkage void schedule(void); |
363 | extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); | 363 | extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner); |
364 | 364 | ||
365 | struct nsproxy; | 365 | struct nsproxy; |
366 | struct user_namespace; | 366 | struct user_namespace; |
@@ -731,10 +731,6 @@ struct sched_info { | |||
731 | /* timestamps */ | 731 | /* timestamps */ |
732 | unsigned long long last_arrival,/* when we last ran on a cpu */ | 732 | unsigned long long last_arrival,/* when we last ran on a cpu */ |
733 | last_queued; /* when we were last queued to run */ | 733 | last_queued; /* when we were last queued to run */ |
734 | #ifdef CONFIG_SCHEDSTATS | ||
735 | /* BKL stats */ | ||
736 | unsigned int bkl_count; | ||
737 | #endif | ||
738 | }; | 734 | }; |
739 | #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ | 735 | #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ |
740 | 736 | ||
@@ -868,6 +864,7 @@ static inline int sd_power_saving_flags(void) | |||
868 | 864 | ||
869 | struct sched_group { | 865 | struct sched_group { |
870 | struct sched_group *next; /* Must be a circular list */ | 866 | struct sched_group *next; /* Must be a circular list */ |
867 | atomic_t ref; | ||
871 | 868 | ||
872 | /* | 869 | /* |
873 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a | 870 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a |
@@ -882,9 +879,6 @@ struct sched_group { | |||
882 | * NOTE: this field is variable length. (Allocated dynamically | 879 | * NOTE: this field is variable length. (Allocated dynamically |
883 | * by attaching extra space to the end of the structure, | 880 | * by attaching extra space to the end of the structure, |
884 | * depending on how many CPUs the kernel has booted up with) | 881 | * depending on how many CPUs the kernel has booted up with) |
885 | * | ||
886 | * It is also be embedded into static data structures at build | ||
887 | * time. (See 'struct static_sched_group' in kernel/sched.c) | ||
888 | */ | 882 | */ |
889 | unsigned long cpumask[0]; | 883 | unsigned long cpumask[0]; |
890 | }; | 884 | }; |
@@ -894,17 +888,6 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | |||
894 | return to_cpumask(sg->cpumask); | 888 | return to_cpumask(sg->cpumask); |
895 | } | 889 | } |
896 | 890 | ||
897 | enum sched_domain_level { | ||
898 | SD_LV_NONE = 0, | ||
899 | SD_LV_SIBLING, | ||
900 | SD_LV_MC, | ||
901 | SD_LV_BOOK, | ||
902 | SD_LV_CPU, | ||
903 | SD_LV_NODE, | ||
904 | SD_LV_ALLNODES, | ||
905 | SD_LV_MAX | ||
906 | }; | ||
907 | |||
908 | struct sched_domain_attr { | 891 | struct sched_domain_attr { |
909 | int relax_domain_level; | 892 | int relax_domain_level; |
910 | }; | 893 | }; |
@@ -913,6 +896,8 @@ struct sched_domain_attr { | |||
913 | .relax_domain_level = -1, \ | 896 | .relax_domain_level = -1, \ |
914 | } | 897 | } |
915 | 898 | ||
899 | extern int sched_domain_level_max; | ||
900 | |||
916 | struct sched_domain { | 901 | struct sched_domain { |
917 | /* These fields must be setup */ | 902 | /* These fields must be setup */ |
918 | struct sched_domain *parent; /* top domain must be null terminated */ | 903 | struct sched_domain *parent; /* top domain must be null terminated */ |
@@ -930,7 +915,7 @@ struct sched_domain { | |||
930 | unsigned int forkexec_idx; | 915 | unsigned int forkexec_idx; |
931 | unsigned int smt_gain; | 916 | unsigned int smt_gain; |
932 | int flags; /* See SD_* */ | 917 | int flags; /* See SD_* */ |
933 | enum sched_domain_level level; | 918 | int level; |
934 | 919 | ||
935 | /* Runtime fields. */ | 920 | /* Runtime fields. */ |
936 | unsigned long last_balance; /* init to jiffies. units in jiffies */ | 921 | unsigned long last_balance; /* init to jiffies. units in jiffies */ |
@@ -973,6 +958,10 @@ struct sched_domain { | |||
973 | #ifdef CONFIG_SCHED_DEBUG | 958 | #ifdef CONFIG_SCHED_DEBUG |
974 | char *name; | 959 | char *name; |
975 | #endif | 960 | #endif |
961 | union { | ||
962 | void *private; /* used during construction */ | ||
963 | struct rcu_head rcu; /* used during destruction */ | ||
964 | }; | ||
976 | 965 | ||
977 | unsigned int span_weight; | 966 | unsigned int span_weight; |
978 | /* | 967 | /* |
@@ -981,9 +970,6 @@ struct sched_domain { | |||
981 | * NOTE: this field is variable length. (Allocated dynamically | 970 | * NOTE: this field is variable length. (Allocated dynamically |
982 | * by attaching extra space to the end of the structure, | 971 | * by attaching extra space to the end of the structure, |
983 | * depending on how many CPUs the kernel has booted up with) | 972 | * depending on how many CPUs the kernel has booted up with) |
984 | * | ||
985 | * It is also be embedded into static data structures at build | ||
986 | * time. (See 'struct static_sched_domain' in kernel/sched.c) | ||
987 | */ | 973 | */ |
988 | unsigned long span[0]; | 974 | unsigned long span[0]; |
989 | }; | 975 | }; |
@@ -1048,8 +1034,12 @@ struct sched_domain; | |||
1048 | #define WF_FORK 0x02 /* child wakeup after fork */ | 1034 | #define WF_FORK 0x02 /* child wakeup after fork */ |
1049 | 1035 | ||
1050 | #define ENQUEUE_WAKEUP 1 | 1036 | #define ENQUEUE_WAKEUP 1 |
1051 | #define ENQUEUE_WAKING 2 | 1037 | #define ENQUEUE_HEAD 2 |
1052 | #define ENQUEUE_HEAD 4 | 1038 | #ifdef CONFIG_SMP |
1039 | #define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */ | ||
1040 | #else | ||
1041 | #define ENQUEUE_WAKING 0 | ||
1042 | #endif | ||
1053 | 1043 | ||
1054 | #define DEQUEUE_SLEEP 1 | 1044 | #define DEQUEUE_SLEEP 1 |
1055 | 1045 | ||
@@ -1067,12 +1057,11 @@ struct sched_class { | |||
1067 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); | 1057 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
1068 | 1058 | ||
1069 | #ifdef CONFIG_SMP | 1059 | #ifdef CONFIG_SMP |
1070 | int (*select_task_rq)(struct rq *rq, struct task_struct *p, | 1060 | int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); |
1071 | int sd_flag, int flags); | ||
1072 | 1061 | ||
1073 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | 1062 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); |
1074 | void (*post_schedule) (struct rq *this_rq); | 1063 | void (*post_schedule) (struct rq *this_rq); |
1075 | void (*task_waking) (struct rq *this_rq, struct task_struct *task); | 1064 | void (*task_waking) (struct task_struct *task); |
1076 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); | 1065 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); |
1077 | 1066 | ||
1078 | void (*set_cpus_allowed)(struct task_struct *p, | 1067 | void (*set_cpus_allowed)(struct task_struct *p, |
@@ -1197,13 +1186,11 @@ struct task_struct { | |||
1197 | unsigned int flags; /* per process flags, defined below */ | 1186 | unsigned int flags; /* per process flags, defined below */ |
1198 | unsigned int ptrace; | 1187 | unsigned int ptrace; |
1199 | 1188 | ||
1200 | int lock_depth; /* BKL lock depth */ | ||
1201 | |||
1202 | #ifdef CONFIG_SMP | 1189 | #ifdef CONFIG_SMP |
1203 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW | 1190 | struct task_struct *wake_entry; |
1204 | int oncpu; | 1191 | int on_cpu; |
1205 | #endif | ||
1206 | #endif | 1192 | #endif |
1193 | int on_rq; | ||
1207 | 1194 | ||
1208 | int prio, static_prio, normal_prio; | 1195 | int prio, static_prio, normal_prio; |
1209 | unsigned int rt_priority; | 1196 | unsigned int rt_priority; |
@@ -1274,6 +1261,7 @@ struct task_struct { | |||
1274 | 1261 | ||
1275 | /* Revert to default priority/policy when forking */ | 1262 | /* Revert to default priority/policy when forking */ |
1276 | unsigned sched_reset_on_fork:1; | 1263 | unsigned sched_reset_on_fork:1; |
1264 | unsigned sched_contributes_to_load:1; | ||
1277 | 1265 | ||
1278 | pid_t pid; | 1266 | pid_t pid; |
1279 | pid_t tgid; | 1267 | pid_t tgid; |
@@ -1537,6 +1525,9 @@ struct task_struct { | |||
1537 | unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ | 1525 | unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ |
1538 | } memcg_batch; | 1526 | } memcg_batch; |
1539 | #endif | 1527 | #endif |
1528 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
1529 | atomic_t ptrace_bp_refcnt; | ||
1530 | #endif | ||
1540 | }; | 1531 | }; |
1541 | 1532 | ||
1542 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | 1533 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ |
@@ -2060,14 +2051,13 @@ extern void xtime_update(unsigned long ticks); | |||
2060 | 2051 | ||
2061 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); | 2052 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); |
2062 | extern int wake_up_process(struct task_struct *tsk); | 2053 | extern int wake_up_process(struct task_struct *tsk); |
2063 | extern void wake_up_new_task(struct task_struct *tsk, | 2054 | extern void wake_up_new_task(struct task_struct *tsk); |
2064 | unsigned long clone_flags); | ||
2065 | #ifdef CONFIG_SMP | 2055 | #ifdef CONFIG_SMP |
2066 | extern void kick_process(struct task_struct *tsk); | 2056 | extern void kick_process(struct task_struct *tsk); |
2067 | #else | 2057 | #else |
2068 | static inline void kick_process(struct task_struct *tsk) { } | 2058 | static inline void kick_process(struct task_struct *tsk) { } |
2069 | #endif | 2059 | #endif |
2070 | extern void sched_fork(struct task_struct *p, int clone_flags); | 2060 | extern void sched_fork(struct task_struct *p); |
2071 | extern void sched_dead(struct task_struct *p); | 2061 | extern void sched_dead(struct task_struct *p); |
2072 | 2062 | ||
2073 | extern void proc_caches_init(void); | 2063 | extern void proc_caches_init(void); |
@@ -2192,8 +2182,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from); | |||
2192 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 2182 | extern char *get_task_comm(char *to, struct task_struct *tsk); |
2193 | 2183 | ||
2194 | #ifdef CONFIG_SMP | 2184 | #ifdef CONFIG_SMP |
2185 | void scheduler_ipi(void); | ||
2195 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); | 2186 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
2196 | #else | 2187 | #else |
2188 | static inline void scheduler_ipi(void) { } | ||
2197 | static inline unsigned long wait_task_inactive(struct task_struct *p, | 2189 | static inline unsigned long wait_task_inactive(struct task_struct *p, |
2198 | long match_state) | 2190 | long match_state) |
2199 | { | 2191 | { |
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index e98cd2e57194..06d69648fc86 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h | |||
@@ -88,12 +88,12 @@ static __always_inline unsigned read_seqbegin(const seqlock_t *sl) | |||
88 | unsigned ret; | 88 | unsigned ret; |
89 | 89 | ||
90 | repeat: | 90 | repeat: |
91 | ret = sl->sequence; | 91 | ret = ACCESS_ONCE(sl->sequence); |
92 | smp_rmb(); | ||
93 | if (unlikely(ret & 1)) { | 92 | if (unlikely(ret & 1)) { |
94 | cpu_relax(); | 93 | cpu_relax(); |
95 | goto repeat; | 94 | goto repeat; |
96 | } | 95 | } |
96 | smp_rmb(); | ||
97 | 97 | ||
98 | return ret; | 98 | return ret; |
99 | } | 99 | } |
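Context note (not part of the diff): the read_seqbegin() change only reorders the barrier relative to the odd-sequence retry; a typical reader loop is unchanged. A sketch with assumed names (my_lock, my_a, my_b):

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(my_lock);
static u64 my_a, my_b;

static u64 my_read_pair(void)
{
	unsigned seq;
	u64 a, b;

	do {
		seq = read_seqbegin(&my_lock);	/* spins while a writer holds an odd count */
		a = my_a;
		b = my_b;
	} while (read_seqretry(&my_lock, seq));

	return a + b;
}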
diff --git a/include/linux/signal.h b/include/linux/signal.h index fcd2b14b1932..29a68ac7af83 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -7,6 +7,8 @@ | |||
7 | #ifdef __KERNEL__ | 7 | #ifdef __KERNEL__ |
8 | #include <linux/list.h> | 8 | #include <linux/list.h> |
9 | 9 | ||
10 | struct task_struct; | ||
11 | |||
10 | /* for sysctl */ | 12 | /* for sysctl */ |
11 | extern int print_fatal_signals; | 13 | extern int print_fatal_signals; |
12 | /* | 14 | /* |
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 9659eff52ca2..045f72ab5dfd 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h | |||
@@ -404,7 +404,9 @@ extern bool ssb_is_sprom_available(struct ssb_bus *bus); | |||
404 | 404 | ||
405 | /* Set a fallback SPROM. | 405 | /* Set a fallback SPROM. |
406 | * See kdoc at the function definition for complete documentation. */ | 406 | * See kdoc at the function definition for complete documentation. */ |
407 | extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom); | 407 | extern int ssb_arch_register_fallback_sprom( |
408 | int (*sprom_callback)(struct ssb_bus *bus, | ||
409 | struct ssb_sprom *out)); | ||
408 | 410 | ||
409 | /* Suspend a SSB bus. | 411 | /* Suspend a SSB bus. |
410 | * Call this from the parent bus suspend routine. */ | 412 | * Call this from the parent bus suspend routine. */ |
diff --git a/include/linux/string.h b/include/linux/string.h index a716ee2a8adb..a176db2f2c85 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
@@ -123,6 +123,7 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp); | |||
123 | extern void argv_free(char **argv); | 123 | extern void argv_free(char **argv); |
124 | 124 | ||
125 | extern bool sysfs_streq(const char *s1, const char *s2); | 125 | extern bool sysfs_streq(const char *s1, const char *s2); |
126 | extern int strtobool(const char *s, bool *res); | ||
126 | 127 | ||
127 | #ifdef CONFIG_BINARY_PRINTF | 128 | #ifdef CONFIG_BINARY_PRINTF |
128 | int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); | 129 | int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); |
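Note: the new strtobool() parses a user-supplied boolean ("y"/"Y"/"1" or "n"/"N"/"0", trailing newline tolerated) and returns 0 on success or -EINVAL otherwise, which shortens sysfs and module-parameter handlers. A minimal usage sketch (attribute and variable names hypothetical):

    #include <linux/device.h>
    #include <linux/string.h>

    static bool example_enabled;

    static ssize_t example_enable_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
    {
            bool val;
            int err;

            err = strtobool(buf, &val);     /* "1\n" from userspace is accepted */
            if (err)
                    return err;             /* -EINVAL on anything unrecognised */

            example_enabled = val;
            return count;
    }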
diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h index dfb078db8ebb..d35e783a598c 100644 --- a/include/linux/sysdev.h +++ b/include/linux/sysdev.h | |||
@@ -34,12 +34,6 @@ struct sysdev_class { | |||
34 | struct list_head drivers; | 34 | struct list_head drivers; |
35 | struct sysdev_class_attribute **attrs; | 35 | struct sysdev_class_attribute **attrs; |
36 | struct kset kset; | 36 | struct kset kset; |
37 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | ||
38 | /* Default operations for these types of devices */ | ||
39 | int (*shutdown)(struct sys_device *); | ||
40 | int (*suspend)(struct sys_device *, pm_message_t state); | ||
41 | int (*resume)(struct sys_device *); | ||
42 | #endif | ||
43 | }; | 37 | }; |
44 | 38 | ||
45 | struct sysdev_class_attribute { | 39 | struct sysdev_class_attribute { |
@@ -77,11 +71,6 @@ struct sysdev_driver { | |||
77 | struct list_head entry; | 71 | struct list_head entry; |
78 | int (*add)(struct sys_device *); | 72 | int (*add)(struct sys_device *); |
79 | int (*remove)(struct sys_device *); | 73 | int (*remove)(struct sys_device *); |
80 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | ||
81 | int (*shutdown)(struct sys_device *); | ||
82 | int (*suspend)(struct sys_device *, pm_message_t state); | ||
83 | int (*resume)(struct sys_device *); | ||
84 | #endif | ||
85 | }; | 74 | }; |
86 | 75 | ||
87 | 76 | ||
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 30b881555fa5..c3acda60eee0 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
@@ -176,7 +176,6 @@ struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd, | |||
176 | const unsigned char *name); | 176 | const unsigned char *name); |
177 | struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd); | 177 | struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd); |
178 | void sysfs_put(struct sysfs_dirent *sd); | 178 | void sysfs_put(struct sysfs_dirent *sd); |
179 | void sysfs_printk_last_file(void); | ||
180 | 179 | ||
181 | /* Called to clear a ns tag when it is no longer valid */ | 180 | /* Called to clear a ns tag when it is no longer valid */ |
182 | void sysfs_exit_ns(enum kobj_ns_type type, const void *tag); | 181 | void sysfs_exit_ns(enum kobj_ns_type type, const void *tag); |
@@ -348,10 +347,6 @@ static inline int __must_check sysfs_init(void) | |||
348 | return 0; | 347 | return 0; |
349 | } | 348 | } |
350 | 349 | ||
351 | static inline void sysfs_printk_last_file(void) | ||
352 | { | ||
353 | } | ||
354 | |||
355 | #endif /* CONFIG_SYSFS */ | 350 | #endif /* CONFIG_SYSFS */ |
356 | 351 | ||
357 | #endif /* _SYSFS_H_ */ | 352 | #endif /* _SYSFS_H_ */ |
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h index 7071ec5d0118..b004e557caa9 100644 --- a/include/linux/ti_wilink_st.h +++ b/include/linux/ti_wilink_st.h | |||
@@ -140,12 +140,12 @@ extern long st_unregister(struct st_proto_s *); | |||
140 | */ | 140 | */ |
141 | struct st_data_s { | 141 | struct st_data_s { |
142 | unsigned long st_state; | 142 | unsigned long st_state; |
143 | struct tty_struct *tty; | ||
144 | struct sk_buff *tx_skb; | 143 | struct sk_buff *tx_skb; |
145 | #define ST_TX_SENDING 1 | 144 | #define ST_TX_SENDING 1 |
146 | #define ST_TX_WAKEUP 2 | 145 | #define ST_TX_WAKEUP 2 |
147 | unsigned long tx_state; | 146 | unsigned long tx_state; |
148 | struct st_proto_s *list[ST_MAX_CHANNELS]; | 147 | struct st_proto_s *list[ST_MAX_CHANNELS]; |
148 | bool is_registered[ST_MAX_CHANNELS]; | ||
149 | unsigned long rx_state; | 149 | unsigned long rx_state; |
150 | unsigned long rx_count; | 150 | unsigned long rx_count; |
151 | struct sk_buff *rx_skb; | 151 | struct sk_buff *rx_skb; |
@@ -155,6 +155,7 @@ struct st_data_s { | |||
155 | unsigned char protos_registered; | 155 | unsigned char protos_registered; |
156 | unsigned long ll_state; | 156 | unsigned long ll_state; |
157 | void *kim_data; | 157 | void *kim_data; |
158 | struct tty_struct *tty; | ||
158 | }; | 159 | }; |
159 | 160 | ||
160 | /* | 161 | /* |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 97c84a58efb8..d530a4460a0b 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -29,7 +29,7 @@ struct tracepoint_func { | |||
29 | 29 | ||
30 | struct tracepoint { | 30 | struct tracepoint { |
31 | const char *name; /* Tracepoint name */ | 31 | const char *name; /* Tracepoint name */ |
32 | int state; /* State. */ | 32 | struct jump_label_key key; |
33 | void (*regfunc)(void); | 33 | void (*regfunc)(void); |
34 | void (*unregfunc)(void); | 34 | void (*unregfunc)(void); |
35 | struct tracepoint_func __rcu *funcs; | 35 | struct tracepoint_func __rcu *funcs; |
@@ -146,9 +146,7 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin, | |||
146 | extern struct tracepoint __tracepoint_##name; \ | 146 | extern struct tracepoint __tracepoint_##name; \ |
147 | static inline void trace_##name(proto) \ | 147 | static inline void trace_##name(proto) \ |
148 | { \ | 148 | { \ |
149 | JUMP_LABEL(&__tracepoint_##name.state, do_trace); \ | 149 | if (static_branch(&__tracepoint_##name.key)) \ |
150 | return; \ | ||
151 | do_trace: \ | ||
152 | __DO_TRACE(&__tracepoint_##name, \ | 150 | __DO_TRACE(&__tracepoint_##name, \ |
153 | TP_PROTO(data_proto), \ | 151 | TP_PROTO(data_proto), \ |
154 | TP_ARGS(data_args), \ | 152 | TP_ARGS(data_args), \ |
@@ -176,14 +174,14 @@ do_trace: \ | |||
176 | * structures, so we create an array of pointers that will be used for iteration | 174 | * structures, so we create an array of pointers that will be used for iteration |
177 | * on the tracepoints. | 175 | * on the tracepoints. |
178 | */ | 176 | */ |
179 | #define DEFINE_TRACE_FN(name, reg, unreg) \ | 177 | #define DEFINE_TRACE_FN(name, reg, unreg) \ |
180 | static const char __tpstrtab_##name[] \ | 178 | static const char __tpstrtab_##name[] \ |
181 | __attribute__((section("__tracepoints_strings"))) = #name; \ | 179 | __attribute__((section("__tracepoints_strings"))) = #name; \ |
182 | struct tracepoint __tracepoint_##name \ | 180 | struct tracepoint __tracepoint_##name \ |
183 | __attribute__((section("__tracepoints"))) = \ | 181 | __attribute__((section("__tracepoints"))) = \ |
184 | { __tpstrtab_##name, 0, reg, unreg, NULL }; \ | 182 | { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\ |
185 | static struct tracepoint * const __tracepoint_ptr_##name __used \ | 183 | static struct tracepoint * const __tracepoint_ptr_##name __used \ |
186 | __attribute__((section("__tracepoints_ptrs"))) = \ | 184 | __attribute__((section("__tracepoints_ptrs"))) = \ |
187 | &__tracepoint_##name; | 185 | &__tracepoint_##name; |
188 | 186 | ||
189 | #define DEFINE_TRACE(name) \ | 187 | #define DEFINE_TRACE(name) \ |
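Note: the tracepoint fast path above now keys off a struct jump_label_key via static_branch() instead of testing an integer state and jumping with JUMP_LABEL(). The same primitive can guard any rarely-enabled path; a hedged sketch in which the key and function names are invented, and the jump_label_inc()/jump_label_dec() toggling is how this kernel's tracepoint core flips the key rather than anything introduced in this hunk:

    #include <linux/jump_label.h>
    #include <linux/kernel.h>

    static struct jump_label_key example_key = JUMP_LABEL_INIT;

    static void example_hot_path(void)
    {
            /* On jump-label-capable arches this is a patched no-op while disabled. */
            if (static_branch(&example_key))
                    pr_debug("slow path taken\n");
    }

    static void example_set_enabled(bool enable)
    {
            if (enable)
                    jump_label_inc(&example_key);   /* reference-counted enable */
            else
                    jump_label_dec(&example_key);
    }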
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 0e1855079fbb..605b0aa8d852 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h | |||
@@ -68,6 +68,7 @@ struct usbnet { | |||
68 | # define EVENT_RX_PAUSED 5 | 68 | # define EVENT_RX_PAUSED 5 |
69 | # define EVENT_DEV_WAKING 6 | 69 | # define EVENT_DEV_WAKING 6 |
70 | # define EVENT_DEV_ASLEEP 7 | 70 | # define EVENT_DEV_ASLEEP 7 |
71 | # define EVENT_DEV_OPEN 8 | ||
71 | }; | 72 | }; |
72 | 73 | ||
73 | static inline struct usb_driver *driver_of(struct usb_interface *intf) | 74 | static inline struct usb_driver *driver_of(struct usb_interface *intf) |
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 88bdd010d65d..2fa8d1341a0a 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h | |||
@@ -38,9 +38,19 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner) | |||
38 | return outer; | 38 | return outer; |
39 | } | 39 | } |
40 | 40 | ||
41 | #define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0) | 41 | static inline void INET_ECN_xmit(struct sock *sk) |
42 | #define INET_ECN_dontxmit(sk) \ | 42 | { |
43 | do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0) | 43 | inet_sk(sk)->tos |= INET_ECN_ECT_0; |
44 | if (inet6_sk(sk) != NULL) | ||
45 | inet6_sk(sk)->tclass |= INET_ECN_ECT_0; | ||
46 | } | ||
47 | |||
48 | static inline void INET_ECN_dontxmit(struct sock *sk) | ||
49 | { | ||
50 | inet_sk(sk)->tos &= ~INET_ECN_MASK; | ||
51 | if (inet6_sk(sk) != NULL) | ||
52 | inet6_sk(sk)->tclass &= ~INET_ECN_MASK; | ||
53 | } | ||
44 | 54 | ||
45 | #define IP6_ECN_flow_init(label) do { \ | 55 | #define IP6_ECN_flow_init(label) do { \ |
46 | (label) &= ~htonl(INET_ECN_MASK << 20); \ | 56 | (label) &= ~htonl(INET_ECN_MASK << 20); \ |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index d516f00c8e0f..86aefed6140b 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
@@ -791,6 +791,7 @@ struct ip_vs_app { | |||
791 | /* IPVS in network namespace */ | 791 | /* IPVS in network namespace */ |
792 | struct netns_ipvs { | 792 | struct netns_ipvs { |
793 | int gen; /* Generation */ | 793 | int gen; /* Generation */ |
794 | int enable; /* enable like nf_hooks do */ | ||
794 | /* | 795 | /* |
795 | * Hash table: for real service lookups | 796 | * Hash table: for real service lookups |
796 | */ | 797 | */ |
@@ -1089,6 +1090,22 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) | |||
1089 | atomic_inc(&ctl_cp->n_control); | 1090 | atomic_inc(&ctl_cp->n_control); |
1090 | } | 1091 | } |
1091 | 1092 | ||
1093 | /* | ||
1094 | * IPVS netns init & cleanup functions | ||
1095 | */ | ||
1096 | extern int __ip_vs_estimator_init(struct net *net); | ||
1097 | extern int __ip_vs_control_init(struct net *net); | ||
1098 | extern int __ip_vs_protocol_init(struct net *net); | ||
1099 | extern int __ip_vs_app_init(struct net *net); | ||
1100 | extern int __ip_vs_conn_init(struct net *net); | ||
1101 | extern int __ip_vs_sync_init(struct net *net); | ||
1102 | extern void __ip_vs_conn_cleanup(struct net *net); | ||
1103 | extern void __ip_vs_app_cleanup(struct net *net); | ||
1104 | extern void __ip_vs_protocol_cleanup(struct net *net); | ||
1105 | extern void __ip_vs_control_cleanup(struct net *net); | ||
1106 | extern void __ip_vs_estimator_cleanup(struct net *net); | ||
1107 | extern void __ip_vs_sync_cleanup(struct net *net); | ||
1108 | extern void __ip_vs_service_cleanup(struct net *net); | ||
1092 | 1109 | ||
1093 | /* | 1110 | /* |
1094 | * IPVS application functions | 1111 | * IPVS application functions |
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 75b8e2968c9b..f57e7d46a453 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h | |||
@@ -199,7 +199,7 @@ struct llc_pdu_sn { | |||
199 | u8 ssap; | 199 | u8 ssap; |
200 | u8 ctrl_1; | 200 | u8 ctrl_1; |
201 | u8 ctrl_2; | 201 | u8 ctrl_2; |
202 | }; | 202 | } __packed; |
203 | 203 | ||
204 | static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) | 204 | static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) |
205 | { | 205 | { |
@@ -211,7 +211,7 @@ struct llc_pdu_un { | |||
211 | u8 dsap; | 211 | u8 dsap; |
212 | u8 ssap; | 212 | u8 ssap; |
213 | u8 ctrl_1; | 213 | u8 ctrl_1; |
214 | }; | 214 | } __packed; |
215 | 215 | ||
216 | static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) | 216 | static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) |
217 | { | 217 | { |
@@ -359,7 +359,7 @@ struct llc_xid_info { | |||
359 | u8 fmt_id; /* always 0x81 for LLC */ | 359 | u8 fmt_id; /* always 0x81 for LLC */ |
360 | u8 type; /* different if NULL/non-NULL LSAP */ | 360 | u8 type; /* different if NULL/non-NULL LSAP */ |
361 | u8 rw; /* sender receive window */ | 361 | u8 rw; /* sender receive window */ |
362 | }; | 362 | } __packed; |
363 | 363 | ||
364 | /** | 364 | /** |
365 | * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID | 365 | * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID |
@@ -415,7 +415,7 @@ struct llc_frmr_info { | |||
415 | u8 curr_ssv; /* current send state variable val */ | 415 | u8 curr_ssv; /* current send state variable val */ |
416 | u8 curr_rsv; /* current receive state variable */ | 416 | u8 curr_rsv; /* current receive state variable */ |
417 | u8 ind_bits; /* indicator bits set with macro */ | 417 | u8 ind_bits; /* indicator bits set with macro */ |
418 | }; | 418 | } __packed; |
419 | 419 | ||
420 | extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); | 420 | extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); |
421 | extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value); | 421 | extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value); |
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 505845ddb0be..01e094c6d0ae 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -115,7 +115,6 @@ | |||
115 | * sctp/protocol.c | 115 | * sctp/protocol.c |
116 | */ | 116 | */ |
117 | extern struct sock *sctp_get_ctl_sock(void); | 117 | extern struct sock *sctp_get_ctl_sock(void); |
118 | extern void sctp_local_addr_free(struct rcu_head *head); | ||
119 | extern int sctp_copy_local_addr_list(struct sctp_bind_addr *, | 118 | extern int sctp_copy_local_addr_list(struct sctp_bind_addr *, |
120 | sctp_scope_t, gfp_t gfp, | 119 | sctp_scope_t, gfp_t gfp, |
121 | int flags); | 120 | int flags); |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 6ae4bc5ce8a7..20afeaa39395 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -324,6 +324,7 @@ struct xfrm_state_afinfo { | |||
324 | int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); | 324 | int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); |
325 | int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); | 325 | int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); |
326 | int (*output)(struct sk_buff *skb); | 326 | int (*output)(struct sk_buff *skb); |
327 | int (*output_finish)(struct sk_buff *skb); | ||
327 | int (*extract_input)(struct xfrm_state *x, | 328 | int (*extract_input)(struct xfrm_state *x, |
328 | struct sk_buff *skb); | 329 | struct sk_buff *skb); |
329 | int (*extract_output)(struct xfrm_state *x, | 330 | int (*extract_output)(struct xfrm_state *x, |
@@ -1454,6 +1455,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) | |||
1454 | extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); | 1455 | extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
1455 | extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); | 1456 | extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); |
1456 | extern int xfrm4_output(struct sk_buff *skb); | 1457 | extern int xfrm4_output(struct sk_buff *skb); |
1458 | extern int xfrm4_output_finish(struct sk_buff *skb); | ||
1457 | extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); | 1459 | extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); |
1458 | extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); | 1460 | extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); |
1459 | extern int xfrm6_extract_header(struct sk_buff *skb); | 1461 | extern int xfrm6_extract_header(struct sk_buff *skb); |
@@ -1470,6 +1472,7 @@ extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr); | |||
1470 | extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); | 1472 | extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
1471 | extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); | 1473 | extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); |
1472 | extern int xfrm6_output(struct sk_buff *skb); | 1474 | extern int xfrm6_output(struct sk_buff *skb); |
1475 | extern int xfrm6_output_finish(struct sk_buff *skb); | ||
1473 | extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, | 1476 | extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, |
1474 | u8 **prevhdr); | 1477 | u8 **prevhdr); |
1475 | 1478 | ||
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h index cbb822e8d791..2d0191c90f9e 100644 --- a/include/rdma/iw_cm.h +++ b/include/rdma/iw_cm.h | |||
@@ -46,18 +46,9 @@ enum iw_cm_event_type { | |||
46 | IW_CM_EVENT_CLOSE /* close complete */ | 46 | IW_CM_EVENT_CLOSE /* close complete */ |
47 | }; | 47 | }; |
48 | 48 | ||
49 | enum iw_cm_event_status { | ||
50 | IW_CM_EVENT_STATUS_OK = 0, /* request successful */ | ||
51 | IW_CM_EVENT_STATUS_ACCEPTED = 0, /* connect request accepted */ | ||
52 | IW_CM_EVENT_STATUS_REJECTED, /* connect request rejected */ | ||
53 | IW_CM_EVENT_STATUS_TIMEOUT, /* the operation timed out */ | ||
54 | IW_CM_EVENT_STATUS_RESET, /* reset from remote peer */ | ||
55 | IW_CM_EVENT_STATUS_EINVAL, /* asynchronous failure for bad parm */ | ||
56 | }; | ||
57 | |||
58 | struct iw_cm_event { | 49 | struct iw_cm_event { |
59 | enum iw_cm_event_type event; | 50 | enum iw_cm_event_type event; |
60 | enum iw_cm_event_status status; | 51 | int status; |
61 | struct sockaddr_in local_addr; | 52 | struct sockaddr_in local_addr; |
62 | struct sockaddr_in remote_addr; | 53 | struct sockaddr_in remote_addr; |
63 | void *private_data; | 54 | void *private_data; |
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 4fae90304648..169f7a53fb0c 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h | |||
@@ -329,4 +329,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr); | |||
329 | */ | 329 | */ |
330 | void rdma_set_service_type(struct rdma_cm_id *id, int tos); | 330 | void rdma_set_service_type(struct rdma_cm_id *id, int tos); |
331 | 331 | ||
332 | /** | ||
333 | * rdma_set_reuseaddr - Allow the reuse of local addresses when binding | ||
334 | * the rdma_cm_id. | ||
335 | * @id: Communication identifier to configure. | ||
336 | * @reuse: Value indicating if the bound address is reusable. | ||
337 | * | ||
338 | * Reuse must be set before an address is bound to the id. | ||
339 | */ | ||
340 | int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse); | ||
341 | |||
332 | #endif /* RDMA_CM_H */ | 342 | #endif /* RDMA_CM_H */ |
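Note: per the kdoc added above, rdma_set_reuseaddr() must be called on an rdma_cm_id before it is bound. A minimal sketch of a listener opting in to address reuse; the id is assumed to have been created with rdma_create_id() elsewhere and error handling is trimmed:

    #include <rdma/rdma_cm.h>

    static int example_bind_with_reuse(struct rdma_cm_id *id, struct sockaddr *addr)
    {
            int ret;

            ret = rdma_set_reuseaddr(id, 1);        /* must precede rdma_bind_addr() */
            if (ret)
                    return ret;

            return rdma_bind_addr(id, addr);
    }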
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h index 1d165022c02d..fc82c1896f75 100644 --- a/include/rdma/rdma_user_cm.h +++ b/include/rdma/rdma_user_cm.h | |||
@@ -221,8 +221,9 @@ enum { | |||
221 | 221 | ||
222 | /* Option details */ | 222 | /* Option details */ |
223 | enum { | 223 | enum { |
224 | RDMA_OPTION_ID_TOS = 0, | 224 | RDMA_OPTION_ID_TOS = 0, |
225 | RDMA_OPTION_IB_PATH = 1 | 225 | RDMA_OPTION_ID_REUSEADDR = 1, |
226 | RDMA_OPTION_IB_PATH = 1 | ||
226 | }; | 227 | }; |
227 | 228 | ||
228 | struct rdma_ucm_set_option { | 229 | struct rdma_ucm_set_option { |
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 2d3ec5094685..dd82e02ddde3 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h | |||
@@ -169,6 +169,7 @@ struct scsi_device { | |||
169 | sdev_dev; | 169 | sdev_dev; |
170 | 170 | ||
171 | struct execute_work ew; /* used to get process context on put */ | 171 | struct execute_work ew; /* used to get process context on put */ |
172 | struct work_struct requeue_work; | ||
172 | 173 | ||
173 | struct scsi_dh_data *scsi_dh_data; | 174 | struct scsi_dh_data *scsi_dh_data; |
174 | enum scsi_device_state sdev_state; | 175 | enum scsi_device_state sdev_state; |
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h index e3615c093741..9fe3a36646e9 100644 --- a/include/trace/events/gfpflags.h +++ b/include/trace/events/gfpflags.h | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | #define show_gfp_flags(flags) \ | 11 | #define show_gfp_flags(flags) \ |
12 | (flags) ? __print_flags(flags, "|", \ | 12 | (flags) ? __print_flags(flags, "|", \ |
13 | {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \ | ||
13 | {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ | 14 | {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ |
14 | {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ | 15 | {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ |
15 | {(unsigned long)GFP_USER, "GFP_USER"}, \ | 16 | {(unsigned long)GFP_USER, "GFP_USER"}, \ |
@@ -32,6 +33,9 @@ | |||
32 | {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ | 33 | {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ |
33 | {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ | 34 | {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ |
34 | {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ | 35 | {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ |
35 | {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ | 36 | {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \ |
37 | {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ | ||
38 | {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \ | ||
39 | {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \ | ||
36 | ) : "GFP_NOWAIT" | 40 | ) : "GFP_NOWAIT" |
37 | 41 | ||
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index 1c09820df585..ae045ca7d356 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h | |||
@@ -20,8 +20,7 @@ struct softirq_action; | |||
20 | softirq_name(BLOCK_IOPOLL), \ | 20 | softirq_name(BLOCK_IOPOLL), \ |
21 | softirq_name(TASKLET), \ | 21 | softirq_name(TASKLET), \ |
22 | softirq_name(SCHED), \ | 22 | softirq_name(SCHED), \ |
23 | softirq_name(HRTIMER), \ | 23 | softirq_name(HRTIMER)) |
24 | softirq_name(RCU)) | ||
25 | 24 | ||
26 | /** | 25 | /** |
27 | * irq_handler_entry - called immediately before the irq action handler | 26 | * irq_handler_entry - called immediately before the irq action handler |
diff --git a/include/xen/events.h b/include/xen/events.h index f1b87ad48ac7..9af21e19545a 100644 --- a/include/xen/events.h +++ b/include/xen/events.h | |||
@@ -85,7 +85,8 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
85 | int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc); | 85 | int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc); |
86 | /* Bind an PSI pirq to an irq. */ | 86 | /* Bind an PSI pirq to an irq. */ |
87 | int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | 87 | int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, |
88 | int pirq, int vector, const char *name); | 88 | int pirq, int vector, const char *name, |
89 | domid_t domid); | ||
89 | #endif | 90 | #endif |
90 | 91 | ||
91 | /* De-allocates the above mentioned physical interrupt. */ | 92 | /* De-allocates the above mentioned physical interrupt. */ |
@@ -94,4 +95,10 @@ int xen_destroy_irq(int irq); | |||
94 | /* Return irq from pirq */ | 95 | /* Return irq from pirq */ |
95 | int xen_irq_from_pirq(unsigned pirq); | 96 | int xen_irq_from_pirq(unsigned pirq); |
96 | 97 | ||
98 | /* Return the pirq allocated to the irq. */ | ||
99 | int xen_pirq_from_irq(unsigned irq); | ||
100 | |||
101 | /* Determine whether to ignore this IRQ if it is passed to a guest. */ | ||
102 | int xen_test_irq_shared(int irq); | ||
103 | |||
97 | #endif /* _XEN_EVENTS_H */ | 104 | #endif /* _XEN_EVENTS_H */ |