Diffstat (limited to 'include/linux')
 -rw-r--r--  include/linux/acpi.h          |  4
 -rw-r--r--  include/linux/cgroup.h        | 12
 -rw-r--r--  include/linux/i2c/sx150x.h    |  4
 -rw-r--r--  include/linux/intel-gtt.h     | 20
 -rw-r--r--  include/linux/io-mapping.h    | 24
 -rw-r--r--  include/linux/kfifo.h         | 58
 -rw-r--r--  include/linux/ksm.h           | 20
 -rw-r--r--  include/linux/lglock.h        |  4
 -rw-r--r--  include/linux/libata.h        |  4
 -rw-r--r--  include/linux/mm.h            |  6
 -rw-r--r--  include/linux/mmc/sdio.h      |  2
 -rw-r--r--  include/linux/mmzone.h        | 13
 -rw-r--r--  include/linux/mutex.h         |  8
 -rw-r--r--  include/linux/pci.h           |  3
 -rw-r--r--  include/linux/pci_ids.h       |  2
 -rw-r--r--  include/linux/percpu.h        |  2
 -rw-r--r--  include/linux/semaphore.h     |  3
 -rw-r--r--  include/linux/serial.h        |  3
 -rw-r--r--  include/linux/serial_core.h   |  3
 -rw-r--r--  include/linux/swap.h          | 11
 -rw-r--r--  include/linux/vmstat.h        | 22
 -rw-r--r--  include/linux/workqueue.h     | 18
 22 files changed, 169 insertions, 77 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ccf94dc5acdf..c227757feb06 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -304,8 +304,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
           OSC_PCI_EXPRESS_PME_CONTROL |         \
           OSC_PCI_EXPRESS_AER_CONTROL |         \
           OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
-
-extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
+extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
+                                            u32 *mask, u32 req);
 extern void acpi_early_init(void);
 
 #else  /* !CONFIG_ACPI */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed3e92e41c6e..0c991023ee47 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+        return cgroup_attach_task_all(current, tsk);
+}
 
 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
 }
 
 /* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+                                         struct task_struct *t)
+{
+        return 0;
+}
 static inline int cgroup_attach_task_current_cg(struct task_struct *t)
 {
         return 0;
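The cgroup change replaces the exported cgroup_attach_task_current_cg() with the more general cgroup_attach_task_all(from, to), which attaches a task to all of another task's cgroups; the old name survives as a static inline wrapper. A minimal sketch of a caller that wants a kernel worker thread accounted to the task that created it (the helper name is hypothetical):

        #include <linux/cgroup.h>
        #include <linux/sched.h>

        /* Hypothetical: charge a freshly created worker thread to the
         * cgroups of the task that asked for it. */
        static int inherit_requester_cgroups(struct task_struct *worker)
        {
                return cgroup_attach_task_all(current, worker);
        }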
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h
index ee3049cb9ba5..52baa79d69a7 100644
--- a/include/linux/i2c/sx150x.h
+++ b/include/linux/i2c/sx150x.h
@@ -63,6 +63,9 @@
  *             IRQ lines will appear.  Similarly to gpio_base, the expander
  *             will create a block of irqs beginning at this number.
  *             This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ *                      reset of the chip at the beginning of the probe
+ *                      in order to place it in a known state.
  */
 struct sx150x_platform_data {
         unsigned gpio_base;
@@ -73,6 +76,7 @@ struct sx150x_platform_data {
         u16 io_polarity;
         int irq_summary;
         unsigned irq_base;
+        bool reset_during_probe;
 };
 
 #endif /* __LINUX_I2C_SX150X_H */
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h
new file mode 100644
index 000000000000..1d19ab2afa39
--- /dev/null
+++ b/include/linux/intel-gtt.h
@@ -0,0 +1,20 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+#ifndef _INTEL_GTT_H
+#define _INTEL_GTT_H
+
+#include <linux/agp_backend.h>
+
+/* This is for Intel only GTT controls.
+ *
+ * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
+ */
+
+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
+
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+
+#endif
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 0a6b3d5c490c..7fb592793738 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
 }
 
 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                          unsigned long offset,
                          int slot)
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 }
 
 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
         iounmap_atomic(vaddr, slot);
 }
 
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
         resource_size_t phys_addr;
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 }
 
 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
         iounmap(vaddr);
 }
@@ -125,38 +125,38 @@ struct io_mapping;
 static inline struct io_mapping *
 io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
-        return (struct io_mapping *) ioremap_wc(base, size);
+        return (struct io_mapping __force *) ioremap_wc(base, size);
 }
 
 static inline void
 io_mapping_free(struct io_mapping *mapping)
 {
-        iounmap(mapping);
+        iounmap((void __force __iomem *) mapping);
 }
 
 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                          unsigned long offset,
                          int slot)
 {
-        return ((char *) mapping) + offset;
+        return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
 }
 
 /* Non-atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
-        return ((char *) mapping) + offset;
+        return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
 }
 
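These io-mapping changes are sparse annotations only: the returned addresses are now typed void __iomem * and the lowmem fallbacks carry __force casts, so sparse (make C=1) will flag callers that dereference the mapping directly instead of going through the MMIO accessors. A small sketch of the pattern this enforces, assuming a driver that keeps an io_mapping for a write-combined aperture (the names and the KM_USER0 slot are illustrative):

        #include <linux/io-mapping.h>
        #include <linux/io.h>

        static u32 aperture_read32(struct io_mapping *aperture,
                                   unsigned long offset)
        {
                void __iomem *vaddr;
                u32 val;

                vaddr = io_mapping_map_atomic_wc(aperture, offset, KM_USER0);
                val = readl(vaddr);     /* accessor, not a plain *vaddr */
                io_mapping_unmap_atomic(vaddr, KM_USER0);
                return val;
        }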
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 4aa95f203f3e..62dbee554f60 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset(fifo) \
 (void)({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         __tmp->kfifo.in = __tmp->kfifo.out = 0; \
 })
 
@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset_out(fifo) \
 (void)({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         __tmp->kfifo.out = __tmp->kfifo.in; \
 })
 
@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_len(fifo) \
 ({ \
-        typeof(fifo + 1) __tmpl = (fifo); \
+        typeof((fifo) + 1) __tmpl = (fifo); \
         __tmpl->kfifo.in - __tmpl->kfifo.out; \
 })
 
@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_is_empty(fifo) \
 ({ \
-        typeof(fifo + 1) __tmpq = (fifo); \
+        typeof((fifo) + 1) __tmpq = (fifo); \
         __tmpq->kfifo.in == __tmpq->kfifo.out; \
 })
 
@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_is_full(fifo) \
 ({ \
-        typeof(fifo + 1) __tmpq = (fifo); \
+        typeof((fifo) + 1) __tmpq = (fifo); \
         kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
 })
 
@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val)
 #define kfifo_avail(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-        typeof(fifo + 1) __tmpq = (fifo); \
+        typeof((fifo) + 1) __tmpq = (fifo); \
         const size_t __recsize = sizeof(*__tmpq->rectype); \
         unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
         (__recsize) ? ((__avail <= __recsize) ? 0 : \
@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_skip(fifo) \
 (void)({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         const size_t __recsize = sizeof(*__tmp->rectype); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
         if (__recsize) \
@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \
 #define kfifo_peek_len(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         const size_t __recsize = sizeof(*__tmp->rectype); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
         (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \
 #define kfifo_alloc(fifo, size, gfp_mask) \
 __kfifo_must_check_helper( \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
         __is_kfifo_ptr(__tmp) ? \
         __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_free(fifo) \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
         if (__is_kfifo_ptr(__tmp)) \
                 __kfifo_free(__kfifo); \
@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_init(fifo, buffer, size) \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
         __is_kfifo_ptr(__tmp) ? \
         __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_put(fifo, val) \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
-        typeof(val + 1) __val = (val); \
+        typeof((fifo) + 1) __tmp = (fifo); \
+        typeof((val) + 1) __val = (val); \
         unsigned int __ret; \
         const size_t __recsize = sizeof(*__tmp->rectype); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \
 #define kfifo_get(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
-        typeof(val + 1) __val = (val); \
+        typeof((fifo) + 1) __tmp = (fifo); \
+        typeof((val) + 1) __val = (val); \
         unsigned int __ret; \
         const size_t __recsize = sizeof(*__tmp->rectype); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \
 #define kfifo_peek(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
-        typeof(val + 1) __val = (val); \
+        typeof((fifo) + 1) __tmp = (fifo); \
+        typeof((val) + 1) __val = (val); \
         unsigned int __ret; \
         const size_t __recsize = sizeof(*__tmp->rectype); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_in(fifo, buf, n) \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
-        typeof(buf + 1) __buf = (buf); \
+        typeof((fifo) + 1) __tmp = (fifo); \
+        typeof((buf) + 1) __buf = (buf); \
         unsigned long __n = (n); \
         const size_t __recsize = sizeof(*__tmp->rectype); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \
 #define kfifo_out(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
-        typeof(buf + 1) __buf = (buf); \
+        typeof((fifo) + 1) __tmp = (fifo); \
+        typeof((buf) + 1) __buf = (buf); \
         unsigned long __n = (n); \
         const size_t __recsize = sizeof(*__tmp->rectype); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \
 #define kfifo_from_user(fifo, from, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         const void __user *__from = (from); \
         unsigned int __len = (len); \
         unsigned int *__copied = (copied); \
@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \
 #define kfifo_to_user(fifo, to, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         void __user *__to = (to); \
         unsigned int __len = (len); \
         unsigned int *__copied = (copied); \
@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         struct scatterlist *__sgl = (sgl); \
         int __nents = (nents); \
         unsigned int __len = (len); \
@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_in_finish(fifo, len) \
 (void)({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         unsigned int __len = (len); \
         const size_t __recsize = sizeof(*__tmp->rectype); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         struct scatterlist *__sgl = (sgl); \
         int __nents = (nents); \
         unsigned int __len = (len); \
@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_out_finish(fifo, len) \
 (void)({ \
-        typeof(fifo + 1) __tmp = (fifo); \
+        typeof((fifo) + 1) __tmp = (fifo); \
         unsigned int __len = (len); \
         const size_t __recsize = sizeof(*__tmp->rectype); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \
 #define kfifo_out_peek(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-        typeof(fifo + 1) __tmp = (fifo); \
-        typeof(buf + 1) __buf = (buf); \
+        typeof((fifo) + 1) __tmp = (fifo); \
+        typeof((buf) + 1) __buf = (buf); \
         unsigned long __n = (n); \
         const size_t __recsize = sizeof(*__tmp->rectype); \
         struct __kfifo *__kfifo = &__tmp->kfifo; \
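Every kfifo.h hunk is the same macro-hygiene fix: the fifo, val and buf parameters are parenthesized before the "+ 1" pointer probe, so an argument that is itself an expression cannot re-associate with the "+" inside typeof(). Callers are unaffected; for reference, a minimal sketch of ordinary use of these macros (the fifo name and size are illustrative):

        #include <linux/kfifo.h>

        /* 16-element byte FIFO; DEFINE_KFIFO declares and initializes it. */
        static DEFINE_KFIFO(log_fifo, unsigned char, 16);

        static unsigned int log_fifo_demo(void)
        {
                unsigned char in[4] = { 1, 2, 3, 4 }, out[4];
                unsigned int n;

                n = kfifo_in(&log_fifo, in, sizeof(in));  /* bytes queued */
                return kfifo_out(&log_fifo, out, n);      /* bytes dequeued */
        }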
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 74d691ee9121..3319a6967626 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -16,6 +16,9 @@
 struct stable_node;
 struct mem_cgroup;
 
+struct page *ksm_does_need_to_copy(struct page *page,
+                        struct vm_area_struct *vma, unsigned long address);
+
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                 unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_does_need_to_copy(struct page *page,
-                        struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                         struct vm_area_struct *vma, unsigned long address)
 {
         struct anon_vma *anon_vma = page_anon_vma(page);
 
-        if (!anon_vma ||
-            (anon_vma->root == vma->anon_vma->root &&
-             page->index == linear_page_index(vma, address)))
-                return page;
-
-        return ksm_does_need_to_copy(page, vma, address);
+        return anon_vma &&
+                (anon_vma->root != vma->anon_vma->root ||
+                 page->index != linear_page_index(vma, address));
 }
 
 int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
         return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                         struct vm_area_struct *vma, unsigned long address)
 {
-        return page;
+        return 0;
 }
 
 static inline int page_referenced_ksm(struct page *page,
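ksm_does_need_to_copy() is now declared unconditionally and ksm_might_need_to_copy() shrinks to a cheap inline predicate returning int, so the actual copy becomes the caller's job. A sketch of the resulting caller pattern (a hypothetical helper; the assumption that the copy routine returns NULL on allocation failure is mine, the mm/ caller is not shown in this diff):

        #include <linux/ksm.h>
        #include <linux/mm.h>
        #include <linux/errno.h>

        static int ksm_swapin_copy(struct page **pagep,
                                   struct vm_area_struct *vma,
                                   unsigned long address)
        {
                struct page *page = *pagep;

                if (ksm_might_need_to_copy(page, vma, address)) {
                        struct page *copy;

                        copy = ksm_does_need_to_copy(page, vma, address);
                        if (unlikely(!copy))    /* assumed OOM behaviour */
                                return -ENOMEM;
                        *pagep = copy;
                }
                return 0;
        }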
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index b288cb713b90..f549056fb20b 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -150,7 +150,7 @@
         int i;                                                  \
         preempt_disable();                                      \
         rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);   \
-        for_each_online_cpu(i) {                                \
+        for_each_possible_cpu(i) {                              \
                 arch_spinlock_t *lock;                          \
                 lock = &per_cpu(name##_lock, i);                \
                 arch_spin_lock(lock);                           \
@@ -161,7 +161,7 @@
 void name##_global_unlock(void) {                               \
         int i;                                                  \
         rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);      \
-        for_each_online_cpu(i) {                                \
+        for_each_possible_cpu(i) {                              \
                 arch_spinlock_t *lock;                          \
                 lock = &per_cpu(name##_lock, i);                \
                 arch_spin_unlock(lock);                         \
diff --git a/include/linux/libata.h b/include/linux/libata.h
index f010f18a0f86..45fb2967b66d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -335,6 +335,7 @@ enum {
         ATA_EHI_HOTPLUGGED      = (1 << 0),  /* could have been hotplugged */
         ATA_EHI_NO_AUTOPSY      = (1 << 2),  /* no autopsy */
         ATA_EHI_QUIET           = (1 << 3),  /* be quiet */
+        ATA_EHI_NO_RECOVERY     = (1 << 4),  /* no recovery */
 
         ATA_EHI_DID_SOFTRESET   = (1 << 16), /* already soft-reset this port */
         ATA_EHI_DID_HARDRESET   = (1 << 17), /* already soft-reset this port */
@@ -723,6 +724,7 @@ struct ata_port {
         struct ata_ioports      ioaddr; /* ATA cmd/ctl/dma register blocks */
         u8                      ctl;    /* cache of ATA control register */
         u8                      last_ctl;       /* Cache last written value */
+        struct ata_link*        sff_pio_task_link; /* link currently used */
         struct delayed_work     sff_pio_task;
 #ifdef CONFIG_ATA_BMDMA
         struct ata_bmdma_prd    *bmdma_prd;     /* BMDMA SG list */
@@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap);
 extern void ata_sff_irq_clear(struct ata_port *ap);
 extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
                             u8 status, int in_wq);
-extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
+extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
 extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
 extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
 extern unsigned int ata_sff_port_intr(struct ata_port *ap,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e6b1210772ce..74949fbef8c6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -864,6 +864,12 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+        return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
                 unsigned long old_addr, struct vm_area_struct *new_vma,
                 unsigned long new_addr, unsigned long len);
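vma_stack_continue() packages the "is the mapping ending at addr a downward-growing stack piece?" test used when deciding whether a page adjoining a stack guard page should be treated as part of the stack. A sketch of how a caller might apply it after looking up the neighbouring vma (the helper itself is hypothetical):

        #include <linux/mm.h>

        /* Does 'addr' sit immediately above a growing-down stack vma? */
        static int addr_abuts_stack(struct mm_struct *mm, unsigned long addr)
        {
                struct vm_area_struct *prev;

                find_vma_prev(mm, addr, &prev);
                return vma_stack_continue(prev, addr);
        }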
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 329a8faa6e37..245cdacee544 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -38,6 +38,8 @@
  *      [8:0] Byte/block count
  */
 
+#define R4_MEMORY_PRESENT (1 << 27)
+
 /*
   SDIO status in R5
   Type
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6e6e62648a4d..3984c4eb41fd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -284,6 +284,13 @@ struct zone {
         unsigned long watermark[NR_WMARK];
 
         /*
+         * When free pages are below this point, additional steps are taken
+         * when reading the number of free pages to avoid per-cpu counter
+         * drift allowing watermarks to be breached
+         */
+        unsigned long percpu_drift_mark;
+
+        /*
          * We don't know if the memory that we're going to allocate will be freeable
          * or/and it will be released eventually, so to avoid totally wasting several
          * GB of ram we must reserve some of the lower zone memory (otherwise we risk
@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
         return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 878cab4f5fcc..f363bc8fdc74 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -78,6 +78,14 @@ struct mutex_waiter {
 # include <linux/mutex-debug.h>
 #else
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
 # define mutex_init(mutex) \
 do {                                                    \
         static struct lock_class_key __key;             \
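The new comment is kernel-doc for the existing mutex_init() macro; behaviour is unchanged. For completeness, the documented usage looks like this (struct my_dev is illustrative):

        #include <linux/mutex.h>

        struct my_dev {
                struct mutex lock;
                int count;
        };

        static void my_dev_init(struct my_dev *dev)
        {
                mutex_init(&dev->lock);         /* starts out unlocked */
        }

        static void my_dev_bump(struct my_dev *dev)
        {
                mutex_lock(&dev->lock);
                dev->count++;
                mutex_unlock(&dev->lock);
        }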
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b1d17956a153..c8d95e369ff4 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1214,6 +1214,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
                                                 unsigned int devfn)
 { return NULL; }
 
+static inline int pci_domain_nr(struct pci_bus *bus)
+{ return 0; }
+
 #define dev_is_pci(d) (false)
 #define dev_is_pf(d) (false)
 #define dev_num_vf(d) (0)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f6a3b2d36cad..10d33309e9a6 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2300,6 +2300,8 @@
 #define PCI_DEVICE_ID_P2010             0x0079
 #define PCI_DEVICE_ID_P1020E            0x0100
 #define PCI_DEVICE_ID_P1020             0x0101
+#define PCI_DEVICE_ID_P1021E            0x0102
+#define PCI_DEVICE_ID_P1021             0x0103
 #define PCI_DEVICE_ID_P1011E            0x0108
 #define PCI_DEVICE_ID_P1011             0x0109
 #define PCI_DEVICE_ID_P1022E            0x0110
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index b8b9084527b1..49466b13c5c6 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -149,7 +149,7 @@ extern void __init percpu_init_late(void);
 
 #else /* CONFIG_SMP */
 
-#define per_cpu_ptr(ptr, cpu)   ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu)   ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
 
 /* can't distinguish from other static vars, always false */
 static inline bool is_kernel_percpu_address(unsigned long addr)
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 7415839ac890..5310d27abd2a 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -26,6 +26,9 @@ struct semaphore {
         .wait_list      = LIST_HEAD_INIT((name).wait_list),            \
 }
 
+#define DEFINE_SEMAPHORE(name) \
+        struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
 #define DECLARE_MUTEX(name)    \
         struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
 
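DEFINE_SEMAPHORE(name) expands exactly like the deprecated DECLARE_MUTEX(name): a semaphore initialized to a count of 1. The intended conversion is a one-line rename at each definition site (the identifier names below are illustrative):

        #include <linux/semaphore.h>
        #include <linux/errno.h>

        /* was: static DECLARE_MUTEX(fw_lock); */
        static DEFINE_SEMAPHORE(fw_lock);       /* binary semaphore, count 1 */

        static int fw_do_work(void)
        {
                if (down_interruptible(&fw_lock))
                        return -ERESTARTSYS;
                /* ... critical section ... */
                up(&fw_lock);
                return 0;
        }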
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 1ebc694a6d52..ef914061511e 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -77,8 +77,7 @@ struct serial_struct {
 #define PORT_16654      11
 #define PORT_16850      12
 #define PORT_RSA        13      /* RSA-DV II/S card */
-#define PORT_U6_16550A  14
-#define PORT_MAX        14
+#define PORT_MAX        13
 
 #define SERIAL_IO_PORT  0
 #define SERIAL_IO_HUB6  1
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 64458a9a8938..563e23400913 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -44,7 +44,8 @@
 #define PORT_RM9000     16      /* PMC-Sierra RM9xxx internal UART */
 #define PORT_OCTEON     17      /* Cavium OCTEON internal UART */
 #define PORT_AR7        18      /* Texas Instruments AR7 internal UART */
-#define PORT_MAX_8250   18      /* max port ID */
+#define PORT_U6_16550A  19      /* ST-Ericsson U6xxx internal UART */
+#define PORT_MAX_8250   19      /* max port ID */
 
 /*
  * ARM specific type numbers.  These are not currently guaranteed
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2fee51a11b73..7cdd63366f88 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -19,6 +19,7 @@ struct bio;
 #define SWAP_FLAG_PREFER        0x8000  /* set if swap priority specified */
 #define SWAP_FLAG_PRIO_MASK     0x7fff
 #define SWAP_FLAG_PRIO_SHIFT    0
+#define SWAP_FLAG_DISCARD       0x10000 /* discard swap cluster after use */
 
 static inline int current_is_kswapd(void)
 {
@@ -142,7 +143,7 @@ struct swap_extent {
 enum {
         SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
         SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
-        SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
+        SWP_DISCARDABLE = (1 << 2),     /* swapon+blkdev support discard */
         SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
         SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
         SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
@@ -315,6 +316,7 @@ extern long nr_swap_pages;
 extern long total_swap_pages;
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
-#ifdef CONFIG_HIBERNATION
-void hibernation_freeze_swap(void);
-void hibernation_thaw_swap(void);
-swp_entry_t get_swap_for_hibernation(int type);
-void swap_free_for_hibernation(swp_entry_t val);
-#endif
-
 /* linux/mm/thrash.c */
 extern struct mm_struct *swap_token_mm;
 extern void grab_swap_token(struct mm_struct *);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 7f43ccdc1d38..eaaea37b3b75 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
         return x;
 }
 
+/*
+ * More accurate version that also considers the currently pending
+ * deltas. For that we need to loop over all cpus to find the current
+ * deltas. There is no synchronization so the result cannot be
+ * exactly accurate either.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+                                        enum zone_stat_item item)
+{
+        long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+        int cpu;
+        for_each_online_cpu(cpu)
+                x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+        if (x < 0)
+                x = 0;
+#endif
+        return x;
+}
+
 extern unsigned long global_reclaimable_pages(void);
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
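zone_page_state_snapshot() additionally folds in each online CPU's pending vm_stat_diff, trading speed for much smaller drift. Together with percpu_drift_mark and the zone_nr_free_pages() declaration added to mmzone.h above, the intended pattern is to take the cheap reading normally and fall back to the snapshot only near the watermarks; a sketch consistent with those comments (the mm/ side is not part of this diff, so treat it as illustrative):

        #include <linux/mmzone.h>
        #include <linux/vmstat.h>

        unsigned long zone_nr_free_pages(struct zone *zone)
        {
                unsigned long nr = zone_page_state(zone, NR_FREE_PAGES);

                /* Only pay for the per-cpu summation when the cheap count
                 * is close enough to the watermarks for drift to matter. */
                if (nr < zone->percpu_drift_mark)
                        nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);

                return nr;
        }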
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4f9d277bcd9a..f11100f96482 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work);
 
 enum {
         WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
-        WORK_STRUCT_CWQ_BIT     = 1,    /* data points to cwq */
-        WORK_STRUCT_LINKED_BIT  = 2,    /* next work is linked to this one */
+        WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
+        WORK_STRUCT_CWQ_BIT     = 2,    /* data points to cwq */
+        WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
-        WORK_STRUCT_STATIC_BIT  = 3,    /* static initializer (debugobjects) */
-        WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
+        WORK_STRUCT_STATIC_BIT  = 4,    /* static initializer (debugobjects) */
+        WORK_STRUCT_COLOR_SHIFT = 5,    /* color for workqueue flushing */
 #else
-        WORK_STRUCT_COLOR_SHIFT = 3,    /* color for workqueue flushing */
+        WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
 #endif
 
         WORK_STRUCT_COLOR_BITS  = 4,
 
         WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
+        WORK_STRUCT_DELAYED     = 1 << WORK_STRUCT_DELAYED_BIT,
         WORK_STRUCT_CWQ         = 1 << WORK_STRUCT_CWQ_BIT,
         WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -59,8 +61,8 @@ enum {
 
         /*
          * Reserve 7 bits off of cwq pointer w/ debugobjects turned
-         * off.  This makes cwqs aligned to 128 bytes which isn't too
-         * excessive while allowing 15 workqueue flush colors.
+         * off.  This makes cwqs aligned to 256 bytes and allows 15
+         * workqueue flush colors.
          */
         WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                   WORK_STRUCT_COLOR_BITS,
@@ -241,6 +243,8 @@ enum {
         WQ_HIGHPRI              = 1 << 4, /* high priority */
         WQ_CPU_INTENSIVE        = 1 << 5, /* cpu instensive workqueue */
 
         WQ_DYING                = 1 << 6, /* internal: workqueue is dying */
+
         WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
         WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
         WQ_DFL_ACTIVE           = WQ_MAX_ACTIVE / 2,
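The new WORK_STRUCT_DELAYED bit pushes every later flag up by one, which is what the 128-to-256-byte change in the alignment comment reflects: the flags and flush color occupy the low WORK_STRUCT_FLAG_BITS of work->data, so a cwq pointer stored there needs 1 << WORK_STRUCT_FLAG_BITS alignment (the retained 'Reserve 7 bits' wording predates the renumbering). With debugobjects off the arithmetic is now:

        #include <linux/workqueue.h>

        /* !CONFIG_DEBUG_OBJECTS_WORK:
         *   bit 0 pending, bit 1 delayed (new), bit 2 cwq, bit 3 linked,
         *   bits 4..7 flush color (WORK_STRUCT_COLOR_BITS == 4)
         * so WORK_STRUCT_FLAG_BITS == 4 + 4 == 8, and a cwq pointer kept in
         * work->data must be 1 << 8 == 256-byte aligned (it was 7 bits,
         * i.e. 128 bytes, before the DELAYED bit existed).
         */
        #define CWQ_MIN_ALIGN   (1UL << WORK_STRUCT_FLAG_BITS)  /* illustrative */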
