Diffstat (limited to 'include/linux')
 include/linux/ata.h          |   2
 include/linux/bio.h          |   4
 include/linux/bootmem.h      |  36
 include/linux/compiler-gcc.h |  10
 include/linux/cpufreq.h      |   1
 include/linux/dmaengine.h    |   7
 include/linux/dmar.h         |  52
 include/linux/hdreg.h        |   1
 include/linux/ide.h          |   2
 include/linux/intel-iommu.h  |   5
 include/linux/libata.h       |   6
 include/linux/lockd/lockd.h  |   8
 include/linux/mm.h           |   3
 include/linux/mm_types.h     |   3
 include/linux/netdevice.h    |   1
 include/linux/nfs_xdr.h      |   2
 include/linux/nfsacl.h       |   3
 include/linux/percpu.h       | 112
 include/linux/rcuclassic.h   |   6
 include/linux/rcupdate.h     |   4
 include/linux/rcupreempt.h   |  15
 include/linux/rcutree.h      |   6
 include/linux/sched.h        |   7
 include/linux/serio.h        |   2
 include/linux/vmalloc.h      |   4
 25 files changed, 224 insertions(+), 78 deletions(-)
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 08a86d5cdf1b..9a061accd8b8 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -89,6 +89,8 @@ enum {
 ATA_ID_DLF = 128,
 ATA_ID_CSFO = 129,
 ATA_ID_CFA_POWER = 160,
+ATA_ID_CFA_KEY_MGMT = 162,
+ATA_ID_CFA_MODES = 163,
 ATA_ID_ROT_SPEED = 217,
 ATA_ID_PIO4 = (1 << 1),
 
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 1b16108a5417..d8bd43bfdcf5 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -531,7 +531,7 @@ extern void bio_integrity_endio(struct bio *, int);
 extern void bio_integrity_advance(struct bio *, unsigned int);
 extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
 extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
-extern int bio_integrity_clone(struct bio *, struct bio *, struct bio_set *);
+extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *);
 extern int bioset_integrity_create(struct bio_set *, int);
 extern void bioset_integrity_free(struct bio_set *);
 extern void bio_integrity_init_slab(void);
@@ -542,7 +542,7 @@ extern void bio_integrity_init_slab(void);
 #define bioset_integrity_create(a, b) (0)
 #define bio_integrity_prep(a) (0)
 #define bio_integrity_enabled(a) (0)
-#define bio_integrity_clone(a, b, c) (0)
+#define bio_integrity_clone(a, b, c,d ) (0)
 #define bioset_integrity_free(a) do { } while (0)
 #define bio_integrity_free(a, b) do { } while (0)
 #define bio_integrity_endio(a, b) do { } while (0)
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 95837bfb5256..455d83219fae 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
 #define BOOTMEM_DEFAULT 0
 #define BOOTMEM_EXCLUSIVE (1<<0)
 
+extern int reserve_bootmem(unsigned long addr,
+unsigned long size,
+int flags);
 extern int reserve_bootmem_node(pg_data_t *pgdat,
 unsigned long physaddr,
 unsigned long size,
 int flags);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
-#endif
 
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
 unsigned long align,
 unsigned long goal);
-extern void *__alloc_bootmem(unsigned long size,
+extern void *__alloc_bootmem_nopanic(unsigned long size,
 unsigned long align,
 unsigned long goal);
-extern void *__alloc_bootmem_low(unsigned long size,
-unsigned long align,
-unsigned long goal);
 extern void *__alloc_bootmem_node(pg_data_t *pgdat,
 unsigned long size,
 unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 unsigned long size,
 unsigned long align,
 unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+unsigned long align,
+unsigned long goal);
 extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 unsigned long size,
 unsigned long align,
 unsigned long goal);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+
 #define alloc_bootmem(x) \
 __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
 __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_nopanic(x) \
 __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x) \
 __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
 __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+
+#define alloc_bootmem_low(x) \
+__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages(x) \
+__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_low_pages_node(pgdat, x) \
 __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
 int flags);
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 1514d534deeb..a3ed7cb8ca34 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -52,7 +52,15 @@
 #define __deprecated __attribute__((deprecated))
 #define __packed __attribute__((packed))
 #define __weak __attribute__((weak))
-#define __naked __attribute__((naked))
+
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked) to trace
+ * naked functions because then mcount is called without stack and frame pointer
+ * being set up and there is no chance to restore the lr register to the value
+ * before mcount was called.
+ */
+#define __naked __attribute__((naked)) notrace
+
 #define __noreturn __attribute__((noreturn))
 
 /*
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 384b38d3e8e2..161042746afc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -234,7 +234,6 @@ struct cpufreq_driver {
 int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
 int (*resume) (struct cpufreq_policy *policy);
 struct freq_attr **attr;
-bool hide_interface;
 };
 
 /* flags */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index f0413845f20e..1956c8d46d32 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -97,7 +97,6 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
 
 /**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
- * @refcount: local_t used for open-coded "bigref" counting
  * @memcpy_count: transaction counter
  * @bytes_transferred: byte counter
  */
@@ -114,9 +113,6 @@ struct dma_chan_percpu {
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
- * @refcount: kref, used in "bigref" slow-mode
- * @slow_ref: indicates that the DMA channel is free
- * @rcu: the DMA channel's RCU head
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client-count: how many clients are using this channel
@@ -213,8 +209,6 @@ struct dma_async_tx_descriptor {
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
- * @refcount: reference count
- * @done: IO completion struct
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
  * @device_alloc_chan_resources: allocate resources and return the
@@ -227,6 +221,7 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
  * @device_terminate_all: terminate all pending operations
+ * @device_is_tx_complete: poll for transaction completion
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index f28440784cf0..2f3427468956 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -24,10 +24,10 @@
 #include <linux/acpi.h>
 #include <linux/types.h>
 #include <linux/msi.h>
+#include <linux/irqreturn.h>
 
-#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
 struct intel_iommu;
-
+#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
 struct dmar_drhd_unit {
 struct list_head list; /* list of drhd units */
 struct acpi_dmar_header *hdr; /* ACPI header */
@@ -49,7 +49,7 @@ extern int dmar_dev_scope_init(void);
 
 /* Intel IOMMU detection */
 extern void detect_intel_iommu(void);
-
+extern int enable_drhd_fault_handling(void);
 
 extern int parse_ioapics_under_ir(void);
 extern int alloc_iommu(struct dmar_drhd_unit *);
@@ -63,12 +63,12 @@ static inline int dmar_table_init(void)
 {
 return -ENODEV;
 }
+static inline int enable_drhd_fault_handling(void)
+{
+return -1;
+}
 #endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */
 
-#ifdef CONFIG_INTR_REMAP
-extern int intr_remapping_enabled;
-extern int enable_intr_remapping(int);
-
 struct irte {
 union {
 struct {
@@ -97,6 +97,10 @@ struct irte {
 __u64 high;
 };
 };
+#ifdef CONFIG_INTR_REMAP
+extern int intr_remapping_enabled;
+extern int enable_intr_remapping(int);
+
 extern int get_irte(int irq, struct irte *entry);
 extern int modify_irte(int irq, struct irte *irte_modified);
 extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
@@ -111,14 +115,40 @@ extern int irq_remapped(int irq);
 extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
 extern struct intel_iommu *map_ioapic_to_ir(int apic);
 #else
+static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
+{
+return -1;
+}
+static inline int modify_irte(int irq, struct irte *irte_modified)
+{
+return -1;
+}
+static inline int free_irte(int irq)
+{
+return -1;
+}
+static inline int map_irq_to_irte_handle(int irq, u16 *sub_handle)
+{
+return -1;
+}
+static inline int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
+u16 sub_handle)
+{
+return -1;
+}
+static inline struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
+{
+return NULL;
+}
+static inline struct intel_iommu *map_ioapic_to_ir(int apic)
+{
+return NULL;
+}
 #define irq_remapped(irq) (0)
 #define enable_intr_remapping(mode) (-1)
 #define intr_remapping_enabled (0)
 #endif
 
-#ifdef CONFIG_DMAR
-extern const char *dmar_get_fault_reason(u8 fault_reason);
-
 /* Can't use the common MSI interrupt functions
  * since DMAR is not a pci device
  */
@@ -127,8 +157,10 @@ extern void dmar_msi_mask(unsigned int irq);
 extern void dmar_msi_read(int irq, struct msi_msg *msg);
 extern void dmar_msi_write(int irq, struct msi_msg *msg);
 extern int dmar_set_interrupt(struct intel_iommu *iommu);
+extern irqreturn_t dmar_fault(int irq, void *dev_id);
 extern int arch_setup_dmar_msi(unsigned int irq);
 
+#ifdef CONFIG_DMAR
 extern int iommu_detected, no_iommu;
 extern struct list_head dmar_rmrr_units;
 struct dmar_rmrr_unit {
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index c37e9241fae7..ed21bd3dbd25 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -511,7 +511,6 @@ struct hd_driveid {
 unsigned short words69_70[2]; /* reserved words 69-70
  * future command overlap and queuing
  */
-/* HDIO_GET_IDENTITY currently returns only words 0 through 70 */
 unsigned short words71_74[4]; /* reserved words 71-74
  * for IDENTIFY PACKET DEVICE command
  */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index fe235b65207e..25087aead657 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -797,6 +797,7 @@ typedef struct hwif_s {
 struct scatterlist *sg_table;
 int sg_max_nents; /* Maximum number of entries in it */
 int sg_nents; /* Current number of entries in it */
+int orig_sg_nents;
 int sg_dma_direction; /* dma transfer direction */
 
 /* data phase of the active command (currently only valid for PIO/DMA) */
@@ -866,6 +867,7 @@ struct ide_host {
 unsigned int n_ports;
 struct device *dev[2];
 unsigned int (*init_chipset)(struct pci_dev *);
+irq_handler_t irq_handler;
 unsigned long host_flags;
 void *host_priv;
 ide_hwif_t *cur_port; /* for hosts requiring serialization */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d2e3cbfba14f..78c1262e8704 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -292,6 +292,8 @@ struct intel_iommu {
 spinlock_t register_lock; /* protect register handling */
 int seq_id; /* sequence id of the iommu */
 int agaw; /* agaw of this iommu */
+unsigned int irq;
+unsigned char name[13]; /* Device Name */
 
 #ifdef CONFIG_DMAR
 unsigned long *domain_ids; /* bitmap of domains */
@@ -299,8 +301,6 @@ struct intel_iommu {
 spinlock_t lock; /* protect context, domain ids */
 struct root_entry *root_entry; /* virtual address */
 
-unsigned int irq;
-unsigned char name[7]; /* Device Name */
 struct iommu_flush flush;
 #endif
 struct q_inval *qi; /* Queued invalidation info */
@@ -321,6 +321,7 @@ extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 extern int alloc_iommu(struct dmar_drhd_unit *drhd);
 extern void free_iommu(struct intel_iommu *iommu);
 extern int dmar_enable_qi(struct intel_iommu *iommu);
+extern void dmar_disable_qi(struct intel_iommu *iommu);
 extern void qi_global_iec(struct intel_iommu *iommu);
 
 extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5d87bc09a1f5..dc18b87ed722 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -275,7 +275,7 @@ enum {
  * advised to wait only for the following duration before
  * doing SRST.
  */
-ATA_TMOUT_PMP_SRST_WAIT = 1000,
+ATA_TMOUT_PMP_SRST_WAIT = 5000,
 
 /* ATA bus states */
 BUS_UNKNOWN = 0,
@@ -530,6 +530,7 @@ struct ata_queued_cmd {
 unsigned long flags; /* ATA_QCFLAG_xxx */
 unsigned int tag;
 unsigned int n_elem;
+unsigned int orig_n_elem;
 
 int dma_dir;
 
@@ -750,7 +751,8 @@ struct ata_port {
 acpi_handle acpi_handle;
 struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
 #endif
-u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
+/* owned by EH */
+u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
 };
 
 /* The following initializer overrides a method to NULL whether one of
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index aa6fe7026de7..51855dfd8adb 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -346,6 +346,7 @@ static inline int __nlm_cmp_addr4(const struct sockaddr *sap1,
 return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
 }
 
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 static inline int __nlm_cmp_addr6(const struct sockaddr *sap1,
 const struct sockaddr *sap2)
 {
@@ -353,6 +354,13 @@ static inline int __nlm_cmp_addr6(const struct sockaddr *sap1,
 const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
 return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
 }
+#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+static inline int __nlm_cmp_addr6(const struct sockaddr *sap1,
+const struct sockaddr *sap2)
+{
+return 0;
+}
+#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
 
 /*
  * Compare two host addresses
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 065cdf8c09fb..b1ea37fc7a24 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -104,6 +104,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
+#define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
 
 #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -145,7 +146,7 @@ extern pgprot_t protection_map[16];
  */
 static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
 {
-return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
+return (vma->vm_flags & VM_PFN_AT_MMAP);
 }
 
 static inline int is_pfn_mapping(struct vm_area_struct *vma)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 92915e81443f..d84feb7bdbf0 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -276,4 +276,7 @@ struct mm_struct {
 #endif
 };
 
+/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
+#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)
+
 #endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ec54785d34f9..659366734f3f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1079,6 +1079,7 @@ extern void synchronize_net(void);
 extern int register_netdevice_notifier(struct notifier_block *nb);
 extern int unregister_netdevice_notifier(struct notifier_block *nb);
 extern int init_dummy_netdev(struct net_device *dev);
+extern void netdev_resync_ops(struct net_device *dev);
 
 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index a550b528319f..2e5f00066afd 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -406,6 +406,8 @@ struct nfs3_setaclargs {
 int mask;
 struct posix_acl * acl_access;
 struct posix_acl * acl_default;
+size_t len;
+unsigned int npages;
 struct page ** pages;
 };
 
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h
index 54487a99beb8..43011b69297c 100644
--- a/include/linux/nfsacl.h
+++ b/include/linux/nfsacl.h
@@ -37,6 +37,9 @@
 #define NFSACL_MAXPAGES ((2*(8+12*NFS_ACL_MAX_ENTRIES) + PAGE_SIZE-1) \
 >> PAGE_SHIFT)
 
+#define NFS_ACL_MAX_ENTRIES_INLINE (5)
+#define NFS_ACL_INLINE_BUFSIZE ((2*(2+3*NFS_ACL_MAX_ENTRIES_INLINE)) << 2)
+
 static inline unsigned int
 nfsacl_size(struct posix_acl *acl_access, struct posix_acl *acl_default)
 {
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 3577ffd90d45..ee5615d65211 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -5,6 +5,7 @@
 #include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
 #include <linux/cpumask.h>
+#include <linux/pfn.h>
 
 #include <asm/percpu.h>
 
@@ -52,17 +53,18 @@
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
 
-/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
-#ifndef PERCPU_ENOUGH_ROOM
+/* enough to cover all DEFINE_PER_CPUs in modules */
 #ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE 8192
+#define PERCPU_MODULE_RESERVE (8 << 10)
 #else
 #define PERCPU_MODULE_RESERVE 0
 #endif
 
+#ifndef PERCPU_ENOUGH_ROOM
 #define PERCPU_ENOUGH_ROOM \
-(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
-#endif /* PERCPU_ENOUGH_ROOM */
+(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+PERCPU_MODULE_RESERVE)
+#endif
 
 /*
  * Must be an lvalue. Since @var must be a simple identifier,
@@ -76,52 +78,94 @@
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk for dynamic percpu allocation if arch is
+ * manually allocating and mapping it for faster access (as a part of
+ * large page mapping for example).
+ *
+ * The following values give between one and two pages of free space
+ * after typical minimal boot (2-way SMP, single disk and NIC) with
+ * both defconfig and a distro config on x86_64 and 32. More
+ * intelligent way to determine this would be nice.
+ */
+#if BITS_PER_LONG > 32
+#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#else
+#define PERCPU_DYNAMIC_RESERVE (12 << 10)
+#endif
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+size_t static_size, size_t reserved_size,
+ssize_t dyn_size, ssize_t unit_size,
+void *base_addr,
+pcpu_populate_pte_fn_t populate_pte_fn);
+
+extern ssize_t __init pcpu_embed_first_chunk(
+size_t static_size, size_t reserved_size,
+ssize_t dyn_size, ssize_t unit_size);
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated. Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+extern void *__alloc_reserved_percpu(size_t size, size_t align);
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 struct percpu_data {
 void *ptrs[1];
 };
 
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-/*
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */
-#define percpu_ptr(ptr, cpu) \
-({ \
-struct percpu_data *__p = __percpu_disguise(ptr); \
-(__typeof__(ptr))__p->ptrs[(cpu)]; \
+
+#define per_cpu_ptr(ptr, cpu) \
+({ \
+struct percpu_data *__p = __percpu_disguise(ptr); \
+(__typeof__(ptr))__p->ptrs[(cpu)]; \
 })
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
 
 #else /* CONFIG_SMP */
 
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
 {
-return kzalloc(size, gfp);
+/*
+ * Can't easily make larger alignment work with kmalloc. WARN
+ * on it. Larger alignment should only be used for module
+ * percpu sections on SMP for which this path isn't used.
+ */
+WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+return kzalloc(size, GFP_KERNEL);
 }
 
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
 {
-kfree(__pdata);
+kfree(p);
 }
 
 #endif /* CONFIG_SMP */
 
-#define percpu_alloc_mask(size, gfp, mask) \
-__percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
-cpu_possible_map)
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr) percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
+#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
+__alignof__(type))
 
 #endif /* __LINUX_PERCPU_H */
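Note: the percpu.h hunks above replace the old percpu_alloc()/percpu_ptr() interface with __alloc_percpu()/free_percpu() and a unified per_cpu_ptr() accessor. The following is only a hypothetical usage sketch of that reworked interface; the structure and function names in it are illustrative and not part of the patch.

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical example, not from the patch: a per-CPU event counter
 * allocated with alloc_percpu() and accessed via per_cpu_ptr(). */
struct hyp_counter {
        unsigned long events;
};

static struct hyp_counter *hyp_counters;

static int hyp_counters_init(void)
{
        /* alloc_percpu() derives size and alignment from the type */
        hyp_counters = alloc_percpu(struct hyp_counter);
        if (!hyp_counters)
                return -ENOMEM;
        return 0;
}

static void hyp_counter_inc(void)
{
        /* non-atomic access to this CPU's copy, so pin the CPU first */
        per_cpu_ptr(hyp_counters, get_cpu())->events++;
        put_cpu();
}

static void hyp_counters_exit(void)
{
        free_percpu(hyp_counters);
}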
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index f3f697df1d71..80044a4f3ab9 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
 #define rcu_enter_nohz() do { } while (0)
 #define rcu_exit_nohz() do { } while (0)
 
+/* A context switch is a grace period for rcuclassic. */
+static inline int rcu_blocking_is_gp(void)
+{
+return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 921340a7b71c..528343e6da51 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,6 +52,9 @@ struct rcu_head {
 void (*func)(struct rcu_head *head);
 };
 
+/* Internal to kernel, but needed by rcupreempt.h. */
+extern int rcu_scheduler_active;
+
 #if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
 #elif defined(CONFIG_TREE_RCU)
@@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);
 
 /* Internal to kernel */
 extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
 extern int rcu_needs_cpu(int cpu);
 
 #endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 3e05c09b54a2..74304b4538d8 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
 #define rcu_exit_nohz() do { } while (0)
 #endif /* CONFIG_NO_HZ */
 
+/*
+ * A context switch is a grace period for rcupreempt synchronize_rcu()
+ * only during early boot, before the scheduler has been initialized.
+ * So, how the heck do we get a context switch? Well, if the caller
+ * invokes synchronize_rcu(), they are willing to accept a context
+ * switch, so we simply pretend that one happened.
+ *
+ * After boot, there might be a blocked or preempted task in an RCU
+ * read-side critical section, so we cannot then take the fastpath.
+ */
+static inline int rcu_blocking_is_gp(void)
+{
+return num_online_cpus() == 1 && !rcu_scheduler_active;
+}
+
 #endif /* __LINUX_RCUPREEMPT_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index d4368b7975c3..a722fb67bb2d 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
 }
 #endif /* CONFIG_NO_HZ */
 
+/* A context switch is a grace period for rcutree. */
+static inline int rcu_blocking_is_gp(void)
+{
+return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUTREE_H */
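Note: each RCU flavour above (rcuclassic, rcupreempt, rcutree) gains an rcu_blocking_is_gp() helper. The following is only a hypothetical sketch of how a blocking grace-period primitive could use such a helper as a fast path; the real synchronize_rcu() implementations live under kernel/ and differ per flavour.

/* Hypothetical sketch, not the actual kernel code. */
void example_synchronize_rcu(void)
{
        if (rcu_blocking_is_gp())
                return; /* only one CPU online (and, for preemptible RCU,
                         * the scheduler not yet active), so no reader can
                         * be running concurrently: the grace period is free */

        /* otherwise: queue a callback and wait for a real grace period */
}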
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f0a50b20e8a0..46d680643f89 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1418,6 +1418,9 @@ struct task_struct {
 #endif
 };
 
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
@@ -2303,9 +2306,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
 long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif
 
+extern int task_can_switch_user(struct user_struct *up,
+struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 1bcb357a01a1..e0417e4d3f15 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -212,7 +212,7 @@ static inline void serio_unpin_driver(struct serio *serio)
 #define SERIO_FUJITSU 0x35
 #define SERIO_ZHENHUA 0x36
 #define SERIO_INEXIO 0x37
-#define SERIO_TOUCHIT213 0x37
+#define SERIO_TOUCHIT213 0x38
 #define SERIO_W8001 0x39
 
 #endif
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 9c0890c7a06a..a43ebec3a7b9 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 struct page ***pages);
+extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
+pgprot_t prot, struct page **pages);
+extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 
 /* Allocate/destroy a 'vmalloc' VM area. */
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
  */
 extern rwlock_t vmlist_lock;
 extern struct vm_struct *vmlist;
+extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 
 #endif /* _LINUX_VMALLOC_H */