30 files changed, 174 insertions, 73 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c07f72205909..738bdc6b0f8b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1913,6 +1913,18 @@ config DMAR_DEFAULT_ON
 	  recommended you say N here while the DMAR code remains
 	  experimental.
 
+config DMAR_BROKEN_GFX_WA
+	def_bool n
+	prompt "Workaround broken graphics drivers (going away soon)"
+	depends on DMAR
+	---help---
+	  Current Graphics drivers tend to use physical address
+	  for DMA and avoid using DMA APIs. Setting this config
+	  option permits the IOMMU driver to set a unity map for
+	  all the OS-visible memory. Hence the driver can continue
+	  to use physical addresses for DMA, at least until this
+	  option is removed in the 2.6.32 kernel.
+
 config DMAR_FLOPPY_WA
 	def_bool y
 	depends on DMAR
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 6c327b852e23..430d5b24af7b 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -26,6 +26,8 @@ CFLAGS_tsc.o := $(nostackp)
 CFLAGS_paravirt.o	:= $(nostackp)
 GCOV_PROFILE_vsyscall_64.o	:= n
 GCOV_PROFILE_hpet.o		:= n
+GCOV_PROFILE_tsc.o		:= n
+GCOV_PROFILE_paravirt.o		:= n
 
 obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 543fccac81bb..f74edae5cb4c 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -196,8 +196,8 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
 {
 	int channel, bandwidth = 0;
 
-	fw_iso_resource_manage(card, generation, 1ULL << 31,
-			&channel, &bandwidth, true);
+	fw_iso_resource_manage(card, generation, 1ULL << 31, &channel,
+			&bandwidth, true, card->bm_transaction_data);
 	if (channel == 31) {
 		card->broadcast_channel_allocated = true;
 		device_for_each_child(card->device, (void *)(long)generation,
@@ -230,7 +230,6 @@ static void fw_card_bm_work(struct work_struct *work)
 	bool do_reset = false;
 	bool root_device_is_running;
 	bool root_device_is_cmc;
-	__be32 lock_data[2];
 
 	spin_lock_irqsave(&card->lock, flags);
 
@@ -273,22 +272,23 @@ static void fw_card_bm_work(struct work_struct *work)
 			goto pick_me;
 		}
 
-		lock_data[0] = cpu_to_be32(0x3f);
-		lock_data[1] = cpu_to_be32(local_id);
+		card->bm_transaction_data[0] = cpu_to_be32(0x3f);
+		card->bm_transaction_data[1] = cpu_to_be32(local_id);
 
 		spin_unlock_irqrestore(&card->lock, flags);
 
 		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
 				irm_id, generation, SCODE_100,
 				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
-				lock_data, sizeof(lock_data));
+				card->bm_transaction_data,
+				sizeof(card->bm_transaction_data));
 
 		if (rcode == RCODE_GENERATION)
 			/* Another bus reset, BM work has been rescheduled. */
 			goto out;
 
 		if (rcode == RCODE_COMPLETE &&
-		    lock_data[0] != cpu_to_be32(0x3f)) {
+		    card->bm_transaction_data[0] != cpu_to_be32(0x3f)) {
 
 			/* Somebody else is BM. Only act as IRM. */
 			if (local_id == irm_id)
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index d1d30c615b0f..ced186d7e9a9 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -125,6 +125,7 @@ struct iso_resource {
 	int generation;
 	u64 channels;
 	s32 bandwidth;
+	__be32 transaction_data[2];
 	struct iso_resource_event *e_alloc, *e_dealloc;
 };
 
@@ -1049,7 +1050,8 @@ static void iso_resource_work(struct work_struct *work)
 			r->channels, &channel, &bandwidth,
 			todo == ISO_RES_ALLOC ||
 			todo == ISO_RES_REALLOC ||
-			todo == ISO_RES_ALLOC_ONCE);
+			todo == ISO_RES_ALLOC_ONCE,
+			r->transaction_data);
 	/*
 	 * Is this generation outdated already? As long as this resource sticks
 	 * in the idr, it will be scheduled again for a newer generation or at
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 166f19c6d38d..110e731f5574 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -177,9 +177,8 @@ EXPORT_SYMBOL(fw_iso_context_stop);
  */
 
 static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
-			    int bandwidth, bool allocate)
+			    int bandwidth, bool allocate, __be32 data[2])
 {
-	__be32 data[2];
 	int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
 
 	/*
@@ -215,9 +214,9 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
 }
 
 static int manage_channel(struct fw_card *card, int irm_id, int generation,
-			  u32 channels_mask, u64 offset, bool allocate)
+			  u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
 {
-	__be32 data[2], c, all, old;
+	__be32 c, all, old;
 	int i, retry = 5;
 
 	old = all = allocate ? cpu_to_be32(~0) : 0;
@@ -260,7 +259,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 }
 
 static void deallocate_channel(struct fw_card *card, int irm_id,
-			       int generation, int channel)
+			       int generation, int channel, __be32 buffer[2])
 {
 	u32 mask;
 	u64 offset;
@@ -269,7 +268,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
 	offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
 				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
 
-	manage_channel(card, irm_id, generation, mask, offset, false);
+	manage_channel(card, irm_id, generation, mask, offset, false, buffer);
 }
 
 /**
@@ -298,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
  */
 void fw_iso_resource_manage(struct fw_card *card, int generation,
 			    u64 channels_mask, int *channel, int *bandwidth,
-			    bool allocate)
+			    bool allocate, __be32 buffer[2])
 {
 	u32 channels_hi = channels_mask;	/* channels 31...0 */
 	u32 channels_lo = channels_mask >> 32;	/* channels 63...32 */
@@ -310,10 +309,12 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
 
 	if (channels_hi)
 		c = manage_channel(card, irm_id, generation, channels_hi,
-				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
+				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
+				allocate, buffer);
 	if (channels_lo && c < 0) {
 		c = manage_channel(card, irm_id, generation, channels_lo,
-				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
+				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
+				allocate, buffer);
 		if (c >= 0)
 			c += 32;
 	}
@@ -325,12 +326,13 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
 	if (*bandwidth == 0)
 		return;
 
-	ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
+	ret = manage_bandwidth(card, irm_id, generation, *bandwidth,
+			       allocate, buffer);
 	if (ret < 0)
 		*bandwidth = 0;
 
 	if (allocate && ret < 0 && c >= 0) {
-		deallocate_channel(card, irm_id, generation, c);
+		deallocate_channel(card, irm_id, generation, c, buffer);
 		*channel = ret;
 	}
 }
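Taken together, the core-iso.c changes mean fw_iso_resource_manage() and its helpers no longer keep the CSR lock payload in a local __be32[2]; the caller now supplies that scratch buffer (core-card.c passes card->bm_transaction_data, core-cdev.c passes r->transaction_data), presumably so the payload no longer lives on a stack buffer when the transaction layer comes to DMA-map it. A minimal caller sketch of the new convention, modelled on the allocate_broadcast_channel() hunk earlier in this patch rather than taken verbatim from the tree:

	/* Sketch only: the trailing argument is the caller-owned scratch buffer. */
	static void try_allocate_channel_31(struct fw_card *card, int generation,
					    __be32 scratch[2])
	{
		int channel, bandwidth = 0;

		fw_iso_resource_manage(card, generation, 1ULL << 31,
				       &channel, &bandwidth, true, scratch);
		if (channel == 31)
			card->broadcast_channel_allocated = true;
	}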
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index c3cfc647e5e3..6052816be353 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -120,7 +120,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
 
 int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
 void fw_iso_resource_manage(struct fw_card *card, int generation,
-		u64 channels_mask, int *channel, int *bandwidth, bool allocate);
+		u64 channels_mask, int *channel, int *bandwidth,
+		bool allocate, __be32 buffer[2]);
 
 
 /* -topology */
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 24c45635376a..8d51568ee143 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -201,6 +201,12 @@ static struct fw_device *target_device(struct sbp2_target *tgt)
 #define SBP2_CYCLE_LIMIT		(0xc8 << 12)	/* 200 125us cycles */
 
 /*
+ * There is no transport protocol limit to the CDB length, but we implement
+ * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
+ */
+#define SBP2_MAX_CDB_SIZE		16
+
+/*
  * The default maximum s/g segment size of a FireWire controller is
  * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
  * be quadlet-aligned, we set the length limit to 0xffff & ~3.
@@ -312,7 +318,7 @@ struct sbp2_command_orb {
 		struct sbp2_pointer next;
 		struct sbp2_pointer data_descriptor;
 		__be32 misc;
-		u8 command_block[12];
+		u8 command_block[SBP2_MAX_CDB_SIZE];
 	} request;
 	struct scsi_cmnd *cmd;
 	scsi_done_fn_t done;
@@ -1146,6 +1152,8 @@ static int sbp2_probe(struct device *dev)
 	if (fw_device_enable_phys_dma(device) < 0)
 		goto fail_shost_put;
 
+	shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
+
 	if (scsi_add_host(shost, &unit->device) < 0)
 		goto fail_shost_put;
 
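The "2 TB" in the new comment falls out of CDB sizes: the commands that fit the old 12-byte command_block[] (READ(10)/(12), WRITE(10)/(12)) carry 32-bit logical block addresses, while READ(16)/WRITE(16) need the full 16 bytes. A quick illustrative check of that limit, assuming 512-byte sectors (plain userspace C, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		/* A 32-bit LBA can address 2^32 sectors of 512 bytes each. */
		unsigned long long max_bytes = (1ULL << 32) * 512;

		printf("%llu bytes = %llu TiB\n", max_bytes, max_bytes >> 40);	/* 2 TiB */
		return 0;
	}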
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 83b734aec923..52b25f8b111d 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -880,6 +880,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
 	}
 
 	shost->hostdata[0] = (unsigned long)lu;
+	shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
 
 	if (!scsi_add_host(shost, &ud->device)) {
 		lu->shost = shost;
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index c5036f1cc5b0..64a3a66a8a39 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -25,6 +25,12 @@
 #define SBP2_DEVICE_NAME		"sbp2"
 
 /*
+ * There is no transport protocol limit to the CDB length, but we implement
+ * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
+ */
+#define SBP2_MAX_CDB_SIZE		16
+
+/*
  * SBP-2 specific definitions
  */
 
@@ -51,7 +57,7 @@ struct sbp2_command_orb {
 	u32 data_descriptor_hi;
 	u32 data_descriptor_lo;
 	u32 misc;
-	u8 cdb[12];
+	u8 cdb[SBP2_MAX_CDB_SIZE];
 } __attribute__((packed));
 
 #define SBP2_LOGIN_REQUEST		0x0
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 53075424a434..360fb67a30d7 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2117,6 +2117,47 @@ static int domain_add_dev_info(struct dmar_domain *domain,
 	return 0;
 }
 
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+	if (iommu_identity_mapping == 2)
+		return IS_GFX_DEVICE(pdev);
+
+	/*
+	 * We want to start off with all devices in the 1:1 domain, and
+	 * take them out later if we find they can't access all of memory.
+	 *
+	 * However, we can't do this for PCI devices behind bridges,
+	 * because all PCI devices behind the same bridge will end up
+	 * with the same source-id on their transactions.
+	 *
+	 * Practically speaking, we can't change things around for these
+	 * devices at run-time, because we can't be sure there'll be no
+	 * DMA transactions in flight for any of their siblings.
+	 *
+	 * So PCI devices (unless they're on the root bus) as well as
+	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+	 * the 1:1 domain, just in _case_ one of their siblings turns out
+	 * not to be able to map all of memory.
+	 */
+	if (!pdev->is_pcie) {
+		if (!pci_is_root_bus(pdev->bus))
+			return 0;
+		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+			return 0;
+	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+		return 0;
+
+	/*
+	 * At boot time, we don't yet know if devices will be 64-bit capable.
+	 * Assume that they will -- if they turn out not to be, then we can
+	 * take them out of the 1:1 domain later.
+	 */
+	if (!startup)
+		return pdev->dma_mask > DMA_BIT_MASK(32);
+
+	return 1;
+}
+
 static int iommu_prepare_static_identity_mapping(void)
 {
 	struct pci_dev *pdev = NULL;
@@ -2127,16 +2168,18 @@ static int iommu_prepare_static_identity_mapping(void)
 		return -EFAULT;
 
 	for_each_pci_dev(pdev) {
-		printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-		       pci_name(pdev));
+		if (iommu_should_identity_map(pdev, 1)) {
+			printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
+			       pci_name(pdev));
 
-		ret = domain_context_mapping(si_domain, pdev,
-					     CONTEXT_TT_MULTI_LEVEL);
-		if (ret)
-			return ret;
-		ret = domain_add_dev_info(si_domain, pdev);
-		if (ret)
-			return ret;
+			ret = domain_context_mapping(si_domain, pdev,
+						     CONTEXT_TT_MULTI_LEVEL);
+			if (ret)
+				return ret;
+			ret = domain_add_dev_info(si_domain, pdev);
+			if (ret)
+				return ret;
+		}
 	}
 
 	return 0;
@@ -2291,6 +2334,10 @@ int __init init_dmars(void)
 	 * identity mapping if iommu_identity_mapping is set.
 	 */
 	if (!iommu_pass_through) {
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+		if (!iommu_identity_mapping)
+			iommu_identity_mapping = 2;
+#endif
 		if (iommu_identity_mapping)
 			iommu_prepare_static_identity_mapping();
 		/*
@@ -2368,15 +2415,15 @@ error:
 	return ret;
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
 static inline unsigned long aligned_nrpages(unsigned long host_addr,
 					    size_t size)
 {
 	host_addr &= ~PAGE_MASK;
-	host_addr += size + PAGE_SIZE - 1;
-
-	return host_addr >> VTD_PAGE_SHIFT;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
 }
 
+/* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
 				     unsigned long nrpages, uint64_t dma_mask)
@@ -2443,16 +2490,24 @@ static int iommu_dummy(struct pci_dev *pdev)
 }
 
 /* Check if the pdev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct pci_dev *pdev)
+static int iommu_no_mapping(struct device *dev)
 {
+	struct pci_dev *pdev;
 	int found;
 
+	if (unlikely(dev->bus != &pci_bus_type))
+		return 1;
+
+	pdev = to_pci_dev(dev);
+	if (iommu_dummy(pdev))
+		return 1;
+
 	if (!iommu_identity_mapping)
-		return iommu_dummy(pdev);
+		return 0;
 
 	found = identity_mapping(pdev);
 	if (found) {
-		if (pdev->dma_mask > DMA_BIT_MASK(32))
+		if (iommu_should_identity_map(pdev, 0))
 			return 1;
 		else {
 			/*
@@ -2469,9 +2524,12 @@ static int iommu_no_mapping(struct pci_dev *pdev)
 	 * In case of a detached 64 bit DMA device from vm, the device
 	 * is put into si_domain for identity mapping.
 	 */
-	if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+	if (iommu_should_identity_map(pdev, 0)) {
 		int ret;
 		ret = domain_add_dev_info(si_domain, pdev);
+		if (ret)
+			return 0;
+		ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 		if (!ret) {
 			printk(KERN_INFO "64bit %s uses identity mapping\n",
 			       pci_name(pdev));
@@ -2480,7 +2538,7 @@
 		}
 	}
 
-	return iommu_dummy(pdev);
+	return 0;
 }
 
 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
@@ -2496,7 +2554,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return paddr;
 
 	domain = get_valid_domain_for_dev(pdev);
@@ -2506,7 +2564,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova)
 		goto error;
 
@@ -2635,7 +2694,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	struct iova *iova;
 	struct intel_iommu *iommu;
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(dev))
 		return;
 
 	domain = find_domain(pdev);
@@ -2726,7 +2785,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	struct iova *iova;
 	struct intel_iommu *iommu;
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return;
 
 	domain = find_domain(pdev);
@@ -2785,7 +2844,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
 
 	domain = get_valid_domain_for_dev(pdev);
@@ -2797,7 +2856,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
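The reworked aligned_nrpages() rounds the mapped span up to the MM page size and returns a count of VTD pages, which the two map paths then convert with dma_to_mm_pfn() before calling intel_alloc_iova() (which, per the new comment, wants MM pages). A small worked example of the arithmetic, assuming 4 KiB pages on both sides (PAGE_SHIFT == VTD_PAGE_SHIFT == 12); this mirrors the new helper but is not code from the patch:

	/* A buffer at offset 0x800 into its first page, 0x1900 bytes long:
	 * 0x800 + 0x1900 = 0x2100, rounded up to 0x3000, i.e. 3 pages. */
	static unsigned long example_nrpages(unsigned long host_addr, unsigned long size)
	{
		const unsigned long page_size = 4096;
		const unsigned long offset_mask = page_size - 1;	/* ~PAGE_MASK in kernel terms */

		host_addr &= offset_mask;	/* keep only the offset into the first page */
		return ((host_addr + size + offset_mask) & ~offset_mask) >> 12;
	}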
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 32b27739ec2a..713f7bf5afb3 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -283,7 +283,7 @@ static void ds1374_work(struct work_struct *work)
 
 	stat = i2c_smbus_read_byte_data(client, DS1374_REG_SR);
 	if (stat < 0)
-		return;
+		goto unlock;
 
 	if (stat & DS1374_REG_SR_AF) {
 		stat &= ~DS1374_REG_SR_AF;
@@ -302,7 +302,7 @@ static void ds1374_work(struct work_struct *work)
 out:
 	if (!ds1374->exiting)
 		enable_irq(client->irq);
-
+unlock:
 	mutex_unlock(&ds1374->mutex);
 }
 
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f8d9045d668a..0f7a30b7d2d1 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1261,7 +1261,7 @@ static int mon_alloc_buff(struct mon_pgmap *map, int npages)
 			return -ENOMEM;
 		}
 		map[n].ptr = (unsigned char *) vaddr;
-		map[n].pg = virt_to_page(vaddr);
+		map[n].pg = virt_to_page((void *) vaddr);
 	}
 	return 0;
 }
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 53ea05645ff8..53eb39652791 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1513,8 +1513,6 @@ register_framebuffer(struct fb_info *fb_info)
 		if (!registered_fb[i])
 			break;
 	fb_info->node = i;
-	mutex_init(&fb_info->lock);
-	mutex_init(&fb_info->mm_lock);
 
 	fb_info->dev = device_create(fb_class, fb_info->device,
 				     MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index d4a2c11d9809..afc04df39a03 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -62,6 +62,9 @@ struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
 	mutex_init(&info->bl_curve_mutex);
 #endif
 
+	mutex_init(&info->lock);
+	mutex_init(&info->mm_lock);
+
 	return info;
 #undef PADDING
 #undef BYTES_PER_LONG
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 59c3a2e14913..76bc51b616d1 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -2083,6 +2083,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
 	spin_lock_init(&ACCESS_FBINFO(lock.accel));
 	init_rwsem(&ACCESS_FBINFO(crtc2.lock));
 	init_rwsem(&ACCESS_FBINFO(altout.lock));
+	mutex_init(&ACCESS_FBINFO(fbcon).lock);
 	mutex_init(&ACCESS_FBINFO(fbcon).mm_lock);
 	ACCESS_FBINFO(irq_flags) = 0;
 	init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait));
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 43680e545427..bb63c07e13de 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -211,23 +211,21 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
 
 /**
  * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock.
+ * @id: window id.
  * @sfb: The hardware state.
  * @pixclock: The pixel clock wanted, in picoseconds.
  *
  * Given the specified pixel clock, work out the necessary divider to get
  * close to the output frequency.
  */
-static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
+static int s3c_fb_calc_pixclk(unsigned char id, struct s3c_fb *sfb, unsigned int pixclk)
 {
+	struct s3c_fb_pd_win *win = sfb->pdata->win[id];
 	unsigned long clk = clk_get_rate(sfb->bus_clk);
-	unsigned long long tmp;
 	unsigned int result;
 
-	tmp = (unsigned long long)clk;
-	tmp *= pixclk;
-
-	do_div(tmp, 1000000000UL);
-	result = (unsigned int)tmp / 1000;
+	pixclk *= win->win_mode.refresh;
+	result = clk / pixclk;
 
 	dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
 		pixclk, clk, result, clk / result);
@@ -267,6 +265,7 @@ static int s3c_fb_set_par(struct fb_info *info)
 	struct s3c_fb *sfb = win->parent;
 	void __iomem *regs = sfb->regs;
 	int win_no = win->index;
+	u32 osdc_data = 0;
 	u32 data;
 	u32 pagewidth;
 	int clkdiv;
@@ -302,7 +301,7 @@ static int s3c_fb_set_par(struct fb_info *info)
 	/* use window 0 as the basis for the lcd output timings */
 
 	if (win_no == 0) {
-		clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
+		clkdiv = s3c_fb_calc_pixclk(win_no, sfb, var->pixclock);
 
 		data = sfb->pdata->vidcon0;
 		data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
@@ -359,8 +358,6 @@ static int s3c_fb_set_par(struct fb_info *info)
 
 	data = var->xres * var->yres;
 
-	u32 osdc_data = 0;
-
 	osdc_data = VIDISD14C_ALPHA1_R(0xf) |
 		    VIDISD14C_ALPHA1_G(0xf) |
 		    VIDISD14C_ALPHA1_B(0xf);
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index fd33455389b8..4a067f0d0ceb 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -6367,7 +6367,6 @@ error_3: vfree(ivideo->bios_abase);
 	sis_fb_info->fix = ivideo->sisfb_fix;
 	sis_fb_info->screen_base = ivideo->video_vbase + ivideo->video_offset;
 	sis_fb_info->fbops = &sisfb_ops;
-	sisfb_get_fix(&sis_fb_info->fix, -1, sis_fb_info);
 	sis_fb_info->pseudo_palette = ivideo->pseudo_palette;
 
 	fb_alloc_cmap(&sis_fb_info->cmap, 256 , 0);
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 16d4f4c7d52b..98f24f0ec00d 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -1624,8 +1624,6 @@ static int __devinit sm501fb_start_one(struct sm501fb_info *info,
 	if (!fbi)
 		return 0;
 
-	mutex_init(&info->fb[head]->mm_lock);
-
 	ret = sm501fb_init_fb(info->fb[head], head, drvname);
 	if (ret) {
 		dev_err(info->dev, "cannot initialise fb %s\n", drvname);
diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig
index f6542211db48..a9efb1625321 100644
--- a/drivers/vlynq/Kconfig
+++ b/drivers/vlynq/Kconfig
@@ -13,7 +13,7 @@ config VLYNQ
 
 config VLYNQ_DEBUG
 	bool "VLYNQ bus debug"
-	depends on VLYNQ && KERNEL_DEBUG
+	depends on VLYNQ && DEBUG_KERNEL
 	help
 	  Turn on VLYNQ bus debugging.
 
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
index 7335433b067b..f05d2a368367 100644
--- a/drivers/vlynq/vlynq.c
+++ b/drivers/vlynq/vlynq.c
@@ -76,7 +76,7 @@ struct vlynq_regs {
 	u32 int_device[8];
 };
 
-#ifdef VLYNQ_DEBUG
+#ifdef CONFIG_VLYNQ_DEBUG
 static void vlynq_dump_regs(struct vlynq_device *dev)
 {
 	int i;
diff --git a/fs/compat.c b/fs/compat.c
index cdd51a3a7c53..fbadb947727b 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1486,8 +1486,8 @@ int compat_do_execve(char * filename,
 	if (!bprm)
 		goto out_files;
 
-	retval = mutex_lock_interruptible(&current->cred_guard_mutex);
-	if (retval < 0)
+	retval = -ERESTARTNOINTR;
+	if (mutex_lock_interruptible(&current->cred_guard_mutex))
 		goto out_free;
 	current->in_execve = 1;
 
diff --git a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1277,8 +1277,8 @@ int do_execve(char * filename,
 	if (!bprm)
 		goto out_files;
 
-	retval = mutex_lock_interruptible(&current->cred_guard_mutex);
-	if (retval < 0)
+	retval = -ERESTARTNOINTR;
+	if (mutex_lock_interruptible(&current->cred_guard_mutex))
 		goto out_free;
 	current->in_execve = 1;
 
diff --git a/fs/sync.c b/fs/sync.c
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -112,8 +112,13 @@ restart:
 		mutex_unlock(&mutex);
 }
 
+/*
+ * sync everything. Start out by waking pdflush, because that writes back
+ * all queues in parallel.
+ */
 SYSCALL_DEFINE0(sync)
 {
+	wakeup_pdflush(0);
 	sync_filesystems(0);
 	sync_filesystems(1);
 	if (unlikely(laptop_mode))
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 7605c5e9589f..03ec16779802 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -125,6 +125,8 @@ static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t*
 #ifdef ELF_CORE_COPY_TASK_REGS
 
 	return ELF_CORE_COPY_TASK_REGS(t, elfregs);
+#else
+	elf_core_copy_regs(elfregs, task_pt_regs(t));
 #endif
 	return 0;
 }
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 9823946adbc5..192d1e43c43c 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -127,6 +127,7 @@ struct fw_card {
 	struct delayed_work work;
 	int bm_retries;
 	int bm_generation;
+	__be32 bm_transaction_data[2];
 
 	bool broadcast_channel_allocated;
 	u32 broadcast_channel;
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 98a1d8cfb73d..99adcdc0d3ca 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -14,6 +14,8 @@
 #ifndef _LINUX_SYSRQ_H
 #define _LINUX_SYSRQ_H
 
+#include <linux/errno.h>
+
 struct pt_regs;
 struct tty_struct;
 
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index d55a50da2347..a641eb753b8c 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2020,7 +2020,7 @@ fail:
 
 static void perf_mmap_free_page(unsigned long addr)
 {
-	struct page *page = virt_to_page(addr);
+	struct page *page = virt_to_page((void *)addr);
 
 	page->mapping = NULL;
 	__free_page(page);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 61c78b2c07ba..082c320e4dbf 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -181,8 +181,8 @@ int ptrace_attach(struct task_struct *task)
 	 * interference; SUID, SGID and LSM creds get determined differently
 	 * under ptrace.
 	 */
-	retval = mutex_lock_interruptible(&task->cred_guard_mutex);
-	if (retval < 0)
+	retval = -ERESTARTNOINTR;
+	if (mutex_lock_interruptible(&task->cred_guard_mutex))
 		goto out;
 
 	task_lock(task);
diff --git a/mm/filemap.c b/mm/filemap.c
index 22396713feb9..ccea3b665c12 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2272,6 +2272,7 @@ again:
 		pagefault_enable();
 		flush_dcache_page(page);
 
+		mark_page_accessed(page);
 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
 						page, fsdata);
 		if (unlikely(status < 0))
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e0f2cdf9d8b1..ad7cd1c56b07 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1983,7 +1983,7 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
 		unsigned long used = addr + PAGE_ALIGN(size);
 
-		split_page(virt_to_page(addr), order);
+		split_page(virt_to_page((void *)addr), order);
 		while (used < alloc_end) {
 			free_page(used);
 			used += PAGE_SIZE;