author	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-28 12:49:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-28 12:49:27 -0400
commit	0d8762c9ee40cf83d5dbf3a22843bc566912b592
tree	d3ae691a55226e3671caa1f837e127693f6742e8
parent	cf76dddb22c019f03ada6479210f894f19bd591b
parent	6afe40b4dace385d7ba2faf24b352f066f3b71bf
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: fix irqs on/off ip tracing
  lockdep: minor fix for debug_show_all_locks()
  x86: restore the old swiotlb alloc_coherent behavior
  x86: use GFP_DMA for 24bit coherent_dma_mask
  swiotlb: remove panic for alloc_coherent failure
  xen: compilation fix of drivers/xen/events.c on IA64
  xen: portability clean up and some minor clean up for xencomm.c
  xen: don't reload cr3 on suspend
  kernel/resource: fix reserve_region_with_split() section mismatch
  printk: remove unused code from kernel/printk.c
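Taken together, the two x86 DMA fixes in this pull change what a driver sees from dma_alloc_coherent(): a coherent mask of 24 bits or less now steers the allocation into ZONE_DMA via GFP_DMA, and an out-of-range swiotlb allocation returns NULL instead of panicking. A minimal illustrative sketch (hypothetical driver code, not part of this merge):

/*
 * Hypothetical fragment -- only illustrates the alloc_coherent behaviour
 * after this merge; function and buffer names are made up.
 */
#include <linux/dma-mapping.h>

static int example_setup_dma_buffer(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	/* A 24-bit mask now makes dma_alloc_coherent_gfp_flags() add GFP_DMA,
	 * so the buffer is taken from ZONE_DMA. */
	dev->coherent_dma_mask = DMA_24BIT_MASK;

	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)	/* swiotlb now fails with NULL instead of panicking */
		return -ENOMEM;

	/* ... program the device with 'handle', use 'buf' ... */

	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
	return 0;
}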
-rw-r--r--  arch/x86/include/asm/dma-mapping.h  |  4
-rw-r--r--  arch/x86/kernel/pci-swiotlb_64.c    | 14
-rw-r--r--  drivers/xen/events.c                |  2
-rw-r--r--  drivers/xen/manage.c                |  2
-rw-r--r--  drivers/xen/xencomm.c               | 23
-rw-r--r--  kernel/lockdep.c                    | 17
-rw-r--r--  kernel/printk.c                     | 39
-rw-r--r--  kernel/resource.c                   |  2
-rw-r--r--  lib/swiotlb.c                       |  6
9 files changed, 34 insertions(+), 75 deletions(-)
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 4a5397bfce27..7f225a4b2a26 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -255,9 +255,11 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 
 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
-#ifdef CONFIG_X86_64
 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
 
+	if (dma_mask <= DMA_24BIT_MASK)
+		gfp |= GFP_DMA;
+#ifdef CONFIG_X86_64
 	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index c4ce0332759e..3c539d111abb 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,9 +18,21 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
 	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }
 
+static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flags)
+{
+	void *vaddr;
+
+	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+	if (vaddr)
+		return vaddr;
+
+	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
+}
+
 struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = swiotlb_alloc_coherent,
+	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
 	.map_single = swiotlb_map_single_phys,
 	.unmap_single = swiotlb_unmap_single,
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 9ce1ab6c268d..1e3b934a4cf7 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -774,7 +774,7 @@ void xen_poll_irq(int irq)
 
 	poll.nr_ports = 1;
 	poll.timeout = 0;
-	poll.ports = &evtchn;
+	set_xen_guest_handle(poll.ports, &evtchn);
 
 	if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
 		BUG();
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index d0e87cbe157c..9b91617b9582 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -39,8 +39,6 @@ static int xen_suspend(void *data)
 
 	BUG_ON(!irqs_disabled());
 
-	load_cr3(swapper_pg_dir);
-
 	err = device_power_down(PMSG_SUSPEND);
 	if (err) {
 		printk(KERN_ERR "xen_suspend: device_power_down failed: %d\n",
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
index 797cb4e31f07..a240b2c20b99 100644
--- a/drivers/xen/xencomm.c
+++ b/drivers/xen/xencomm.c
@@ -23,13 +23,7 @@
 #include <asm/page.h>
 #include <xen/xencomm.h>
 #include <xen/interface/xen.h>
-#ifdef __ia64__
-#include <asm/xen/xencomm.h>	/* for is_kern_addr() */
-#endif
-
-#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-#include <xen/platform-compat.h>
-#endif
+#include <asm/xen/xencomm.h>	/* for xencomm_is_phys_contiguous() */
 
 static int xencomm_init(struct xencomm_desc *desc,
 			void *buffer, unsigned long bytes)
@@ -157,20 +151,11 @@ static int xencomm_create(void *buffer, unsigned long bytes,
 	return 0;
 }
 
-/* check if memory address is within VMALLOC region */
-static int is_phys_contiguous(unsigned long addr)
-{
-	if (!is_kernel_addr(addr))
-		return 0;
-
-	return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
-}
-
 static struct xencomm_handle *xencomm_create_inline(void *ptr)
 {
 	unsigned long paddr;
 
-	BUG_ON(!is_phys_contiguous((unsigned long)ptr));
+	BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
 
 	paddr = (unsigned long)xencomm_pa(ptr);
 	BUG_ON(paddr & XENCOMM_INLINE_FLAG);
@@ -202,7 +187,7 @@ struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
 	int rc;
 	struct xencomm_desc *desc;
 
-	if (is_phys_contiguous((unsigned long)ptr))
+	if (xencomm_is_phys_contiguous((unsigned long)ptr))
 		return xencomm_create_inline(ptr);
 
 	rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
@@ -219,7 +204,7 @@ struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
 	int rc;
 	struct xencomm_desc *desc = NULL;
 
-	if (is_phys_contiguous((unsigned long)ptr))
+	if (xencomm_is_phys_contiguous((unsigned long)ptr))
 		return xencomm_create_inline(ptr);
 
 	rc = xencomm_create_mini(ptr, bytes, xc_desc,
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index dbda475b13bd..06e157119d2b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2169,12 +2169,11 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long a0)
+void trace_hardirqs_on_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
-	unsigned long ip;
 
-	time_hardirqs_on(CALLER_ADDR0, a0);
+	time_hardirqs_on(CALLER_ADDR0, ip);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2188,7 +2187,6 @@ void trace_hardirqs_on_caller(unsigned long a0)
 	}
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
-	ip = (unsigned long) __builtin_return_address(0);
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
@@ -2224,11 +2222,11 @@ EXPORT_SYMBOL(trace_hardirqs_on);
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off_caller(unsigned long a0)
+void trace_hardirqs_off_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	time_hardirqs_off(CALLER_ADDR0, a0);
+	time_hardirqs_off(CALLER_ADDR0, ip);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2241,7 +2239,7 @@ void trace_hardirqs_off_caller(unsigned long a0)
 		 * We have done an ON -> OFF transition:
 		 */
 		curr->hardirqs_enabled = 0;
-		curr->hardirq_disable_ip = _RET_IP_;
+		curr->hardirq_disable_ip = ip;
 		curr->hardirq_disable_event = ++curr->irq_events;
 		debug_atomic_inc(&hardirqs_off_events);
 	} else
@@ -3417,9 +3415,10 @@ retry:
 		}
 		printk(" ignoring it.\n");
 		unlock = 0;
+	} else {
+		if (count != 10)
+			printk(KERN_CONT " locked it.\n");
 	}
-	if (count != 10)
-		printk(" locked it.\n");
 
 	do_each_thread(g, p) {
 		/*
diff --git a/kernel/printk.c b/kernel/printk.c
index 6341af77eb65..f492f1583d77 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -233,45 +233,6 @@ static inline void boot_delay_msec(void)
 #endif
 
 /*
- * Return the number of unread characters in the log buffer.
- */
-static int log_buf_get_len(void)
-{
-	return logged_chars;
-}
-
-/*
- * Copy a range of characters from the log buffer.
- */
-int log_buf_copy(char *dest, int idx, int len)
-{
-	int ret, max;
-	bool took_lock = false;
-
-	if (!oops_in_progress) {
-		spin_lock_irq(&logbuf_lock);
-		took_lock = true;
-	}
-
-	max = log_buf_get_len();
-	if (idx < 0 || idx >= max) {
-		ret = -1;
-	} else {
-		if (len > max)
-			len = max;
-		ret = len;
-		idx += (log_end - max);
-		while (len-- > 0)
-			dest[len] = LOG_BUF(idx + len);
-	}
-
-	if (took_lock)
-		spin_unlock_irq(&logbuf_lock);
-
-	return ret;
-}
-
-/*
  * Commands to do_syslog:
  *
  * 0 -- Close the log.  Currently a NOP.
diff --git a/kernel/resource.c b/kernel/resource.c
index 4089d12af6e0..7fec0e427234 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -571,7 +571,7 @@ static void __init __reserve_region_with_split(struct resource *root,
 
 }
 
-void reserve_region_with_split(struct resource *root,
+void __init reserve_region_with_split(struct resource *root,
 		resource_size_t start, resource_size_t end,
 		const char *name)
 {
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index f8eebd489149..78330c37a61b 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -497,8 +497,10 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)*hwdev->dma_mask,
 		       (unsigned long long)dev_addr);
-		panic("swiotlb_alloc_coherent: allocated memory is out of "
-		      "range for device");
+
+		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+		unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+		return NULL;
 	}
 	*dma_handle = dev_addr;
 	return ret;