author    David S. Miller <davem@davemloft.net>  2008-11-11 18:43:02 -0500
committer David S. Miller <davem@davemloft.net>  2008-11-11 18:43:02 -0500
commit    7e452baf6b96b5aeba097afd91501d33d390cc97
tree      9b0e062d3677d50d731ffd0fba47423bfdee9253 /include
parent    3ac38c3a2e7dac3f8f35a56eb85c27881a4c3833
parent    f21f237cf55494c3a4209de323281a3b0528da10
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
        drivers/message/fusion/mptlan.c
        drivers/net/sfc/ethtool.c
        net/mac80211/debugfs_sta.c
Diffstat (limited to 'include')
-rw-r--r--   include/asm-generic/memory_model.h      2
-rw-r--r--   include/drm/drmP.h                       5
-rw-r--r--   include/drm/drm_pciids.h                46
-rw-r--r--   include/linux/cnt32_to_63.h             22
-rw-r--r--   include/linux/cpumask.h                559
-rw-r--r--   include/linux/libata.h                   1
-rw-r--r--   include/linux/mmc/card.h                 2
-rw-r--r--   include/linux/mmc/host.h                 2
-rw-r--r--   include/linux/mmc/sdio_func.h            2
-rw-r--r--   include/linux/pci.h                      2
-rw-r--r--   include/linux/sched.h                    1
-rw-r--r--   include/linux/smp.h                      9
-rw-r--r--   include/linux/ssb/ssb.h                 42
-rw-r--r--   include/linux/telephony.h                2
-rw-r--r--   include/linux/topology.h                 4
-rw-r--r--   include/linux/workqueue.h                8
-rw-r--r--   include/net/af_unix.h                    1
-rw-r--r--   include/sound/core.h                    10
18 files changed, 666 insertions(+), 54 deletions(-)
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index ae060c62aff1..18546d8eb78e 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -34,7 +34,7 @@
 
 #define __pfn_to_page(pfn)                      \
 ({      unsigned long __pfn = (pfn);            \
-        unsigned long __nid = arch_pfn_to_nid(pfn);  \
+        unsigned long __nid = arch_pfn_to_nid(__pfn);  \
         NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
 })
 
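Annotation (not part of the commit): the one-character change above matters because __pfn_to_page() is a statement-expression macro and should evaluate its argument exactly once; the old form expanded `pfn` a second time inside arch_pfn_to_nid(). A stand-alone userspace sketch of the same pitfall, where nid_of() is a made-up stand-in for arch_pfn_to_nid() (needs GCC/Clang statement expressions):

    #include <stdio.h>

    #define nid_of(pfn)  ((pfn) >> 4)              /* stand-in for arch_pfn_to_nid() */

    /* Old form: the macro argument is expanded twice, so side effects run twice. */
    #define lookup_old(pfn)                                 \
    ({      unsigned long __pfn = (pfn);                    \
            unsigned long __nid = nid_of(pfn);              \
            __pfn + __nid; })

    /* Patched form: only the captured copy __pfn is reused. */
    #define lookup_new(pfn)                                 \
    ({      unsigned long __pfn = (pfn);                    \
            unsigned long __nid = nid_of(__pfn);            \
            __pfn + __nid; })

    int main(void)
    {
            unsigned long a = 32, b = 32;
            unsigned long ra = lookup_old(a++);    /* increments 'a' twice */
            unsigned long rb = lookup_new(b++);    /* increments 'b' once  */

            printf("old: result=%lu, a=%lu\n", ra, a);     /* a ends up at 34 */
            printf("new: result=%lu, b=%lu\n", rb, b);     /* b ends up at 33 */
            return 0;
    }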
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 59c796b46ee7..28c7f1679d49 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -861,8 +861,6 @@ struct drm_device {
         struct timer_list vblank_disable_timer;
 
         u32 max_vblank_count;           /**< size of vblank counter register */
-        spinlock_t tasklet_lock;        /**< For drm_locked_tasklet */
-        void (*locked_tasklet_func)(struct drm_device *dev);
 
         /*@} */
         cycles_t ctx_start;
@@ -1149,8 +1147,6 @@ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
                            struct drm_file *filp);
 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
-extern void drm_locked_tasklet(struct drm_device *dev,
-                               void(*func)(struct drm_device *));
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern void drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
@@ -1158,7 +1154,6 @@ extern void drm_vblank_put(struct drm_device *dev, int crtc);
 /* Modesetting support */
 extern int drm_modeset_ctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
-extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
 
                                 /* AGP/GART support (drm_agpsupport.h) */
 extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index da04109741e8..5165f240aa68 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -395,27 +395,27 @@
         {0, 0, 0}
 
 #define i915_PCI_IDS \
-        {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+        {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+        {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
         {0, 0, 0}
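Illustration (not from the patch): the two new fields populate the class and class_mask slots of struct pci_device_id, so these i915 entries now only match functions whose PCI class code is "VGA display controller" (base class 0x03, subclass 0x00), with the low programming-interface byte masked off. A rough stand-alone sketch of that matching rule, with the struct and constants re-declared here purely for illustration (the in-kernel check is the equivalent test in pci_match_one_device()):

    #include <stdio.h>

    #define PCI_CLASS_DISPLAY_VGA   0x0300        /* base class 0x03, subclass 0x00 */

    struct id_entry {
            unsigned int vendor, device;
            unsigned int class, class_mask;
    };

    /* One row from the patched i915_PCI_IDS table (subvendor/subdevice omitted). */
    static const struct id_entry i915_row = {
            .vendor = 0x8086, .device = 0x2a42,
            .class = PCI_CLASS_DISPLAY_VGA << 8,  /* 0x030000 */
            .class_mask = 0xffff00,               /* compare base class + subclass, ignore prog-if */
    };

    static int id_matches(const struct id_entry *id, unsigned int vendor,
                          unsigned int device, unsigned int class /* 24-bit class code */)
    {
            return id->vendor == vendor && id->device == device &&
                   (class & id->class_mask) == id->class;
    }

    int main(void)
    {
            /* Hypothetical device exposing two functions with the same ID: only the
             * one whose class says VGA display should bind. */
            printf("VGA function matches:     %d\n",
                   id_matches(&i915_row, 0x8086, 0x2a42, 0x030000));
            printf("non-VGA function matches: %d\n",
                   id_matches(&i915_row, 0x8086, 0x2a42, 0x038000));
            return 0;
    }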
diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
index 8c0f9505b48c..7605fdd1eb65 100644
--- a/include/linux/cnt32_to_63.h
+++ b/include/linux/cnt32_to_63.h
@@ -16,6 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>
+#include <asm/system.h>
 
 /* this is used only to give gcc a clue about good code generation */
 union cnt32_to_63 {
@@ -53,11 +54,19 @@ union cnt32_to_63 {
  * needed increment. And any race in updating the value in memory is harmless
  * as the same value would simply be stored more than once.
  *
- * The only restriction for the algorithm to work properly is that this
- * code must be executed at least once per each half period of the 32-bit
- * counter to properly update the state bit in memory. This is usually not a
- * problem in practice, but if it is then a kernel timer could be scheduled
- * to manage for this code to be executed often enough.
+ * The restrictions for the algorithm to work properly are:
+ *
+ * 1) this code must be called at least once per each half period of the
+ *    32-bit counter;
+ *
+ * 2) this code must not be preempted for a duration longer than the
+ *    32-bit counter half period minus the longest period between two
+ *    calls to this code.
+ *
+ * Those requirements ensure proper update to the state bit in memory.
+ * This is usually not a problem in practice, but if it is then a kernel
+ * timer should be scheduled to manage for this code to be executed often
+ * enough.
  *
  * Note that the top bit (bit 63) in the returned value should be considered
  * as garbage.  It is not cleared here because callers are likely to use a
@@ -68,9 +77,10 @@ union cnt32_to_63 {
  */
 #define cnt32_to_63(cnt_lo) \
 ({ \
-        static volatile u32 __m_cnt_hi; \
+        static u32 __m_cnt_hi; \
         union cnt32_to_63 __x; \
         __x.hi = __m_cnt_hi; \
+        smp_rmb(); \
         __x.lo = (cnt_lo); \
         if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
                 __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
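Aside (not part of the commit): a stand-alone, single-threaded re-implementation of the same algorithm, to show how the state bit kept in __m_cnt_hi extends a wrapping 32-bit counter to 63 monotonic bits as long as the code runs at least once per half period. The real macro additionally issues smp_rmb(), as added above; the counter values below are hypothetical and a little-endian host is assumed.

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t hw_counter;              /* stands in for the 32-bit hardware counter */

    static uint64_t cnt32_to_63_demo(uint32_t cnt_lo)
    {
            static uint32_t m_cnt_hi;        /* persistent upper half + state bit */
            union { uint64_t val; struct { uint32_t lo, hi; } s; } x;  /* little-endian layout */

            x.s.hi = m_cnt_hi;
            x.s.lo = cnt_lo;
            /* state bit and counter MSB disagree: a half period has elapsed */
            if ((int32_t)(x.s.hi ^ x.s.lo) < 0)
                    m_cnt_hi = x.s.hi = (x.s.hi ^ 0x80000000u) + (x.s.hi >> 31);
            return x.val;
    }

    int main(void)
    {
            hw_counter = 0xfffffff0u;        /* start just below a 32-bit wrap */
            for (int i = 0; i < 4; i++) {
                    /* top bit is documented as garbage, so mask it off */
                    uint64_t v = cnt32_to_63_demo(hw_counter) & ~(1ULL << 63);
                    printf("lo=%08x -> %016llx\n", hw_counter, (unsigned long long)v);
                    hw_counter += 8;         /* sampled more than once per half period, as required */
            }
            return 0;                        /* output stays monotonic across the 32-bit wrap */
    }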
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d3219d73f8e6..21e1dd43e52a 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -5,6 +5,9 @@
  * Cpumasks provide a bitmap suitable for representing the
  * set of CPU's in a system, one bit position per CPU number.
  *
+ * The new cpumask_ ops take a "struct cpumask *"; the old ones
+ * use cpumask_t.
+ *
  * See detailed comments in the file linux/bitmap.h describing the
  * data type on which these cpumasks are based.
  *
@@ -31,7 +34,7 @@
  * will span the entire range of NR_CPUS.
  * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  *
- * The available cpumask operations are:
+ * The obsolescent cpumask operations are:
  *
  * void cpu_set(cpu, mask)              turn on bit 'cpu' in mask
  * void cpu_clear(cpu, mask)            turn off bit 'cpu' in mask
@@ -138,7 +141,7 @@
 #include <linux/threads.h>
 #include <linux/bitmap.h>
 
-typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 extern cpumask_t _unused_cpumask_arg_;
 
 #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
@@ -527,4 +530,556 @@ extern cpumask_t cpu_active_map;
 #define for_each_online_cpu(cpu)   for_each_cpu_mask_nr((cpu), cpu_online_map)
 #define for_each_present_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_present_map)
 
+/* These are the new versions of the cpumask operators: passed by pointer.
+ * The older versions will be implemented in terms of these, then deleted. */
+#define cpumask_bits(maskp) ((maskp)->bits)
+
+#if NR_CPUS <= BITS_PER_LONG
+#define CPU_BITS_ALL                                            \
+{                                                               \
+        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD         \
+}
+
+/* This produces more efficient code. */
+#define nr_cpumask_bits NR_CPUS
+
+#else /* NR_CPUS > BITS_PER_LONG */
+
+#define CPU_BITS_ALL                                            \
+{                                                               \
+        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                \
+        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD         \
+}
+
+#define nr_cpumask_bits nr_cpu_ids
+#endif /* NR_CPUS > BITS_PER_LONG */
+
+/* verify cpu argument to cpumask_* operators */
+static inline unsigned int cpumask_check(unsigned int cpu)
+{
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+        WARN_ON_ONCE(cpu >= nr_cpumask_bits);
+#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+        return cpu;
+}
+
+#if NR_CPUS == 1
+/* Uniprocessor.  Assume all masks are "1". */
+static inline unsigned int cpumask_first(const struct cpumask *srcp)
+{
+        return 0;
+}
+
+/* Valid inputs for n are -1 and 0. */
+static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+        return n+1;
+}
+
+static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+{
+        return n+1;
+}
+
+static inline unsigned int cpumask_next_and(int n,
+                                            const struct cpumask *srcp,
+                                            const struct cpumask *andp)
+{
+        return n+1;
+}
+
+/* cpu must be a valid cpu, ie 0, so there's no other choice. */
+static inline unsigned int cpumask_any_but(const struct cpumask *mask,
+                                           unsigned int cpu)
+{
+        return 1;
+}
+
+#define for_each_cpu(cpu, mask)                 \
+        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_and(cpu, mask, and)        \
+        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
+#else
+/**
+ * cpumask_first - get the first cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+static inline unsigned int cpumask_first(const struct cpumask *srcp)
+{
+        return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_next - get the next cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set.
+ */
+static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+        /* -1 is a legal arg here. */
+        if (n != -1)
+                cpumask_check(n);
+        return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+}
+
+/**
+ * cpumask_next_zero - get the next unset cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus unset.
+ */
+static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+{
+        /* -1 is a legal arg here. */
+        if (n != -1)
+                cpumask_check(n);
+        return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+}
+
+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
+int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+
+/**
+ * for_each_cpu - iterate over every cpu in a mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu(cpu, mask)                         \
+        for ((cpu) = -1;                                \
+                (cpu) = cpumask_next((cpu), (mask)),    \
+                (cpu) < nr_cpu_ids;)
+
+/**
+ * for_each_cpu_and - iterate over every cpu in both masks
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the first cpumask pointer
+ * @and: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places.  It is equivalent to:
+ *      struct cpumask tmp;
+ *      cpumask_and(&tmp, &mask, &and);
+ *      for_each_cpu(cpu, &tmp)
+ *              ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_and(cpu, mask, and)                                \
+        for ((cpu) = -1;                                                \
+                (cpu) = cpumask_next_and((cpu), (mask), (and)),         \
+                (cpu) < nr_cpu_ids;)
+#endif /* SMP */
+
+#define CPU_BITS_NONE                                           \
+{                                                               \
+        [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL                  \
+}
+
+#define CPU_BITS_CPU0                                           \
+{                                                               \
+        [0] =  1UL                                              \
+}
+
+/**
+ * cpumask_set_cpu - set a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+{
+        set_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
+/**
+ * cpumask_clear_cpu - clear a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+        clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
+/**
+ * cpumask_test_cpu - test for a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @cpumask: the cpumask pointer
+ *
+ * No static inline type checking - see Subtlety (1) above.
+ */
+#define cpumask_test_cpu(cpu, cpumask) \
+        test_bit(cpumask_check(cpu), (cpumask)->bits)
+
+/**
+ * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @cpumask: the cpumask pointer
+ *
+ * test_and_set_bit wrapper for cpumasks.
+ */
+static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+{
+        return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
+}
+
+/**
+ * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_setall(struct cpumask *dstp)
+{
+        bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_clear(struct cpumask *dstp)
+{
+        bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_and - *dstp = *src1p & *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_and(struct cpumask *dstp,
+                               const struct cpumask *src1p,
+                               const struct cpumask *src2p)
+{
+        bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
+                   cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_or - *dstp = *src1p | *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+                              const struct cpumask *src2p)
+{
+        bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
+                  cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_xor - *dstp = *src1p ^ *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_xor(struct cpumask *dstp,
+                               const struct cpumask *src1p,
+                               const struct cpumask *src2p)
+{
+        bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
+                   cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_andnot - *dstp = *src1p & ~*src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_andnot(struct cpumask *dstp,
+                                  const struct cpumask *src1p,
+                                  const struct cpumask *src2p)
+{
+        bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
+                      cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_complement - *dstp = ~*srcp
+ * @dstp: the cpumask result
+ * @srcp: the input to invert
+ */
+static inline void cpumask_complement(struct cpumask *dstp,
+                                      const struct cpumask *srcp)
+{
+        bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
+                          nr_cpumask_bits);
+}
+
+/**
+ * cpumask_equal - *src1p == *src2p
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline bool cpumask_equal(const struct cpumask *src1p,
+                                 const struct cpumask *src2p)
+{
+        return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
+                            nr_cpumask_bits);
+}
+
+/**
+ * cpumask_intersects - (*src1p & *src2p) != 0
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline bool cpumask_intersects(const struct cpumask *src1p,
+                                      const struct cpumask *src2p)
+{
+        return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
+                                 nr_cpumask_bits);
+}
+
+/**
+ * cpumask_subset - (*src1p & ~*src2p) == 0
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline int cpumask_subset(const struct cpumask *src1p,
+                                 const struct cpumask *src2p)
+{
+        return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
+                             nr_cpumask_bits);
+}
+
+/**
+ * cpumask_empty - *srcp == 0
+ * @srcp: the cpumask to that all cpus < nr_cpu_ids are clear.
+ */
+static inline bool cpumask_empty(const struct cpumask *srcp)
+{
+        return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_full - *srcp == 0xFFFFFFFF...
+ * @srcp: the cpumask to that all cpus < nr_cpu_ids are set.
+ */
+static inline bool cpumask_full(const struct cpumask *srcp)
+{
+        return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_weight - Count of bits in *srcp
+ * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ */
+static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+{
+        return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_shift_right - *dstp = *srcp >> n
+ * @dstp: the cpumask result
+ * @srcp: the input to shift
+ * @n: the number of bits to shift by
+ */
+static inline void cpumask_shift_right(struct cpumask *dstp,
+                                       const struct cpumask *srcp, int n)
+{
+        bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
+                           nr_cpumask_bits);
+}
+
+/**
+ * cpumask_shift_left - *dstp = *srcp << n
+ * @dstp: the cpumask result
+ * @srcp: the input to shift
+ * @n: the number of bits to shift by
+ */
+static inline void cpumask_shift_left(struct cpumask *dstp,
+                                      const struct cpumask *srcp, int n)
+{
+        bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
+                          nr_cpumask_bits);
+}
+
+/**
+ * cpumask_copy - *dstp = *srcp
+ * @dstp: the result
+ * @srcp: the input cpumask
+ */
+static inline void cpumask_copy(struct cpumask *dstp,
+                                const struct cpumask *srcp)
+{
+        bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_any - pick a "random" cpu from *srcp
+ * @srcp: the input cpumask
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+#define cpumask_any(srcp) cpumask_first(srcp)
+
+/**
+ * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Returns >= nr_cpu_ids if no cpus set in both.  See also cpumask_next_and().
+ */
+#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
+
+/**
+ * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
+
+/**
+ * cpumask_of - the cpumask containing just a given cpu
+ * @cpu: the cpu (<= nr_cpu_ids)
+ */
+#define cpumask_of(cpu) (get_cpu_mask(cpu))
+
+/**
+ * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * @bitmap: the bitmap
+ *
+ * There are a few places where cpumask_var_t isn't appropriate and
+ * static cpumasks must be used (eg. very early boot), yet we don't
+ * expose the definition of 'struct cpumask'.
+ *
+ * This does the conversion, and can be used as a constant initializer.
+ */
+#define to_cpumask(bitmap)                                              \
+        ((struct cpumask *)(1 ? (bitmap)                                \
+                            : (void *)sizeof(__check_is_bitmap(bitmap))))
+
+static inline int __check_is_bitmap(const unsigned long *bitmap)
+{
+        return 1;
+}
+
+/**
+ * cpumask_size - size to allocate for a 'struct cpumask' in bytes
+ *
+ * This will eventually be a runtime variable, depending on nr_cpu_ids.
+ */
+static inline size_t cpumask_size(void)
+{
+        /* FIXME: Once all cpumask assignments are eliminated, this
+         * can be nr_cpumask_bits */
+        return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
+}
+
+/*
+ * cpumask_var_t: struct cpumask for stack usage.
+ *
+ * Oh, the wicked games we play!  In order to make kernel coding a
+ * little more difficult, we typedef cpumask_var_t to an array or a
+ * pointer: doing &mask on an array is a noop, so it still works.
+ *
+ * ie.
+ *      cpumask_var_t tmpmask;
+ *      if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ *              return -ENOMEM;
+ *
+ *        ... use 'tmpmask' like a normal struct cpumask * ...
+ *
+ *      free_cpumask_var(tmpmask);
+ */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+typedef struct cpumask *cpumask_var_t;
+
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
+void free_cpumask_var(cpumask_var_t mask);
+void free_bootmem_cpumask_var(cpumask_var_t mask);
+
+#else
+typedef struct cpumask cpumask_var_t[1];
+
+static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+        return true;
+}
+
+static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+{
+}
+
+static inline void free_cpumask_var(cpumask_var_t mask)
+{
+}
+
+static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
+{
+}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
+/* The pointer versions of the maps, these will become the primary versions. */
+#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map)
+#define cpu_online_mask   ((const struct cpumask *)&cpu_online_map)
+#define cpu_present_mask  ((const struct cpumask *)&cpu_present_map)
+#define cpu_active_mask   ((const struct cpumask *)&cpu_active_map)
+
+/* It's common to want to use cpu_all_mask in struct member initializers,
+ * so it has to refer to an address rather than a pointer. */
+extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
+#define cpu_all_mask to_cpumask(cpu_all_bits)
+
+/* First bits of cpu_bit_bitmap are in fact unset. */
+#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
+
+/* Wrappers for arch boot code to manipulate normally-constant masks */
+static inline void set_cpu_possible(unsigned int cpu, bool possible)
+{
+        if (possible)
+                cpumask_set_cpu(cpu, &cpu_possible_map);
+        else
+                cpumask_clear_cpu(cpu, &cpu_possible_map);
+}
+
+static inline void set_cpu_present(unsigned int cpu, bool present)
+{
+        if (present)
+                cpumask_set_cpu(cpu, &cpu_present_map);
+        else
+                cpumask_clear_cpu(cpu, &cpu_present_map);
+}
+
+static inline void set_cpu_online(unsigned int cpu, bool online)
+{
+        if (online)
+                cpumask_set_cpu(cpu, &cpu_online_map);
+        else
+                cpumask_clear_cpu(cpu, &cpu_online_map);
+}
+
+static inline void set_cpu_active(unsigned int cpu, bool active)
+{
+        if (active)
+                cpumask_set_cpu(cpu, &cpu_active_map);
+        else
+                cpumask_clear_cpu(cpu, &cpu_active_map);
+}
+
+static inline void init_cpu_present(const struct cpumask *src)
+{
+        cpumask_copy(&cpu_present_map, src);
+}
+
+static inline void init_cpu_possible(const struct cpumask *src)
+{
+        cpumask_copy(&cpu_possible_map, src);
+}
+
+static inline void init_cpu_online(const struct cpumask *src)
+{
+        cpumask_copy(&cpu_online_map, src);
+}
 #endif /* __LINUX_CPUMASK_H */
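Example (not from the patch): a minimal kernel-context sketch of the pointer-based API added above, following the cpumask_var_t pattern documented in its comment. count_online_in() is a made-up caller; cpu_online_mask is the pointer alias defined in this hunk.

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    static int count_online_in(const struct cpumask *allowed)
    {
            cpumask_var_t tmp;
            unsigned int cpu, n = 0;

            /* off-stack allocation when CONFIG_CPUMASK_OFFSTACK=y, on-stack otherwise */
            if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                    return -ENOMEM;

            /* tmp = allowed & cpu_online_mask, then walk the result ... */
            cpumask_and(tmp, allowed, cpu_online_mask);
            for_each_cpu(cpu, tmp)
                    n++;

            /* ... or skip the temporary entirely, which is what
             * for_each_cpu_and() exists for per its comment above. */
            n = 0;
            for_each_cpu_and(cpu, allowed, cpu_online_mask)
                    n++;

            free_cpumask_var(tmp);
            return n;
    }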
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c7665a4134c5..59b0f1c807b5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -698,6 +698,7 @@ struct ata_port {
         unsigned int            cbl;    /* cable type; ATA_CBL_xxx */
 
         struct ata_queued_cmd   qcmd[ATA_MAX_QUEUE];
+        unsigned long           qc_allocated;
         unsigned int            qc_active;
         int                     nr_active_links; /* #links with active qcs */
 
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index ee6e822d5994..403aa505f27e 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -130,7 +130,7 @@ struct mmc_card {
 #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
 
 #define mmc_card_name(c)        ((c)->cid.prod_name)
-#define mmc_card_id(c)          ((c)->dev.bus_id)
+#define mmc_card_id(c)          (dev_name(&(c)->dev))
 
 #define mmc_list_to_card(l)     container_of(l, struct mmc_card, node)
 #define mmc_get_drvdata(c)      dev_get_drvdata(&(c)->dev)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index bde891f64591..f842f234e44f 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -176,7 +176,7 @@ static inline void *mmc_priv(struct mmc_host *host)
 
 #define mmc_dev(x)      ((x)->parent)
 #define mmc_classdev(x) (&(x)->class_dev)
-#define mmc_hostname(x) ((x)->class_dev.bus_id)
+#define mmc_hostname(x) (dev_name(&(x)->class_dev))
 
 extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
 extern int mmc_resume_host(struct mmc_host *);
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 07bee4a0d457..451bdfc85830 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -63,7 +63,7 @@ struct sdio_func {
 
 #define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT)
 
-#define sdio_func_id(f)         ((f)->dev.bus_id)
+#define sdio_func_id(f)         (dev_name(&(f)->dev))
 
 #define sdio_get_drvdata(f)     dev_get_drvdata(&(f)->dev)
 #define sdio_set_drvdata(f,d)   dev_set_drvdata(&(f)->dev, d)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c75b82bda327..feb4657bb043 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1136,7 +1136,7 @@ static inline void pci_mmcfg_late_init(void) { }
 #endif
 
 #ifdef CONFIG_HAS_IOMEM
-static inline void * pci_ioremap_bar(struct pci_dev *pdev, int bar)
+static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
 {
         /*
          * Make sure the BAR is actually a memory resource, not an IO resource
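Aside (not part of the commit): with the __iomem annotation on the return type, sparse can check that callers only touch the mapping through MMIO accessors. A hedged caller sketch; MY_STATUS_REG is a made-up register offset in BAR 0.

    #include <linux/pci.h>
    #include <linux/io.h>

    #define MY_STATUS_REG 0x10    /* hypothetical register offset */

    static int read_status(struct pci_dev *pdev, u32 *out)
    {
            void __iomem *regs = pci_ioremap_bar(pdev, 0);

            if (!regs)
                    return -ENOMEM;
            *out = readl(regs + MY_STATUS_REG);   /* accessor, not a plain dereference */
            iounmap(regs);
            return 0;
    }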
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 295b7c756ca6..644ffbda17ca 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -247,6 +247,7 @@ extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern int runqueue_is_locked(void);
+extern void task_rq_unlock_wait(struct task_struct *p);
 
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 2e4d58b26c06..3f9a60043a97 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -64,8 +64,17 @@ extern void smp_cpus_done(unsigned int max_cpus);
  * Call a function on all other processors
  */
 int smp_call_function(void(*func)(void *info), void *info, int wait);
+/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */
 int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
                            int wait);
+
+static inline void smp_call_function_many(const struct cpumask *mask,
+                                          void (*func)(void *info), void *info,
+                                          int wait)
+{
+        smp_call_function_mask(*mask, func, info, wait);
+}
+
 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
                              int wait);
 void __smp_call_function_single(int cpuid, struct call_single_data *data);
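Illustration (not from the patch): the new smp_call_function_many() takes a cpumask pointer, so callers stop copying a whole cpumask_t by value; for now the inline simply forwards to the deprecated smp_call_function_mask(). A hedged caller sketch, with drain_local_cache() as a made-up callback and cpu_online_mask from the cpumask changes in this same merge.

    #include <linux/smp.h>
    #include <linux/cpumask.h>

    static void drain_local_cache(void *info)
    {
            /* per-CPU work; 'info' is unused in this sketch */
    }

    static void drain_all_online(void)
    {
            /* wait=1: do not return until the callback has finished on the
             * targeted CPUs. */
            smp_call_function_many(cpu_online_mask, drain_local_cache, NULL, 1);
    }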
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index e530026eedf7..17d9b58f6379 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -427,12 +427,16 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
 {
         switch (dev->bus->bustype) {
         case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
                 return pci_dma_mapping_error(dev->bus->host_pci, addr);
+#endif
+                break;
         case SSB_BUSTYPE_SSB:
                 return dma_mapping_error(dev->dev, addr);
         default:
-                __ssb_dma_not_implemented(dev);
+                break;
         }
+        __ssb_dma_not_implemented(dev);
         return -ENOSYS;
 }
 
@@ -441,12 +445,16 @@ static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
 {
         switch (dev->bus->bustype) {
         case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
                 return pci_map_single(dev->bus->host_pci, p, size, dir);
+#endif
+                break;
         case SSB_BUSTYPE_SSB:
                 return dma_map_single(dev->dev, p, size, dir);
         default:
-                __ssb_dma_not_implemented(dev);
+                break;
         }
+        __ssb_dma_not_implemented(dev);
         return 0;
 }
 
452 460
@@ -455,14 +463,18 @@ static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_a
 {
         switch (dev->bus->bustype) {
         case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
                 pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir);
                 return;
+#endif
+                break;
         case SSB_BUSTYPE_SSB:
                 dma_unmap_single(dev->dev, dma_addr, size, dir);
                 return;
         default:
-                __ssb_dma_not_implemented(dev);
+                break;
         }
+        __ssb_dma_not_implemented(dev);
 }
 
 static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
@@ -472,15 +484,19 @@ static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
 {
         switch (dev->bus->bustype) {
         case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
                 pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
                                             size, dir);
                 return;
+#endif
+                break;
         case SSB_BUSTYPE_SSB:
                 dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir);
                 return;
         default:
-                __ssb_dma_not_implemented(dev);
+                break;
         }
+        __ssb_dma_not_implemented(dev);
 }
 
 static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
@@ -490,15 +506,19 @@ static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
 {
         switch (dev->bus->bustype) {
         case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
                 pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
                                                size, dir);
                 return;
+#endif
+                break;
         case SSB_BUSTYPE_SSB:
                 dma_sync_single_for_device(dev->dev, dma_addr, size, dir);
                 return;
         default:
-                __ssb_dma_not_implemented(dev);
+                break;
         }
+        __ssb_dma_not_implemented(dev);
 }
 
 static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
@@ -509,17 +529,21 @@ static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
 {
         switch (dev->bus->bustype) {
         case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
                 /* Just sync everything. That's all the PCI API can do. */
                 pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
                                             offset + size, dir);
                 return;
+#endif
+                break;
         case SSB_BUSTYPE_SSB:
                 dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset,
                                               size, dir);
                 return;
         default:
-                __ssb_dma_not_implemented(dev);
+                break;
         }
+        __ssb_dma_not_implemented(dev);
 }
 
 static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
@@ -530,17 +554,21 @@ static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
 {
         switch (dev->bus->bustype) {
         case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
                 /* Just sync everything. That's all the PCI API can do. */
                 pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
                                                offset + size, dir);
                 return;
+#endif
+                break;
         case SSB_BUSTYPE_SSB:
                 dma_sync_single_range_for_device(dev->dev, dma_addr, offset,
                                                  size, dir);
                 return;
         default:
-                __ssb_dma_not_implemented(dev);
+                break;
         }
+        __ssb_dma_not_implemented(dev);
 }
 
 
diff --git a/include/linux/telephony.h b/include/linux/telephony.h
index 5b2b6261f193..f63afe330add 100644
--- a/include/linux/telephony.h
+++ b/include/linux/telephony.h
@@ -14,7 +14,7 @@
  *    Authors:       Ed Okerson, <eokerson@quicknet.net>
  *                   Greg Herlein, <gherlein@quicknet.net>
  *
- *    Contributors:  Alan Cox, <alan@redhat.com>
+ *    Contributors:  Alan Cox, <alan@lxorguk.ukuu.org.uk>
  *                   David W. Erhart, <derhart@quicknet.net>
  *
  * IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 34a7ee0ebed2..117f1b7405cf 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -99,7 +99,7 @@ void arch_update_cpu_topology(void);
                                 | SD_BALANCE_FORK       \
                                 | SD_BALANCE_EXEC       \
                                 | SD_WAKE_AFFINE        \
-                                | SD_WAKE_IDLE          \
+                                | SD_WAKE_BALANCE       \
                                 | SD_SHARE_CPUPOWER,    \
         .last_balance           = jiffies,              \
         .balance_interval       = 1,                    \
@@ -120,10 +120,10 @@ void arch_update_cpu_topology(void);
         .wake_idx               = 1,                    \
         .forkexec_idx           = 1,                    \
         .flags                  = SD_LOAD_BALANCE       \
-                                | SD_BALANCE_NEWIDLE    \
                                 | SD_BALANCE_FORK       \
                                 | SD_BALANCE_EXEC       \
                                 | SD_WAKE_AFFINE        \
+                                | SD_WAKE_BALANCE       \
                                 | SD_SHARE_PKG_RESOURCES\
                                 | BALANCE_FOR_MC_POWER, \
         .last_balance           = jiffies,              \
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 89a5a1231ffb..b36291130f22 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -240,4 +240,12 @@ void cancel_rearming_delayed_work(struct delayed_work *work)
         cancel_delayed_work_sync(work);
 }
 
+#ifndef CONFIG_SMP
+static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+{
+        return fn(arg);
+}
+#else
+long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
+#endif /* CONFIG_SMP */
 #endif
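Aside (not part of the commit): work_on_cpu() runs a function with a specific CPU's context and hands back its long return value; on !CONFIG_SMP the inline stub above degenerates to a direct call. A hedged caller sketch; read_node_stat() and its return value are placeholders.

    #include <linux/workqueue.h>

    static long read_node_stat(void *arg)
    {
            /* executed on the CPU passed to work_on_cpu(); 'arg' is caller-provided */
            return 42;    /* placeholder result */
    }

    static long sample_cpu0(void)
    {
            return work_on_cpu(0, read_node_stat, NULL);
    }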
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 7dd29b7e461d..c29ff1da8a18 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -54,6 +54,7 @@ struct unix_sock {
         atomic_long_t           inflight;
         spinlock_t              lock;
         unsigned int            gc_candidate : 1;
+        unsigned int            gc_maybe_cycle : 1;
         wait_queue_head_t       peer_wait;
 };
 #define unix_sk(__sk) ((struct unix_sock *)__sk)
diff --git a/include/sound/core.h b/include/sound/core.h
index 35424a971b7a..1508c4ec1ba9 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -385,9 +385,13 @@ void snd_verbose_printd(const char *file, int line, const char *format, ...)
 
 #else /* !CONFIG_SND_DEBUG */
 
-#define snd_printd(fmt, args...)        /* nothing */
-#define snd_BUG()                       /* nothing */
-#define snd_BUG_ON(cond)        ({/*(void)(cond);*/ 0;})  /* always false */
+#define snd_printd(fmt, args...)        do { } while (0)
+#define snd_BUG()                       do { } while (0)
+static inline int __snd_bug_on(void)
+{
+        return 0;
+}
+#define snd_BUG_ON(cond)        __snd_bug_on()  /* always false */
 
 #endif /* CONFIG_SND_DEBUG */
 
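Aside (not part of the commit): snd_BUG_ON() is meant to be usable inside an if (); with !CONFIG_SND_DEBUG the helper above is constant 0, so the guarded branch compiles away. A hedged call-site sketch; start_stream() and its argument are made up.

    #include <sound/core.h>

    static int start_stream(void *runtime)
    {
            if (snd_BUG_ON(!runtime))
                    return -EINVAL;   /* debug builds report this; release builds skip the check */
            /* ... normal path ... */
            return 0;
    }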