author		David S. Miller <davem@davemloft.net>	2008-10-08 17:56:41 -0400
committer	David S. Miller <davem@davemloft.net>	2008-10-08 17:56:41 -0400
commit		4dd565134ece7e5d528d4c5288879310c54419e9 (patch)
tree		e08910d2d0feae0c030f8f01acc9b03eb760ad9a /include
parent		071d7ab6649eb34a873a53e71635186e9117101d (diff)
parent		69849375d6b13e94d08cdc94b49b11fbab454a0e (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:

	drivers/net/e1000e/ich8lan.c
	drivers/net/e1000e/netdev.c
Diffstat (limited to 'include')
-rw-r--r--	include/asm-mips/cevt-r4k.h		46
-rw-r--r--	include/asm-mips/irqflags.h		26
-rw-r--r--	include/asm-mips/mipsregs.h		 6
-rw-r--r--	include/asm-mips/smtc.h			 8
-rw-r--r--	include/asm-mips/sn/mapped_kernel.h	 8
-rw-r--r--	include/asm-mips/stackframe.h		72
-rw-r--r--	include/asm-x86/uaccess_64.h		 1
-rw-r--r--	include/linux/hrtimer.h			18
-rw-r--r--	include/linux/ide.h			 4
-rw-r--r--	include/linux/ramfs.h			 1
-rw-r--r--	include/linux/stacktrace.h		 2
11 files changed, 163 insertions, 29 deletions
diff --git a/include/asm-mips/cevt-r4k.h b/include/asm-mips/cevt-r4k.h
new file mode 100644
index 000000000000..fa4328f9124f
--- /dev/null
+++ b/include/asm-mips/cevt-r4k.h
@@ -0,0 +1,46 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Kevin D. Kissell
+ */
+
+/*
+ * Definitions used for common event timer implementation
+ * for MIPS 4K-type processors and their MIPS MT variants.
+ * Avoids unsightly extern declarations in C files.
+ */
+#ifndef __ASM_CEVT_R4K_H
+#define __ASM_CEVT_R4K_H
+
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+
+void mips_event_handler(struct clock_event_device *dev);
+int c0_compare_int_usable(void);
+void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *);
+irqreturn_t c0_compare_interrupt(int, void *);
+
+extern struct irqaction c0_compare_irqaction;
+extern int cp0_timer_irq_installed;
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+
+static inline int handle_perf_irq(int r2)
+{
+	/*
+	 * The performance counter overflow interrupt may be shared with the
+	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
+	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+	 * and we can't reliably determine if a counter interrupt has also
+	 * happened (!r2) then don't check for a timer interrupt.
+	 */
+	return (cp0_perfcount_irq < 0) &&
+		perf_irq() == IRQ_HANDLED &&
+		!r2;
+}
+
+#endif /* __ASM_CEVT_R4K_H */
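
The handle_perf_irq() helper above is meant to be called at the top of the
CP0 compare (timer) interrupt handler, so that a performance-counter overflow
sharing the timer IRQ is not mistaken for a timer tick. A minimal sketch of
such a caller, assuming the declarations from this header plus the usual
accessors from <asm/mipsregs.h>; the in-tree handler in
arch/mips/kernel/cevt-r4k.c may differ in detail:

	irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
	{
		const int r2 = cpu_has_mips_r2;
		struct clock_event_device *cd;
		int cpu = smp_processor_id();

		/* If the perf counter claimed a shared IRQ, skip the timer check. */
		if (handle_perf_irq(r2))
			return IRQ_HANDLED;

		/*
		 * On R2 cores, Cause.TI says whether Compare really fired;
		 * on pre-R2 cores we have to assume that it did.
		 */
		if (!r2 || (read_c0_cause() & (1 << 30))) {
			write_c0_compare(read_c0_compare());	/* ack the interrupt */
			cd = &per_cpu(mips_clockevent_device, cpu);
			cd->event_handler(cd);
		}
		return IRQ_HANDLED;
	}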
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h
index 881e8866501d..701ec0ba8fa9 100644
--- a/include/asm-mips/irqflags.h
+++ b/include/asm-mips/irqflags.h
@@ -38,8 +38,17 @@ __asm__(
38 " .set pop \n" 38 " .set pop \n"
39 " .endm"); 39 " .endm");
40 40
41extern void smtc_ipi_replay(void);
42
41static inline void raw_local_irq_enable(void) 43static inline void raw_local_irq_enable(void)
42{ 44{
45#ifdef CONFIG_MIPS_MT_SMTC
46 /*
47 * SMTC kernel needs to do a software replay of queued
48 * IPIs, at the cost of call overhead on each local_irq_enable()
49 */
50 smtc_ipi_replay();
51#endif
43 __asm__ __volatile__( 52 __asm__ __volatile__(
44 "raw_local_irq_enable" 53 "raw_local_irq_enable"
45 : /* no outputs */ 54 : /* no outputs */
@@ -47,6 +56,7 @@ static inline void raw_local_irq_enable(void)
47 : "memory"); 56 : "memory");
48} 57}
49 58
59
50/* 60/*
51 * For cli() we have to insert nops to make sure that the new value 61 * For cli() we have to insert nops to make sure that the new value
52 * has actually arrived in the status register before the end of this 62 * has actually arrived in the status register before the end of this
@@ -185,15 +195,14 @@ __asm__(
185 " .set pop \n" 195 " .set pop \n"
186 " .endm \n"); 196 " .endm \n");
187 197
188extern void smtc_ipi_replay(void);
189 198
190static inline void raw_local_irq_restore(unsigned long flags) 199static inline void raw_local_irq_restore(unsigned long flags)
191{ 200{
192 unsigned long __tmp1; 201 unsigned long __tmp1;
193 202
194#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY 203#ifdef CONFIG_MIPS_MT_SMTC
195 /* 204 /*
196 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred 205 * SMTC kernel needs to do a software replay of queued
197 * IPIs, at the cost of branch and call overhead on each 206 * IPIs, at the cost of branch and call overhead on each
198 * local_irq_restore() 207 * local_irq_restore()
199 */ 208 */
@@ -208,6 +217,17 @@ static inline void raw_local_irq_restore(unsigned long flags)
208 : "memory"); 217 : "memory");
209} 218}
210 219
220static inline void __raw_local_irq_restore(unsigned long flags)
221{
222 unsigned long __tmp1;
223
224 __asm__ __volatile__(
225 "raw_local_irq_restore\t%0"
226 : "=r" (__tmp1)
227 : "0" (flags)
228 : "memory");
229}
230
211static inline int raw_irqs_disabled_flags(unsigned long flags) 231static inline int raw_irqs_disabled_flags(unsigned long flags)
212{ 232{
213#ifdef CONFIG_MIPS_MT_SMTC 233#ifdef CONFIG_MIPS_MT_SMTC
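
The __raw_local_irq_restore() variant added here is identical to
raw_local_irq_restore() except that it never runs the SMTC IPI-replay hook,
which matters, for example, for code that is itself part of the replay path
and must not recurse into it. A purely illustrative sketch of the distinction;
smtc_replay_pending_ipis() is a hypothetical caller, not a function from this
patch:

	/*
	 * Hypothetical caller running inside the SMTC IPI replay machinery:
	 * it restores the saved flags with the "__" variant so that it does
	 * not re-enter the replay hook recursively.
	 */
	static void smtc_replay_pending_ipis(void)
	{
		unsigned long flags;

		raw_local_irq_save(flags);
		/* ... dequeue and dispatch any deferred IPIs here ... */
		__raw_local_irq_restore(flags);	/* no replay on this path */
	}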
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index a46f8e258e6b..979866000da4 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -1462,7 +1462,7 @@ set_c0_##name(unsigned int set) \
 { \
 	unsigned int res;			\
 	unsigned int omt;			\
-	unsigned int flags;			\
+	unsigned long flags;			\
 						\
 	local_irq_save(flags);			\
 	omt = __dmt();				\
@@ -1480,7 +1480,7 @@ clear_c0_##name(unsigned int clear) \
 { \
 	unsigned int res;			\
 	unsigned int omt;			\
-	unsigned int flags;			\
+	unsigned long flags;			\
 						\
 	local_irq_save(flags);			\
 	omt = __dmt();				\
@@ -1498,7 +1498,7 @@ change_c0_##name(unsigned int change, unsigned int new) \
 { \
 	unsigned int res;			\
 	unsigned int omt;			\
-	unsigned int flags;			\
+	unsigned long flags;			\
 						\
 	local_irq_save(flags);			\
 						\
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h
index 3639b28f80db..ea60bf08dcb0 100644
--- a/include/asm-mips/smtc.h
+++ b/include/asm-mips/smtc.h
@@ -6,6 +6,7 @@
  */
 
 #include <asm/mips_mt.h>
+#include <asm/smtc_ipi.h>
 
 /*
  * System-wide SMTC status information
@@ -38,14 +39,15 @@ struct mm_struct;
 struct task_struct;
 
 void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
-
+void self_ipi(struct smtc_ipi *);
 void smtc_flush_tlb_asid(unsigned long asid);
-extern int mipsmt_build_cpu_map(int startslot);
-extern void mipsmt_prepare_cpus(void);
+extern int smtc_build_cpu_map(int startslot);
+extern void smtc_prepare_cpus(int cpus);
 extern void smtc_smp_finish(void);
 extern void smtc_boot_secondary(int cpu, struct task_struct *t);
 extern void smtc_cpus_done(void);
 
+
 /*
  * Sharing the TLB between multiple VPEs means that the
  * "random" index selection function is not allowed to
diff --git a/include/asm-mips/sn/mapped_kernel.h b/include/asm-mips/sn/mapped_kernel.h
index c3dd5d0d525f..721496a0bb92 100644
--- a/include/asm-mips/sn/mapped_kernel.h
+++ b/include/asm-mips/sn/mapped_kernel.h
@@ -5,6 +5,8 @@
 #ifndef __ASM_SN_MAPPED_KERNEL_H
 #define __ASM_SN_MAPPED_KERNEL_H
 
+#include <linux/mmzone.h>
+
 /*
  * Note on how mapped kernels work: the text and data section is
  * compiled at cksseg segment (LOADADDR = 0xc001c000), and the
@@ -29,10 +31,8 @@
 #define MAPPED_ADDR_RO_TO_PHYS(x)	(x - REP_BASE)
 #define MAPPED_ADDR_RW_TO_PHYS(x)	(x - REP_BASE - 16777216)
 
-#define MAPPED_KERN_RO_PHYSBASE(n) \
-	(PLAT_NODE_DATA(n)->kern_vars.kv_ro_baseaddr)
-#define MAPPED_KERN_RW_PHYSBASE(n) \
-	(PLAT_NODE_DATA(n)->kern_vars.kv_rw_baseaddr)
+#define MAPPED_KERN_RO_PHYSBASE(n) (hub_data(n)->kern_vars.kv_ro_baseaddr)
+#define MAPPED_KERN_RW_PHYSBASE(n) (hub_data(n)->kern_vars.kv_rw_baseaddr)
 
 #define MAPPED_KERN_RO_TO_PHYS(x) \
 	((unsigned long)MAPPED_ADDR_RO_TO_PHYS(x) | \
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index 051e1af0bb95..4c37c4e5f72e 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -297,14 +297,31 @@
 #ifdef CONFIG_MIPS_MT_SMTC
 		.set	mips32r2
 		/*
-		 * This may not really be necessary if ints are already
-		 * inhibited here.
+		 * We need to make sure the read-modify-write
+		 * of Status below isn't perturbed by an interrupt
+		 * or cross-TC access, so we need to do at least a DMT,
+		 * protected by an interrupt-inhibit. But setting IXMT
+		 * also creates a few-cycle window where an IPI could
+		 * be queued and not be detected before potentially
+		 * returning to a WAIT or user-mode loop. It must be
+		 * replayed.
+		 *
+		 * We're in the middle of a context switch, and
+		 * we can't dispatch it directly without trashing
+		 * some registers, so we'll try to detect this unlikely
+		 * case and program a software interrupt in the VPE,
+		 * as would be done for a cross-VPE IPI. To accomodate
+		 * the handling of that case, we're doing a DVPE instead
+		 * of just a DMT here to protect against other threads.
+		 * This is a lot of cruft to cover a tiny window.
+		 * If you can find a better design, implement it!
+		 *
 		 */
 		mfc0	v0, CP0_TCSTATUS
 		ori	v0, TCSTATUS_IXMT
 		mtc0	v0, CP0_TCSTATUS
 		_ehb
-		DMT	5				# dmt a1
+		DVPE	5				# dvpe a1
 		jal	mips_ihb
 #endif /* CONFIG_MIPS_MT_SMTC */
 		mfc0	a0, CP0_STATUS
@@ -325,17 +342,50 @@
 		 */
 		LONG_L	v1, PT_TCSTATUS(sp)
 		_ehb
-		mfc0	v0, CP0_TCSTATUS
+		mfc0	a0, CP0_TCSTATUS
 		andi	v1, TCSTATUS_IXMT
-		/* We know that TCStatua.IXMT should be set from above */
-		xori	v0, v0, TCSTATUS_IXMT
-		or	v0, v0, v1
-		mtc0	v0, CP0_TCSTATUS
-		_ehb
-		andi	a1, a1, VPECONTROL_TE
+		bnez	v1, 0f
+
+/*
+ * We'd like to detect any IPIs queued in the tiny window
+ * above and request an software interrupt to service them
+ * when we ERET.
+ *
+ * Computing the offset into the IPIQ array of the executing
+ * TC's IPI queue in-line would be tedious. We use part of
+ * the TCContext register to hold 16 bits of offset that we
+ * can add in-line to find the queue head.
+ */
+		mfc0	v0, CP0_TCCONTEXT
+		la	a2, IPIQ
+		srl	v0, v0, 16
+		addu	a2, a2, v0
+		LONG_L	v0, 0(a2)
+		beqz	v0, 0f
+/*
+ * If we have a queue, provoke dispatch within the VPE by setting C_SW1
+ */
+		mfc0	v0, CP0_CAUSE
+		ori	v0, v0, C_SW1
+		mtc0	v0, CP0_CAUSE
+0:
+		/*
+		 * This test should really never branch but
+		 * let's be prudent here. Having atomized
+		 * the shared register modifications, we can
+		 * now EVPE, and must do so before interrupts
+		 * are potentially re-enabled.
+		 */
+		andi	a1, a1, MVPCONTROL_EVP
 		beqz	a1, 1f
-		emt
+		evpe
 1:
+		/* We know that TCStatua.IXMT should be set from above */
+		xori	a0, a0, TCSTATUS_IXMT
+		or	a0, a0, v1
+		mtc0	a0, CP0_TCSTATUS
+		_ehb
+
 		.set	mips0
 #endif /* CONFIG_MIPS_MT_SMTC */
 		LONG_L	v1, PT_EPC(sp)
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 515d4dce96b5..45806d60bcbe 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -7,6 +7,7 @@
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/prefetch.h>
+#include <linux/lockdep.h>
 #include <asm/page.h>
 
 /*
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 6d93dce61cbb..2f245fe63bda 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -47,14 +47,22 @@ enum hrtimer_restart {
  * HRTIMER_CB_IRQSAFE:		Callback may run in hardirq context
  * HRTIMER_CB_IRQSAFE_NO_RESTART:	Callback may run in hardirq context and
  *					does not restart the timer
- * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:	Callback must run in hardirq context
- *					Special mode for tick emultation
+ * HRTIMER_CB_IRQSAFE_PERCPU:		Callback must run in hardirq context
+ *					Special mode for tick emulation and
+ *					scheduler timer. Such timers are per
+ *					cpu and not allowed to be migrated on
+ *					cpu unplug.
+ * HRTIMER_CB_IRQSAFE_UNLOCKED:	Callback should run in hardirq context
+ *					with timer->base lock unlocked
+ *					used for timers which call wakeup to
+ *					avoid lock order problems with rq->lock
  */
 enum hrtimer_cb_mode {
 	HRTIMER_CB_SOFTIRQ,
 	HRTIMER_CB_IRQSAFE,
 	HRTIMER_CB_IRQSAFE_NO_RESTART,
-	HRTIMER_CB_IRQSAFE_NO_SOFTIRQ,
+	HRTIMER_CB_IRQSAFE_PERCPU,
+	HRTIMER_CB_IRQSAFE_UNLOCKED,
 };
 
 /*
@@ -67,9 +75,10 @@ enum hrtimer_cb_mode {
  * 0x02		callback function running
  * 0x04		callback pending (high resolution mode)
  *
- * Special case:
+ * Special cases:
  * 0x03		callback function running and enqueued
  *		(was requeued on another CPU)
+ * 0x09		timer was migrated on CPU hotunplug
  * The "callback function running and enqueued" status is only possible on
  * SMP. It happens for example when a posix timer expired and the callback
  * queued a signal. Between dropping the lock which protects the posix timer
@@ -87,6 +96,7 @@ enum hrtimer_cb_mode {
 #define HRTIMER_STATE_ENQUEUED	0x01
 #define HRTIMER_STATE_CALLBACK	0x02
 #define HRTIMER_STATE_PENDING	0x04
+#define HRTIMER_STATE_MIGRATE	0x08
 
 /**
  * struct hrtimer - the basic hrtimer structure
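
Callers select one of the callback modes above by filling in the timer's
cb_mode field after hrtimer_init(). A hedged sketch of how a per-cpu,
tick-emulation style timer of this kernel generation might pick the new
HRTIMER_CB_IRQSAFE_PERCPU mode; my_percpu_timer_fn() and setup_percpu_timer()
are illustrative names, not part of this patch:

	#include <linux/hrtimer.h>

	static enum hrtimer_restart my_percpu_timer_fn(struct hrtimer *t)
	{
		/* Runs in hardirq context for the IRQSAFE modes. */
		return HRTIMER_NORESTART;
	}

	static void setup_percpu_timer(struct hrtimer *timer)
	{
		hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		timer->function = my_percpu_timer_fn;
		/*
		 * Per-cpu timer that must not be migrated on CPU unplug;
		 * a wakeup-only timer would use HRTIMER_CB_IRQSAFE_UNLOCKED
		 * instead to avoid rq->lock ordering problems.
		 */
		timer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
	}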
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 1524829f73f2..6514db8fd2e4 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -366,7 +366,9 @@ enum {
 	/* Currently on a filemark */
 	IDE_AFLAG_FILEMARK		= (1 << 25),
 	/* 0 = no tape is loaded, so we don't rewind after ejecting */
-	IDE_AFLAG_MEDIUM_PRESENT	= (1 << 26)
+	IDE_AFLAG_MEDIUM_PRESENT	= (1 << 26),
+
+	IDE_AFLAG_NO_AUTOCLOSE		= (1 << 27),
 };
 
 struct ide_drive_s {
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index b160fb18e8d6..37aaf2b39863 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -6,6 +6,7 @@ extern int ramfs_get_sb(struct file_system_type *fs_type,
 			 int flags, const char *dev_name, void *data, struct vfsmount *mnt);
 
 #ifndef CONFIG_MMU
+extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
 extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
 						    unsigned long addr,
 						    unsigned long len,
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 5da9794b2d78..b106fd8e0d5c 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -1,6 +1,8 @@
 #ifndef __LINUX_STACKTRACE_H
 #define __LINUX_STACKTRACE_H
 
+struct task_struct;
+
 #ifdef CONFIG_STACKTRACE
 struct stack_trace {
 	unsigned int nr_entries, max_entries;
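
The forward declaration of struct task_struct above lets users of this header
call the *_tsk variants without pulling in <linux/sched.h>. A minimal usage
sketch under that assumption; dump_task_stack() is an illustrative helper,
not part of the patch:

	#include <linux/kernel.h>
	#include <linux/stacktrace.h>

	/* Capture and print another task's stack (CONFIG_STACKTRACE=y). */
	static void dump_task_stack(struct task_struct *tsk)
	{
		unsigned long entries[16];
		struct stack_trace trace = {
			.nr_entries	= 0,
			.max_entries	= ARRAY_SIZE(entries),
			.entries	= entries,
			.skip		= 0,
		};

		save_stack_trace_tsk(tsk, &trace);
		print_stack_trace(&trace, 0);	/* 0 = no extra indentation */
	}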