Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/ata.h | 110
-rw-r--r--  include/linux/blkdev.h | 1
-rw-r--r--  include/linux/cdrom.h | 3
-rw-r--r--  include/linux/cpu.h | 17
-rw-r--r--  include/linux/debug_locks.h | 5
-rw-r--r--  include/linux/futex.h | 6
-rw-r--r--  include/linux/hardirq.h | 6
-rw-r--r--  include/linux/hdreg.h | 64
-rw-r--r--  include/linux/hrtimer.h | 14
-rw-r--r--  include/linux/i2c-id.h | 2
-rw-r--r--  include/linux/ide.h | 322
-rw-r--r--  include/linux/init_task.h | 7
-rw-r--r--  include/linux/interrupt.h | 1
-rw-r--r--  include/linux/jiffies.h | 6
-rw-r--r--  include/linux/kernel.h | 4
-rw-r--r--  include/linux/latencytop.h | 44
-rw-r--r--  include/linux/libata.h | 184
-rw-r--r--  include/linux/notifier.h | 4
-rw-r--r--  include/linux/pci_ids.h | 3
-rw-r--r--  include/linux/rcuclassic.h | 164
-rw-r--r--  include/linux/rcupdate.h | 173
-rw-r--r--  include/linux/rcupreempt.h | 86
-rw-r--r--  include/linux/rcupreempt_trace.h | 99
-rw-r--r--  include/linux/sched.h | 83
-rw-r--r--  include/linux/smp_lock.h | 14
-rw-r--r--  include/linux/stacktrace.h | 3
-rw-r--r--  include/linux/topology.h | 5
27 files changed, 936 insertions, 494 deletions
diff --git a/include/linux/ata.h b/include/linux/ata.h
index e672e80202a8..78bbacaed8c4 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -286,9 +286,10 @@ enum {
         ATA_CBL_NONE = 0,
         ATA_CBL_PATA40 = 1,
         ATA_CBL_PATA80 = 2,
         ATA_CBL_PATA40_SHORT = 3, /* 40 wire cable to high UDMA spec */
-        ATA_CBL_PATA_UNK = 4,
-        ATA_CBL_SATA = 5,
+        ATA_CBL_PATA_UNK = 4, /* don't know, maybe 80c? */
+        ATA_CBL_PATA_IGN = 5, /* don't know, ignore cable handling */
+        ATA_CBL_SATA = 6,

         /* SATA Status and Control Registers */
         SCR_STATUS = 0,
@@ -324,6 +325,13 @@ enum {
         ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
         ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
         ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */
+
+        /* protocol flags */
+        ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */
+        ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */
+        ATA_PROT_FLAG_DATA = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA,
+        ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */
+        ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */
 };

 enum ata_tf_protocols {
@@ -333,9 +341,9 @@ enum ata_tf_protocols {
         ATA_PROT_PIO, /* PIO data xfer */
         ATA_PROT_DMA, /* DMA */
         ATA_PROT_NCQ, /* NCQ */
-        ATA_PROT_ATAPI, /* packet command, PIO data xfer*/
-        ATA_PROT_ATAPI_NODATA, /* packet command, no data */
-        ATA_PROT_ATAPI_DMA, /* packet command with special DMA sauce */
+        ATAPI_PROT_NODATA, /* packet command, no data */
+        ATAPI_PROT_PIO, /* packet command, PIO data xfer*/
+        ATAPI_PROT_DMA, /* packet command with special DMA sauce */
 };

 enum ata_ioctls {
@@ -346,8 +354,8 @@ enum ata_ioctls {
 /* core structures */

 struct ata_prd {
-        u32 addr;
-        u32 flags_len;
+        __le32 addr;
+        __le32 flags_len;
 };

 struct ata_taskfile {
@@ -373,13 +381,69 @@ struct ata_taskfile {
         u8 command; /* IO operation */
 };

+/*
+ * protocol tests
+ */
+static inline unsigned int ata_prot_flags(u8 prot)
+{
+        switch (prot) {
+        case ATA_PROT_NODATA:
+                return 0;
+        case ATA_PROT_PIO:
+                return ATA_PROT_FLAG_PIO;
+        case ATA_PROT_DMA:
+                return ATA_PROT_FLAG_DMA;
+        case ATA_PROT_NCQ:
+                return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ;
+        case ATAPI_PROT_NODATA:
+                return ATA_PROT_FLAG_ATAPI;
+        case ATAPI_PROT_PIO:
+                return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO;
+        case ATAPI_PROT_DMA:
+                return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA;
+        }
+        return 0;
+}
+
+static inline int ata_is_atapi(u8 prot)
+{
+        return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI;
+}
+
+static inline int ata_is_nodata(u8 prot)
+{
+        return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA);
+}
+
+static inline int ata_is_pio(u8 prot)
+{
+        return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO;
+}
+
+static inline int ata_is_dma(u8 prot)
+{
+        return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA;
+}
+
+static inline int ata_is_ncq(u8 prot)
+{
+        return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ;
+}
+
+static inline int ata_is_data(u8 prot)
+{
+        return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA;
+}
+
+/*
+ * id tests
+ */
 #define ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0)
 #define ata_id_has_lba(id) ((id)[49] & (1 << 9))
 #define ata_id_has_dma(id) ((id)[49] & (1 << 8))
 #define ata_id_has_ncq(id) ((id)[76] & (1 << 8))
 #define ata_id_queue_depth(id) (((id)[75] & 0x1f) + 1)
 #define ata_id_removeable(id) ((id)[0] & (1 << 7))
-#define ata_id_has_dword_io(id) ((id)[48] & (1 << 0))
 #define ata_id_has_atapi_AN(id) \
         ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \
           ((id)[78] & (1 << 5)) )
@@ -415,6 +479,7 @@ static inline bool ata_id_has_dipm(const u16 *id)
         return val & (1 << 3);
 }

+
 static inline int ata_id_has_fua(const u16 *id)
 {
         if ((id[84] & 0xC000) != 0x4000)
@@ -519,6 +584,26 @@ static inline int ata_id_is_sata(const u16 *id)
         return ata_id_major_version(id) >= 5 && id[93] == 0;
 }

+static inline int ata_id_has_tpm(const u16 *id)
+{
+        /* The TPM bits are only valid on ATA8 */
+        if (ata_id_major_version(id) < 8)
+                return 0;
+        if ((id[48] & 0xC000) != 0x4000)
+                return 0;
+        return id[48] & (1 << 0);
+}
+
+static inline int ata_id_has_dword_io(const u16 *id)
+{
+        /* ATA 8 reuses this flag for "trusted" computing */
+        if (ata_id_major_version(id) > 7)
+                return 0;
+        if (id[48] & (1 << 0))
+                return 1;
+        return 0;
+}
+
 static inline int ata_id_current_chs_valid(const u16 *id)
 {
         /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -574,13 +659,6 @@ static inline int atapi_command_packet_set(const u16 *dev_id)
         return (dev_id[0] >> 8) & 0x1f;
 }

-static inline int is_atapi_taskfile(const struct ata_taskfile *tf)
-{
-        return (tf->protocol == ATA_PROT_ATAPI) ||
-               (tf->protocol == ATA_PROT_ATAPI_NODATA) ||
-               (tf->protocol == ATA_PROT_ATAPI_DMA);
-}
-
 static inline int is_multi_taskfile(struct ata_taskfile *tf)
 {
         return (tf->command == ATA_CMD_READ_MULTI) ||
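For context, the removed is_atapi_taskfile() test is subsumed by the new protocol helpers above; a minimal usage sketch (illustrative only, not part of the patch -- qc stands for any struct ata_queued_cmd):

/* classify a queued command by its taskfile protocol */
if (ata_is_atapi(qc->tf.protocol) && ata_is_dma(qc->tf.protocol)) {
        /* ATAPI_PROT_DMA: packet command transferring data by DMA */
}
if (ata_is_ncq(qc->tf.protocol)) {
        /* ATA_PROT_NCQ: NCQ always implies ata_is_dma() as well */
}
if (ata_is_nodata(qc->tf.protocol)) {
        /* no data phase at all, neither PIO nor DMA */
}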
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d18ee67b40f8..40ee1706caa3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -144,7 +144,6 @@ enum rq_cmd_type_bits {
  * private REQ_LB opcodes to differentiate what type of request this is
  */
         REQ_TYPE_ATA_CMD,
-        REQ_TYPE_ATA_TASK,
         REQ_TYPE_ATA_TASKFILE,
         REQ_TYPE_ATA_PC,
 };
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index c6d3e22c0624..fcdc11b9609b 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -451,6 +451,7 @@ struct cdrom_generic_command
 #define GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
 #define GPCMD_READ_10 0x28
 #define GPCMD_READ_12 0xa8
+#define GPCMD_READ_BUFFER 0x3c
 #define GPCMD_READ_BUFFER_CAPACITY 0x5c
 #define GPCMD_READ_CDVD_CAPACITY 0x25
 #define GPCMD_READ_CD 0xbe
@@ -480,7 +481,9 @@ struct cdrom_generic_command
 #define GPCMD_TEST_UNIT_READY 0x00
 #define GPCMD_VERIFY_10 0x2f
 #define GPCMD_WRITE_10 0x2a
+#define GPCMD_WRITE_12 0xaa
 #define GPCMD_WRITE_AND_VERIFY_10 0x2e
+#define GPCMD_WRITE_BUFFER 0x3b
 /* This is listed as optional in ATAPI 2.6, but is (curiously)
  * missing from Mt. Fuji, Table 57. It _is_ mentioned in Mt. Fuji
  * Table 377 as an MMC command for SCSi devices though... Most ATAPI
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 92f2029a34f3..0be8d65bc3c8 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -71,18 +71,27 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)

 int cpu_up(unsigned int cpu);

+extern void cpu_hotplug_init(void);
+
 #else

 static inline int register_cpu_notifier(struct notifier_block *nb)
 {
         return 0;
 }
+
 static inline void unregister_cpu_notifier(struct notifier_block *nb)
 {
 }

+static inline void cpu_hotplug_init(void)
+{
+}
+
 #endif /* CONFIG_SMP */
 extern struct sysdev_class cpu_sysdev_class;
+extern void cpu_maps_update_begin(void);
+extern void cpu_maps_update_done(void);

 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
@@ -97,8 +106,8 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
         mutex_unlock(cpu_hp_mutex);
 }

-extern void lock_cpu_hotplug(void);
-extern void unlock_cpu_hotplug(void);
+extern void get_online_cpus(void);
+extern void put_online_cpus(void);
 #define hotcpu_notifier(fn, pri) { \
         static struct notifier_block fn##_nb = \
                 { .notifier_call = fn, .priority = pri }; \
@@ -115,8 +124,8 @@ static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
 static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
 { }

-#define lock_cpu_hotplug() do { } while (0)
-#define unlock_cpu_hotplug() do { } while (0)
+#define get_online_cpus() do { } while (0)
+#define put_online_cpus() do { } while (0)
 #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
 /* These aren't inline functions due to a GCC bug. */
 #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
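For reference, the renamed hotplug-exclusion pair is used exactly the way lock_cpu_hotplug()/unlock_cpu_hotplug() was; a minimal sketch (do_work() is a placeholder, not part of the patch):

int cpu;

get_online_cpus();              /* formerly lock_cpu_hotplug() */
for_each_online_cpu(cpu)
        do_work(cpu);           /* CPUs cannot be unplugged while held */
put_online_cpus();              /* formerly unlock_cpu_hotplug() */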
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 1678a5de7013..f4a5871767f5 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -47,6 +47,7 @@ struct task_struct;

 #ifdef CONFIG_LOCKDEP
 extern void debug_show_all_locks(void);
+extern void __debug_show_held_locks(struct task_struct *task);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
 extern void debug_check_no_locks_held(struct task_struct *task);
@@ -55,6 +56,10 @@ static inline void debug_show_all_locks(void)
 {
 }

+static inline void __debug_show_held_locks(struct task_struct *task)
+{
+}
+
 static inline void debug_show_held_locks(struct task_struct *task)
 {
 }
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 92d420fe03f8..1a15f8e237a7 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -1,8 +1,12 @@
 #ifndef _LINUX_FUTEX_H
 #define _LINUX_FUTEX_H

-#include <linux/sched.h>
+#include <linux/compiler.h>
+#include <linux/types.h>

+struct inode;
+struct mm_struct;
+struct task_struct;
 union ktime;

 /* Second argument to futex syscall */
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8d302298a161..2961ec788046 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -72,11 +72,7 @@
 #define in_softirq() (softirq_count())
 #define in_interrupt() (irq_count())

-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-#else
-# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-#endif
+#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)

 #ifdef CONFIG_PREEMPT
 # define PREEMPT_CHECK_OFFSET 1
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index 818c6afc1091..ff43f8d6b5b3 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -44,7 +44,9 @@

 /* Bits for HD_ERROR */
 #define MARK_ERR 0x01 /* Bad address mark */
+#define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */
 #define TRK0_ERR 0x02 /* couldn't find track 0 */
+#define EOM_ERR 0x02 /* End Of Media (ATAPI) */
 #define ABRT_ERR 0x04 /* Command aborted */
 #define MCR_ERR 0x08 /* media change request */
 #define ID_ERR 0x10 /* ID field not found */
@@ -52,6 +54,7 @@
 #define ECC_ERR 0x40 /* Uncorrectable ECC error */
 #define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */
 #define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
+#define LFS_ERR 0xf0 /* Last Failed Sense (ATAPI) */

 /* Bits of HD_NSECTOR */
 #define CD 0x01
@@ -70,13 +73,13 @@
 #define HDIO_DRIVE_HOB_HDR_SIZE (8 * sizeof(__u8))
 #define HDIO_DRIVE_TASK_HDR_SIZE (8 * sizeof(__u8))

-#define IDE_DRIVE_TASK_INVALID -1
 #define IDE_DRIVE_TASK_NO_DATA 0
+#ifndef __KERNEL__
+#define IDE_DRIVE_TASK_INVALID -1
 #define IDE_DRIVE_TASK_SET_XFER 1
-
 #define IDE_DRIVE_TASK_IN 2
-
 #define IDE_DRIVE_TASK_OUT 3
+#endif
 #define IDE_DRIVE_TASK_RAW_WRITE 4

 /*
@@ -87,10 +90,10 @@
 #ifndef __KERNEL__
 #define IDE_TASKFILE_STD_OUT_FLAGS 0xFE
 #define IDE_HOB_STD_OUT_FLAGS 0x3C
-#endif

 typedef unsigned char task_ioreg_t;
 typedef unsigned long sata_ioreg_t;
+#endif

 typedef union ide_reg_valid_s {
         unsigned all : 16;
@@ -116,8 +119,8 @@ typedef union ide_reg_valid_s {
 } ide_reg_valid_t;

 typedef struct ide_task_request_s {
-        task_ioreg_t io_ports[8];
-        task_ioreg_t hob_ports[8];
+        __u8 io_ports[8];
+        __u8 hob_ports[8]; /* bytes 6 and 7 are unused */
         ide_reg_valid_t out_flags;
         ide_reg_valid_t in_flags;
         int data_phase;
@@ -133,36 +136,35 @@ typedef struct ide_ioctl_request_s {
 } ide_ioctl_request_t;

 struct hd_drive_cmd_hdr {
-        task_ioreg_t command;
-        task_ioreg_t sector_number;
-        task_ioreg_t feature;
-        task_ioreg_t sector_count;
+        __u8 command;
+        __u8 sector_number;
+        __u8 feature;
+        __u8 sector_count;
 };

+#ifndef __KERNEL__
 typedef struct hd_drive_task_hdr {
-        task_ioreg_t data;
-        task_ioreg_t feature;
-        task_ioreg_t sector_count;
-        task_ioreg_t sector_number;
-        task_ioreg_t low_cylinder;
-        task_ioreg_t high_cylinder;
-        task_ioreg_t device_head;
-        task_ioreg_t command;
+        __u8 data;
+        __u8 feature;
+        __u8 sector_count;
+        __u8 sector_number;
+        __u8 low_cylinder;
+        __u8 high_cylinder;
+        __u8 device_head;
+        __u8 command;
 } task_struct_t;

 typedef struct hd_drive_hob_hdr {
-        task_ioreg_t data;
-        task_ioreg_t feature;
-        task_ioreg_t sector_count;
-        task_ioreg_t sector_number;
-        task_ioreg_t low_cylinder;
-        task_ioreg_t high_cylinder;
-        task_ioreg_t device_head;
-        task_ioreg_t control;
+        __u8 data;
+        __u8 feature;
+        __u8 sector_count;
+        __u8 sector_number;
+        __u8 low_cylinder;
+        __u8 high_cylinder;
+        __u8 device_head;
+        __u8 control;
 } hob_struct_t;
-
-#define TASKFILE_INVALID 0x7fff
-#define TASKFILE_48 0x8000
+#endif

 #define TASKFILE_NO_DATA 0x0000

@@ -178,12 +180,16 @@ typedef struct hd_drive_hob_hdr {
 #define TASKFILE_IN_DMAQ 0x0080
 #define TASKFILE_OUT_DMAQ 0x0100

+#ifndef __KERNEL__
 #define TASKFILE_P_IN 0x0200
 #define TASKFILE_P_OUT 0x0400
 #define TASKFILE_P_IN_DMA 0x0800
 #define TASKFILE_P_OUT_DMA 0x1000
 #define TASKFILE_P_IN_DMAQ 0x2000
 #define TASKFILE_P_OUT_DMAQ 0x4000
+#define TASKFILE_48 0x8000
+#define TASKFILE_INVALID 0x7fff
+#endif

 /* ATA/ATAPI Commands pre T13 Spec */
 #define WIN_NOP 0x00
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 7a9398e19704..49067f14fac1 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -115,10 +115,8 @@ struct hrtimer {
         enum hrtimer_restart (*function)(struct hrtimer *);
         struct hrtimer_clock_base *base;
         unsigned long state;
-#ifdef CONFIG_HIGH_RES_TIMERS
         enum hrtimer_cb_mode cb_mode;
         struct list_head cb_entry;
-#endif
 #ifdef CONFIG_TIMER_STATS
         void *start_site;
         char start_comm[16];
@@ -194,10 +192,10 @@ struct hrtimer_cpu_base {
         spinlock_t lock;
         struct lock_class_key lock_key;
         struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+        struct list_head cb_pending;
 #ifdef CONFIG_HIGH_RES_TIMERS
         ktime_t expires_next;
         int hres_active;
-        struct list_head cb_pending;
         unsigned long nr_events;
 #endif
 };
@@ -217,6 +215,11 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
         return timer->base->get_time();
 }

+static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+{
+        return timer->base->cpu_base->hres_active;
+}
+
 /*
  * The resolution of the clocks. The resolution value is returned in
  * the clock_getres() system call to give application programmers an
@@ -248,6 +251,10 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
         return timer->base->softirq_time;
 }

+static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+{
+        return 0;
+}
 #endif

 extern ktime_t ktime_get(void);
@@ -310,6 +317,7 @@ extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,

 /* Soft interrupt function to run the hrtimer queues: */
 extern void hrtimer_run_queues(void);
+extern void hrtimer_run_pending(void);

 /* Bootup initialization: */
 extern void __init hrtimers_init(void);
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index e18017d45758..c7a51a196f51 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -125,6 +125,8 @@
 #define I2C_DRIVERID_LM4857 92 /* LM4857 Audio Amplifier */
 #define I2C_DRIVERID_VP27SMPX 93 /* Panasonic VP27s tuner internal MPX */
 #define I2C_DRIVERID_CS4270 94 /* Cirrus Logic 4270 audio codec */
+#define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */
+#define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */

 #define I2C_DRIVERID_I2CDEV 900
 #define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 9a6a41e7079f..1e4409937ec3 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -27,25 +27,10 @@
 #include <asm/semaphore.h>
 #include <asm/mutex.h>

-/******************************************************************************
- * IDE driver configuration options (play with these as desired):
- *
- * REALLY_SLOW_IO can be defined in ide.c and ide-cd.c, if necessary
- */
-#define INITIAL_MULT_COUNT 0 /* off=0; on=2,4,8,16,32, etc.. */
-
-#ifndef SUPPORT_SLOW_DATA_PORTS /* 1 to support slow data ports */
-#define SUPPORT_SLOW_DATA_PORTS 1 /* 0 to reduce kernel size */
-#endif
-#ifndef SUPPORT_VLB_SYNC /* 1 to support weird 32-bit chips */
-#define SUPPORT_VLB_SYNC 1 /* 0 to reduce kernel size */
-#endif
-#ifndef OK_TO_RESET_CONTROLLER /* 1 needed for good error recovery */
-#define OK_TO_RESET_CONTROLLER 1 /* 0 for use with AH2372A/B interface */
-#endif
-
-#ifndef DISABLE_IRQ_NOSYNC
-#define DISABLE_IRQ_NOSYNC 0
+#if defined(CRIS) || defined(FRV)
+# define SUPPORT_VLB_SYNC 0
+#else
+# define SUPPORT_VLB_SYNC 1
 #endif

 /*
@@ -55,10 +40,6 @@

 #define IDE_NO_IRQ (-1)

-/*
- * "No user-serviceable parts" beyond this point :)
- *****************************************************************************/
-
 typedef unsigned char byte; /* used everywhere */

 /*
@@ -103,8 +84,6 @@ typedef unsigned char byte; /* used everywhere */
 #define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET
 #define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET

-#define IDE_CONTROL_OFFSET_HOB (7)
-
 #define IDE_DATA_REG (HWIF(drive)->io_ports[IDE_DATA_OFFSET])
 #define IDE_ERROR_REG (HWIF(drive)->io_ports[IDE_ERROR_OFFSET])
 #define IDE_NSECTOR_REG (HWIF(drive)->io_ports[IDE_NSECTOR_OFFSET])
@@ -327,47 +306,16 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw,
 typedef union {
         unsigned all : 8;
         struct {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
                 unsigned set_geometry : 1;
                 unsigned recalibrate : 1;
                 unsigned set_multmode : 1;
                 unsigned set_tune : 1;
                 unsigned serviced : 1;
                 unsigned reserved : 3;
-#elif defined(__BIG_ENDIAN_BITFIELD)
-                unsigned reserved : 3;
-                unsigned serviced : 1;
-                unsigned set_tune : 1;
-                unsigned set_multmode : 1;
-                unsigned recalibrate : 1;
-                unsigned set_geometry : 1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
         } b;
 } special_t;

 /*
- * ATA DATA Register Special.
- * ATA NSECTOR Count Register().
- * ATAPI Byte Count Register.
- */
-typedef union {
-        unsigned all :16;
-        struct {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-                unsigned low :8; /* LSB */
-                unsigned high :8; /* MSB */
-#elif defined(__BIG_ENDIAN_BITFIELD)
-                unsigned high :8; /* MSB */
-                unsigned low :8; /* LSB */
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-        } b;
-} ata_nsector_t, ata_data_t, atapi_bcount_t;
-
-/*
  * ATA-IDE Select Register, aka Device-Head
  *
  * head : always zeros here
@@ -398,131 +346,6 @@ typedef union {
 } select_t, ata_select_t;

 /*
- * The ATA-IDE Status Register.
- * The ATAPI Status Register.
- *
- * check : Error occurred
- * idx : Index Error
- * corr : Correctable error occurred
- * drq : Data is request by the device
- * dsc : Disk Seek Complete : ata
- *     : Media access command finished : atapi
- * df : Device Fault : ata
- *    : Reserved : atapi
- * drdy : Ready, Command Mode Capable : ata
- *      : Ignored for ATAPI commands : atapi
- * bsy : Disk is Busy
- *     : The device has access to the command block
- */
-typedef union {
-        unsigned all :8;
-        struct {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-                unsigned check :1;
-                unsigned idx :1;
-                unsigned corr :1;
-                unsigned drq :1;
-                unsigned dsc :1;
-                unsigned df :1;
-                unsigned drdy :1;
-                unsigned bsy :1;
-#elif defined(__BIG_ENDIAN_BITFIELD)
-                unsigned bsy :1;
-                unsigned drdy :1;
-                unsigned df :1;
-                unsigned dsc :1;
-                unsigned drq :1;
-                unsigned corr :1;
-                unsigned idx :1;
-                unsigned check :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-        } b;
-} ata_status_t, atapi_status_t;
-
-/*
- * ATAPI Feature Register
- *
- * dma : Using DMA or PIO
- * reserved321 : Reserved
- * reserved654 : Reserved (Tag Type)
- * reserved7 : Reserved
- */
-typedef union {
-        unsigned all :8;
-        struct {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-                unsigned dma :1;
-                unsigned reserved321 :3;
-                unsigned reserved654 :3;
-                unsigned reserved7 :1;
-#elif defined(__BIG_ENDIAN_BITFIELD)
-                unsigned reserved7 :1;
-                unsigned reserved654 :3;
-                unsigned reserved321 :3;
-                unsigned dma :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-        } b;
-} atapi_feature_t;
-
-/*
- * ATAPI Interrupt Reason Register.
- *
- * cod : Information transferred is command (1) or data (0)
- * io : The device requests us to read (1) or write (0)
- * reserved : Reserved
- */
-typedef union {
-        unsigned all :8;
-        struct {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-                unsigned cod :1;
-                unsigned io :1;
-                unsigned reserved :6;
-#elif defined(__BIG_ENDIAN_BITFIELD)
-                unsigned reserved :6;
-                unsigned io :1;
-                unsigned cod :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-        } b;
-} atapi_ireason_t;
-
-/*
- * The ATAPI error register.
- *
- * ili : Illegal Length Indication
- * eom : End Of Media Detected
- * abrt : Aborted command - As defined by ATA
- * mcr : Media Change Requested - As defined by ATA
- * sense_key : Sense key of the last failed packet command
- */
-typedef union {
-        unsigned all :8;
-        struct {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-                unsigned ili :1;
-                unsigned eom :1;
-                unsigned abrt :1;
-                unsigned mcr :1;
-                unsigned sense_key :4;
-#elif defined(__BIG_ENDIAN_BITFIELD)
-                unsigned sense_key :4;
-                unsigned mcr :1;
-                unsigned abrt :1;
-                unsigned eom :1;
-                unsigned ili :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-        } b;
-} atapi_error_t;
-
-/*
  * Status returned from various ide_ functions
  */
 typedef enum {
@@ -701,8 +524,6 @@ typedef struct hwif_s {
         void (*pre_reset)(ide_drive_t *);
         /* routine to reset controller after a disk reset */
         void (*resetproc)(ide_drive_t *);
-        /* special interrupt handling for shared pci interrupts */
-        void (*intrproc)(ide_drive_t *);
         /* special host masking for drive selection */
         void (*maskproc)(ide_drive_t *, int);
         /* check host's drive quirk list */
@@ -766,7 +587,6 @@ typedef struct hwif_s {
         int rqsize; /* max sectors per request */
         int irq; /* our irq number */

-        unsigned long dma_master; /* reference base addr dmabase */
         unsigned long dma_base; /* base addr for dma ports */
         unsigned long dma_command; /* dma command register */
         unsigned long dma_vendor1; /* dma vendor 1 register */
@@ -806,7 +626,6 @@ typedef struct hwif_s {
 /*
  * internal ide interrupt handler type
  */
-typedef ide_startstop_t (ide_pre_handler_t)(ide_drive_t *, struct request *);
 typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
 typedef int (ide_expiry_t)(ide_drive_t *);

@@ -1020,7 +839,8 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,

 extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry);

-extern void ide_execute_command(ide_drive_t *, task_ioreg_t cmd, ide_handler_t *, unsigned int, ide_expiry_t *);
+void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int,
+                         ide_expiry_t *);

 ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8);

@@ -1062,52 +882,114 @@ extern void ide_end_drive_cmd(ide_drive_t *, u8, u8);
  */
 extern int ide_wait_cmd(ide_drive_t *, u8, u8, u8, u8, u8 *);

+enum {
+        IDE_TFLAG_LBA48 = (1 << 0),
+        IDE_TFLAG_NO_SELECT_MASK = (1 << 1),
+        IDE_TFLAG_FLAGGED = (1 << 2),
+        IDE_TFLAG_OUT_DATA = (1 << 3),
+        IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4),
+        IDE_TFLAG_OUT_HOB_NSECT = (1 << 5),
+        IDE_TFLAG_OUT_HOB_LBAL = (1 << 6),
+        IDE_TFLAG_OUT_HOB_LBAM = (1 << 7),
+        IDE_TFLAG_OUT_HOB_LBAH = (1 << 8),
+        IDE_TFLAG_OUT_HOB = IDE_TFLAG_OUT_HOB_FEATURE |
+                            IDE_TFLAG_OUT_HOB_NSECT |
+                            IDE_TFLAG_OUT_HOB_LBAL |
+                            IDE_TFLAG_OUT_HOB_LBAM |
+                            IDE_TFLAG_OUT_HOB_LBAH,
+        IDE_TFLAG_OUT_FEATURE = (1 << 9),
+        IDE_TFLAG_OUT_NSECT = (1 << 10),
+        IDE_TFLAG_OUT_LBAL = (1 << 11),
+        IDE_TFLAG_OUT_LBAM = (1 << 12),
+        IDE_TFLAG_OUT_LBAH = (1 << 13),
+        IDE_TFLAG_OUT_TF = IDE_TFLAG_OUT_FEATURE |
+                           IDE_TFLAG_OUT_NSECT |
+                           IDE_TFLAG_OUT_LBAL |
+                           IDE_TFLAG_OUT_LBAM |
+                           IDE_TFLAG_OUT_LBAH,
+        IDE_TFLAG_OUT_DEVICE = (1 << 14),
+        IDE_TFLAG_WRITE = (1 << 15),
+        IDE_TFLAG_FLAGGED_SET_IN_FLAGS = (1 << 16),
+        IDE_TFLAG_IN_DATA = (1 << 17),
+        IDE_TFLAG_CUSTOM_HANDLER = (1 << 18),
+        IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 19),
+        IDE_TFLAG_IN_HOB_FEATURE = (1 << 20),
+        IDE_TFLAG_IN_HOB_NSECT = (1 << 21),
+        IDE_TFLAG_IN_HOB_LBAL = (1 << 22),
+        IDE_TFLAG_IN_HOB_LBAM = (1 << 23),
+        IDE_TFLAG_IN_HOB_LBAH = (1 << 24),
+        IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL |
+                               IDE_TFLAG_IN_HOB_LBAM |
+                               IDE_TFLAG_IN_HOB_LBAH,
+        IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE |
+                           IDE_TFLAG_IN_HOB_NSECT |
+                           IDE_TFLAG_IN_HOB_LBA,
+        IDE_TFLAG_IN_NSECT = (1 << 25),
+        IDE_TFLAG_IN_LBAL = (1 << 26),
+        IDE_TFLAG_IN_LBAM = (1 << 27),
+        IDE_TFLAG_IN_LBAH = (1 << 28),
+        IDE_TFLAG_IN_LBA = IDE_TFLAG_IN_LBAL |
+                           IDE_TFLAG_IN_LBAM |
+                           IDE_TFLAG_IN_LBAH,
+        IDE_TFLAG_IN_TF = IDE_TFLAG_IN_NSECT |
+                          IDE_TFLAG_IN_LBA,
+        IDE_TFLAG_IN_DEVICE = (1 << 29),
+};
+
+struct ide_taskfile {
+        u8 hob_data;    /*  0: high data byte (for TASKFILE IOCTL) */
+
+        u8 hob_feature; /*  1-5: additional data to support LBA48 */
+        u8 hob_nsect;
+        u8 hob_lbal;
+        u8 hob_lbam;
+        u8 hob_lbah;
+
+        u8 data;        /*  6: low data byte (for TASKFILE IOCTL) */
+
+        union {                 /*  7: */
+                u8 error;       /*   read: error */
+                u8 feature;     /*  write: feature */
+        };
+
+        u8 nsect;       /*  8: number of sectors */
+        u8 lbal;        /*  9: LBA low */
+        u8 lbam;        /* 10: LBA mid */
+        u8 lbah;        /* 11: LBA high */
+
+        u8 device;      /* 12: device select */
+
+        union {                 /* 13: */
+                u8 status;      /*  read: status  */
+                u8 command;     /* write: command */
+        };
+};
+
 typedef struct ide_task_s {
-/*
- * struct hd_drive_task_hdr tf;
- * task_struct_t tf;
- * struct hd_drive_hob_hdr hobf;
- * hob_struct_t hobf;
- */
-        task_ioreg_t tfRegister[8];
-        task_ioreg_t hobRegister[8];
-        ide_reg_valid_t tf_out_flags;
-        ide_reg_valid_t tf_in_flags;
+        union {
+                struct ide_taskfile tf;
+                u8 tf_array[14];
+        };
+        u32 tf_flags;
         int data_phase;
-        int command_type;
-        ide_pre_handler_t *prehandler;
-        ide_handler_t *handler;
         struct request *rq; /* copy of request */
         void *special; /* valid_t generally */
 } ide_task_t;

-extern u32 ide_read_24(ide_drive_t *);
+void ide_tf_load(ide_drive_t *, ide_task_t *);
+void ide_tf_read(ide_drive_t *, ide_task_t *);

 extern void SELECT_DRIVE(ide_drive_t *);
-extern void SELECT_INTERRUPT(ide_drive_t *);
 extern void SELECT_MASK(ide_drive_t *, int);
-extern void QUIRK_LIST(ide_drive_t *);

 extern int drive_is_ready(ide_drive_t *);

-/*
- * taskfile io for disks for now...and builds request from ide_ioctl
- */
-extern ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);
-
-/*
- * Special Flagged Register Validation Caller
- */
-extern ide_startstop_t flagged_taskfile(ide_drive_t *, ide_task_t *);
+void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);

-extern ide_startstop_t set_multmode_intr(ide_drive_t *);
-extern ide_startstop_t set_geometry_intr(ide_drive_t *);
-extern ide_startstop_t recal_intr(ide_drive_t *);
-extern ide_startstop_t task_no_data_intr(ide_drive_t *);
-extern ide_startstop_t task_in_intr(ide_drive_t *);
-extern ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
+ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);

-extern int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *);
+int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16);
+int ide_no_data_taskfile(ide_drive_t *, ide_task_t *);

 int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long);
 int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long);
@@ -1212,6 +1094,7 @@ enum {
         IDE_HFLAG_IO_32BIT = (1 << 24),
         /* unmask IRQs */
         IDE_HFLAG_UNMASK_IRQS = (1 << 25),
+        IDE_HFLAG_ABUSE_SET_DMA_MODE = (1 << 26),
 };

 #ifdef CONFIG_BLK_DEV_OFFBOARD
@@ -1229,7 +1112,7 @@ struct ide_port_info {
         void (*fixup)(ide_hwif_t *);
         ide_pci_enablebit_t enablebits[2];
         hwif_chipset_t chipset;
-        unsigned int extra;
+        u8 extra;
         u32 host_flags;
         u8 pio_mask;
         u8 swdma_mask;
@@ -1356,6 +1239,7 @@ static inline int ide_dev_is_sata(struct hd_driveid *id)
         return 0;
 }

+u64 ide_get_lba_addr(struct ide_taskfile *, int);
 u8 ide_dump_status(ide_drive_t *, const char *, u8);

 typedef struct ide_pio_timings_s {
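For context, drivers fill the new struct ide_taskfile through ide_task_t and hand it to the taskfile helpers declared above; a hedged sketch, assuming the WIN_SETFEATURES, SETFEATURES_XFER and XFER_PIO_4 constants from <linux/hdreg.h> (illustrative only, not part of the patch):

ide_task_t args;
int err;

memset(&args, 0, sizeof(args));
args.tf.feature = SETFEATURES_XFER;     /* subcommand: set transfer mode */
args.tf.nsect   = XFER_PIO_4;           /* requested mode */
args.tf.command = WIN_SETFEATURES;
args.tf_flags   = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
err = ide_no_data_taskfile(drive, &args);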
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index cae35b6b9aec..796019b22b6f 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -132,9 +132,12 @@ extern struct group_info init_groups;
         .cpus_allowed = CPU_MASK_ALL, \
         .mm = NULL, \
         .active_mm = &init_mm, \
-        .run_list = LIST_HEAD_INIT(tsk.run_list), \
+        .rt = { \
+                .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
+                .time_slice = HZ, \
+                .nr_cpus_allowed = NR_CPUS, \
+        }, \
         .ioprio = 0, \
-        .time_slice = HZ, \
         .tasks = LIST_HEAD_INIT(tsk.tasks), \
         .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
         .ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 2306920fa388..c3db4a00f1fa 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -256,6 +256,7 @@ enum
 #ifdef CONFIG_HIGH_RES_TIMERS
         HRTIMER_SOFTIRQ,
 #endif
+        RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
 };

 /* softirq mask and active fields moved to irq_cpustat_t in
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 8b080024bbc1..7ba9e47bf061 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -29,6 +29,12 @@
 # define SHIFT_HZ 9
 #elif HZ >= 768 && HZ < 1536
 # define SHIFT_HZ 10
+#elif HZ >= 1536 && HZ < 3072
+# define SHIFT_HZ 11
+#elif HZ >= 3072 && HZ < 6144
+# define SHIFT_HZ 12
+#elif HZ >= 6144 && HZ < 12288
+# define SHIFT_HZ 13
 #else
 # error You lose.
 #endif
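The new branches simply extend SHIFT_HZ, which is HZ rounded to the nearest power of two and expressed as an exponent: HZ=1000 already maps to SHIFT_HZ=10 (2^10 = 1024), and with these additions a build using HZ=2048 gets SHIFT_HZ=11 and HZ=4096 gets SHIFT_HZ=12 instead of tripping the "# error You lose." fallback.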
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 94bc99656963..a7283c9beadf 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -105,8 +105,8 @@ struct user;
  * supposed to.
  */
 #ifdef CONFIG_PREEMPT_VOLUNTARY
-extern int cond_resched(void);
-# define might_resched() cond_resched()
+extern int _cond_resched(void);
+# define might_resched() _cond_resched()
 #else
 # define might_resched() do { } while (0)
 #endif
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
new file mode 100644
index 000000000000..901c2d6377a8
--- /dev/null
+++ b/include/linux/latencytop.h
@@ -0,0 +1,44 @@
+/*
+ * latencytop.h: Infrastructure for displaying latency
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ */
+
+#ifndef _INCLUDE_GUARD_LATENCYTOP_H_
+#define _INCLUDE_GUARD_LATENCYTOP_H_
+
+#ifdef CONFIG_LATENCYTOP
+
+#define LT_SAVECOUNT 32
+#define LT_BACKTRACEDEPTH 12
+
+struct latency_record {
+        unsigned long backtrace[LT_BACKTRACEDEPTH];
+        unsigned int count;
+        unsigned long time;
+        unsigned long max;
+};
+
+
+struct task_struct;
+
+void account_scheduler_latency(struct task_struct *task, int usecs, int inter);
+
+void clear_all_latency_tracing(struct task_struct *p);
+
+#else
+
+static inline void
+account_scheduler_latency(struct task_struct *task, int usecs, int inter)
+{
+}
+
+static inline void clear_all_latency_tracing(struct task_struct *p)
+{
+}
+
+#endif
+
+#endif
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 124033cb5e9b..4374c4277780 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -35,6 +35,7 @@
 #include <linux/workqueue.h>
 #include <scsi/scsi_host.h>
 #include <linux/acpi.h>
+#include <linux/cdrom.h>

 /*
  * Define if arch has non-standard setup. This is a _PCI_ standard
@@ -143,10 +144,11 @@ enum {
         ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */
         ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */
         ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
-        ATA_DFLAG_INIT_MASK = (1 << 16) - 1,
+        ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
+        ATA_DFLAG_INIT_MASK = (1 << 24) - 1,

-        ATA_DFLAG_DETACH = (1 << 16),
-        ATA_DFLAG_DETACHED = (1 << 17),
+        ATA_DFLAG_DETACH = (1 << 24),
+        ATA_DFLAG_DETACHED = (1 << 25),

         ATA_DEV_UNKNOWN = 0, /* unknown device */
         ATA_DEV_ATA = 1, /* ATA device */
@@ -217,9 +219,7 @@ enum {

         /* struct ata_queued_cmd flags */
         ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
-        ATA_QCFLAG_SG = (1 << 1), /* have s/g table? */
-        ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */
-        ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
+        ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */
         ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
         ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
         ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */
@@ -266,19 +266,15 @@ enum {
         PORT_DISABLED = 2,

         /* encoding various smaller bitmaps into a single
-         * unsigned int bitmap
+         * unsigned long bitmap
          */
-        ATA_BITS_PIO = 7,
-        ATA_BITS_MWDMA = 5,
-        ATA_BITS_UDMA = 8,
+        ATA_NR_PIO_MODES = 7,
+        ATA_NR_MWDMA_MODES = 5,
+        ATA_NR_UDMA_MODES = 8,

         ATA_SHIFT_PIO = 0,
-        ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_BITS_PIO,
-        ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,
-
-        ATA_MASK_PIO = ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
-        ATA_MASK_MWDMA = ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
-        ATA_MASK_UDMA = ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,
+        ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES,
+        ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES,

         /* size of buffer to pad xfers ending on unaligned boundaries */
         ATA_DMA_PAD_SZ = 4,
@@ -349,6 +345,21 @@ enum {
         ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */
         ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */
         ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */
+
+        /* ATAPI command types */
+        ATAPI_READ = 0, /* READs */
+        ATAPI_WRITE = 1, /* WRITEs */
+        ATAPI_READ_CD = 2, /* READ CD [MSF] */
+        ATAPI_MISC = 3, /* the rest */
+};
+
+enum ata_xfer_mask {
+        ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1)
+                        << ATA_SHIFT_PIO,
+        ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1)
+                        << ATA_SHIFT_MWDMA,
+        ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1)
+                        << ATA_SHIFT_UDMA,
 };

 enum hsm_task_states {
@@ -447,7 +458,7 @@ struct ata_queued_cmd {
         unsigned int tag;
         unsigned int n_elem;
         unsigned int n_iter;
-        unsigned int orig_n_elem;
+        unsigned int mapped_n_elem;

         int dma_dir;

@@ -455,17 +466,18 @@ struct ata_queued_cmd {
         unsigned int sect_size;

         unsigned int nbytes;
+        unsigned int raw_nbytes;
         unsigned int curbytes;

         struct scatterlist *cursg;
         unsigned int cursg_ofs;

+        struct scatterlist *last_sg;
+        struct scatterlist saved_last_sg;
         struct scatterlist sgent;
-        struct scatterlist pad_sgent;
-        void *buf_virt;
+        struct scatterlist extra_sg[2];

-        /* DO NOT iterate over __sg manually, use ata_for_each_sg() */
-        struct scatterlist *__sg;
+        struct scatterlist *sg;

         unsigned int err_mask;
         struct ata_taskfile result_tf;
@@ -482,7 +494,7 @@ struct ata_port_stats {
 };

 struct ata_ering_entry {
-        int is_io;
+        unsigned int eflags;
         unsigned int err_mask;
         u64 timestamp;
 };
@@ -522,9 +534,9 @@ struct ata_device {
         unsigned int cdb_len;

         /* per-dev xfer mask */
-        unsigned int pio_mask;
-        unsigned int mwdma_mask;
-        unsigned int udma_mask;
+        unsigned long pio_mask;
+        unsigned long mwdma_mask;
+        unsigned long udma_mask;

         /* for CHS addressing */
         u16 cylinders; /* Number of cylinders */
@@ -560,6 +572,8 @@ struct ata_eh_context {
         int tries[ATA_MAX_DEVICES];
         unsigned int classes[ATA_MAX_DEVICES];
         unsigned int did_probe_mask;
+        unsigned int saved_ncq_enabled;
+        u8 saved_xfer_mode[ATA_MAX_DEVICES];
 };

 struct ata_acpi_drive
@@ -686,7 +700,8 @@ struct ata_port_operations {
         void (*bmdma_setup) (struct ata_queued_cmd *qc);
         void (*bmdma_start) (struct ata_queued_cmd *qc);

-        void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
+        unsigned int (*data_xfer) (struct ata_device *dev, unsigned char *buf,
+                                   unsigned int buflen, int rw);

         int (*qc_defer) (struct ata_queued_cmd *qc);
         void (*qc_prep) (struct ata_queued_cmd *qc);
@@ -832,8 +847,6 @@ extern int ata_busy_sleep(struct ata_port *ap,
                           unsigned long timeout_pat, unsigned long timeout);
 extern void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline);
 extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline);
-extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
-                                void *data, unsigned long delay);
 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
                              unsigned long interval_msec,
                              unsigned long timeout_msec);
@@ -848,6 +861,16 @@ extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 extern void ata_tf_to_fis(const struct ata_taskfile *tf,
                           u8 pmp, int is_cmd, u8 *fis);
 extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
+extern unsigned long ata_pack_xfermask(unsigned long pio_mask,
+        unsigned long mwdma_mask, unsigned long udma_mask);
+extern void ata_unpack_xfermask(unsigned long xfer_mask,
+        unsigned long *pio_mask, unsigned long *mwdma_mask,
+        unsigned long *udma_mask);
+extern u8 ata_xfer_mask2mode(unsigned long xfer_mask);
+extern unsigned long ata_xfer_mode2mask(u8 xfer_mode);
+extern int ata_xfer_mode2shift(unsigned long xfer_mode);
+extern const char *ata_mode_string(unsigned long xfer_mask);
+extern unsigned long ata_id_xfermask(const u16 *id);
 extern void ata_noop_dev_select(struct ata_port *ap, unsigned int device);
 extern void ata_std_dev_select(struct ata_port *ap, unsigned int device);
 extern u8 ata_check_status(struct ata_port *ap);
@@ -856,17 +879,15 @@ extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
 extern int ata_port_start(struct ata_port *ap);
 extern int ata_sff_port_start(struct ata_port *ap);
 extern irqreturn_t ata_interrupt(int irq, void *dev_instance);
-extern void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
-                          unsigned int buflen, int write_data);
-extern void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
-                                unsigned int buflen, int write_data);
+extern unsigned int ata_data_xfer(struct ata_device *dev,
+                        unsigned char *buf, unsigned int buflen, int rw);
+extern unsigned int ata_data_xfer_noirq(struct ata_device *dev,
+                        unsigned char *buf, unsigned int buflen, int rw);
 extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
 extern void ata_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
 extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
-extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
-                            unsigned int buflen);
 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
                         unsigned int n_elem);
 extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -875,7 +896,6 @@ extern void ata_id_string(const u16 *id, unsigned char *s,
                           unsigned int ofs, unsigned int len);
 extern void ata_id_c_string(const u16 *id, unsigned char *s,
                             unsigned int ofs, unsigned int len);
-extern void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown);
 extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
 extern void ata_bmdma_start(struct ata_queued_cmd *qc);
 extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
@@ -910,6 +930,7 @@ extern u8 ata_irq_on(struct ata_port *ap);
 extern int ata_cable_40wire(struct ata_port *ap);
 extern int ata_cable_80wire(struct ata_port *ap);
 extern int ata_cable_sata(struct ata_port *ap);
+extern int ata_cable_ignore(struct ata_port *ap);
 extern int ata_cable_unknown(struct ata_port *ap);

 /*
@@ -917,11 +938,13 @@ extern int ata_cable_unknown(struct ata_port *ap);
  */

 extern unsigned int ata_pio_need_iordy(const struct ata_device *);
+extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
 extern int ata_timing_compute(struct ata_device *, unsigned short,
                               struct ata_timing *, int, int);
 extern void ata_timing_merge(const struct ata_timing *,
                              const struct ata_timing *, struct ata_timing *,
                              unsigned int);
+extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle);

 enum {
         ATA_TIMING_SETUP = (1 << 0),
@@ -948,15 +971,40 @@ static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
                 return &ap->__acpi_init_gtm;
         return NULL;
 }
-extern int ata_acpi_cbl_80wire(struct ata_port *ap);
 int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
 int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
+unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
+                                    const struct ata_acpi_gtm *gtm);
+int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
 #else
 static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
 {
         return NULL;
 }
-static inline int ata_acpi_cbl_80wire(struct ata_port *ap) { return 0; }
+
+static inline int ata_acpi_stm(const struct ata_port *ap,
+                               struct ata_acpi_gtm *stm)
+{
+        return -ENOSYS;
+}
+
+static inline int ata_acpi_gtm(const struct ata_port *ap,
+                               struct ata_acpi_gtm *stm)
+{
+        return -ENOSYS;
+}
+
+static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+                                const struct ata_acpi_gtm *gtm)
+{
+        return 0;
+}
+
+static inline int ata_acpi_cbl_80wire(struct ata_port *ap,
+                                      const struct ata_acpi_gtm *gtm)
+{
+        return 0;
+}
 #endif

 #ifdef CONFIG_PCI
@@ -985,8 +1033,12 @@ extern int ata_pci_init_bmdma(struct ata_host *host);
 extern int ata_pci_prepare_sff_host(struct pci_dev *pdev,
                                     const struct ata_port_info * const * ppi,
                                     struct ata_host **r_host);
+extern int ata_pci_activate_sff_host(struct ata_host *host,
+                                     irq_handler_t irq_handler,
+                                     struct scsi_host_template *sht);
 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
-extern unsigned long ata_pci_default_filter(struct ata_device *, unsigned long);
+extern unsigned long ata_pci_default_filter(struct ata_device *dev,
+                                            unsigned long xfer_mask);
 #endif /* CONFIG_PCI */

 /*
@@ -1074,35 +1126,6 @@ extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
1074 const char *name); 1126 const char *name);
1075#endif 1127#endif
1076 1128
1077/*
1078 * qc helpers
1079 */
1080static inline struct scatterlist *
1081ata_qc_first_sg(struct ata_queued_cmd *qc)
1082{
1083 qc->n_iter = 0;
1084 if (qc->n_elem)
1085 return qc->__sg;
1086 if (qc->pad_len)
1087 return &qc->pad_sgent;
1088 return NULL;
1089}
1090
1091static inline struct scatterlist *
1092ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
1093{
1094 if (sg == &qc->pad_sgent)
1095 return NULL;
1096 if (++qc->n_iter < qc->n_elem)
1097 return sg_next(sg);
1098 if (qc->pad_len)
1099 return &qc->pad_sgent;
1100 return NULL;
1101}
1102
1103#define ata_for_each_sg(sg, qc) \
1104 for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
1105
1106static inline unsigned int ata_tag_valid(unsigned int tag) 1129static inline unsigned int ata_tag_valid(unsigned int tag)
1107{ 1130{
1108 return (tag < ATA_MAX_QUEUE) ? 1 : 0; 1131 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
@@ -1337,15 +1360,17 @@ static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
1337static inline void ata_qc_reinit(struct ata_queued_cmd *qc) 1360static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
1338{ 1361{
1339 qc->dma_dir = DMA_NONE; 1362 qc->dma_dir = DMA_NONE;
1340 qc->__sg = NULL; 1363 qc->sg = NULL;
1341 qc->flags = 0; 1364 qc->flags = 0;
1342 qc->cursg = NULL; 1365 qc->cursg = NULL;
1343 qc->cursg_ofs = 0; 1366 qc->cursg_ofs = 0;
1344 qc->nbytes = qc->curbytes = 0; 1367 qc->nbytes = qc->raw_nbytes = qc->curbytes = 0;
1345 qc->n_elem = 0; 1368 qc->n_elem = 0;
1369 qc->mapped_n_elem = 0;
1346 qc->n_iter = 0; 1370 qc->n_iter = 0;
1347 qc->err_mask = 0; 1371 qc->err_mask = 0;
1348 qc->pad_len = 0; 1372 qc->pad_len = 0;
1373 qc->last_sg = NULL;
1349 qc->sect_size = ATA_SECT_SIZE; 1374 qc->sect_size = ATA_SECT_SIZE;
1350 1375
1351 ata_tf_init(qc->dev, &qc->tf); 1376 ata_tf_init(qc->dev, &qc->tf);
@@ -1362,6 +1387,27 @@ static inline int ata_try_flush_cache(const struct ata_device *dev)
1362 ata_id_has_flush_ext(dev->id); 1387 ata_id_has_flush_ext(dev->id);
1363} 1388}
1364 1389
1390static inline int atapi_cmd_type(u8 opcode)
1391{
1392 switch (opcode) {
1393 case GPCMD_READ_10:
1394 case GPCMD_READ_12:
1395 return ATAPI_READ;
1396
1397 case GPCMD_WRITE_10:
1398 case GPCMD_WRITE_12:
1399 case GPCMD_WRITE_AND_VERIFY_10:
1400 return ATAPI_WRITE;
1401
1402 case GPCMD_READ_CD:
1403 case GPCMD_READ_CD_MSF:
1404 return ATAPI_READ_CD;
1405
1406 default:
1407 return ATAPI_MISC;
1408 }
1409}
1410
1365static inline unsigned int ac_err_mask(u8 status) 1411static inline unsigned int ac_err_mask(u8 status)
1366{ 1412{
1367 if (status & (ATA_BUSY | ATA_DRQ)) 1413 if (status & (ATA_BUSY | ATA_DRQ))
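The atapi_cmd_type() helper added in the hunk above classifies an ATAPI packet-command opcode so the issue path can pick a transfer direction. Below is a minimal, standalone sketch of the same classification logic; the GPCMD_* values are defined locally for the demo (the kernel takes them from <linux/cdrom.h>, following the standard SCSI/MMC command set) and the ATAPI_* codes are stand-ins, so nothing here is part of the patch itself.

#include <stdio.h>

/* Stand-ins for the kernel's ATAPI_* classification codes. */
enum { ATAPI_READ, ATAPI_WRITE, ATAPI_READ_CD, ATAPI_MISC };

/* Opcodes defined locally for the sketch; the kernel gets them
 * from <linux/cdrom.h>. */
#define GPCMD_READ_10             0x28
#define GPCMD_WRITE_10            0x2a
#define GPCMD_WRITE_AND_VERIFY_10 0x2e
#define GPCMD_READ_12             0xa8
#define GPCMD_WRITE_12            0xaa
#define GPCMD_READ_CD_MSF         0xb9
#define GPCMD_READ_CD             0xbe

/* Same switch as the new atapi_cmd_type() inline above. */
static int atapi_cmd_type(unsigned char opcode)
{
        switch (opcode) {
        case GPCMD_READ_10:
        case GPCMD_READ_12:
                return ATAPI_READ;
        case GPCMD_WRITE_10:
        case GPCMD_WRITE_12:
        case GPCMD_WRITE_AND_VERIFY_10:
                return ATAPI_WRITE;
        case GPCMD_READ_CD:
        case GPCMD_READ_CD_MSF:
                return ATAPI_READ_CD;
        default:
                return ATAPI_MISC;
        }
}

int main(void)
{
        static const char *name[] = { "READ", "WRITE", "READ_CD", "MISC" };
        const unsigned char cdb0[] = { 0x28, 0xaa, 0xbe, 0x1b }; /* 0x1b: START STOP UNIT */
        unsigned int i;

        for (i = 0; i < sizeof(cdb0); i++)
                printf("opcode 0x%02x -> ATAPI_%s\n",
                       cdb0[i], name[atapi_cmd_type(cdb0[i])]);
        return 0;
}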
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 0c40cc0b4a36..5dfbc684ce7d 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -207,9 +207,7 @@ static inline int notifier_to_errno(int ret)
207#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ 207#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
208#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ 208#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
209#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ 209#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
210#define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */ 210#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
211#define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */
212#define CPU_DYING 0x000A /* CPU (unsigned)v not running any task,
213 * not handling interrupts, soon dead */ 211 * not handling interrupts, soon dead */
214 212
215/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend 213/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
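With CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE removed, CPU_DYING drops to 0x0008; existing hotplug callbacks are unaffected because they dispatch on the symbolic action values. A kernel-style sketch of a notifier reacting to CPU_DYING, assuming the usual register_cpu_notifier() registration; the callback body is purely illustrative.

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kernel.h>

static int my_cpu_callback(struct notifier_block *nb,
                           unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_DYING:
                /* CPU no longer runs tasks or handles interrupts;
                 * hypothetical per-CPU teardown would go here. */
                printk(KERN_DEBUG "cpu %u dying\n", cpu);
                break;
        case CPU_DEAD:
                /* final cleanup once the CPU is fully offline */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_cpu_nb = {
        .notifier_call = my_cpu_callback,
};
/* registered at init time with register_cpu_notifier(&my_cpu_nb) */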
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 7f2215139e9a..1fbd0256e86b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2066,6 +2066,9 @@
2066#define PCI_VENDOR_ID_NETCELL 0x169c 2066#define PCI_VENDOR_ID_NETCELL 0x169c
2067#define PCI_DEVICE_ID_REVOLUTION 0x0044 2067#define PCI_DEVICE_ID_REVOLUTION 0x0044
2068 2068
2069#define PCI_VENDOR_ID_CENATEK 0x16CA
2070#define PCI_DEVICE_ID_CENATEK_IDE 0x0001
2071
2069#define PCI_VENDOR_ID_VITESSE 0x1725 2072#define PCI_VENDOR_ID_VITESSE 0x1725
2070#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174 2073#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174
2071 2074
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
new file mode 100644
index 000000000000..4d6624260b4c
--- /dev/null
+++ b/include/linux/rcuclassic.h
@@ -0,0 +1,164 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion (classic version)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2001
19 *
20 * Author: Dipankar Sarma <dipankar@in.ibm.com>
21 *
22 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24 * Papers:
25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27 *
28 * For detailed explanation of Read-Copy Update mechanism see -
29 * Documentation/RCU
30 *
31 */
32
33#ifndef __LINUX_RCUCLASSIC_H
34#define __LINUX_RCUCLASSIC_H
35
36#ifdef __KERNEL__
37
38#include <linux/cache.h>
39#include <linux/spinlock.h>
40#include <linux/threads.h>
41#include <linux/percpu.h>
42#include <linux/cpumask.h>
43#include <linux/seqlock.h>
44
45
46/* Global control variables for rcupdate callback mechanism. */
47struct rcu_ctrlblk {
48 long cur; /* Current batch number. */
49 long completed; /* Number of the last completed batch */
50 int next_pending; /* Is the next batch already waiting? */
51
52 int signaled;
53
54 spinlock_t lock ____cacheline_internodealigned_in_smp;
55 cpumask_t cpumask; /* CPUs that need to switch in order */
56 /* for current batch to proceed. */
57} ____cacheline_internodealigned_in_smp;
58
59/* Is batch a before batch b ? */
60static inline int rcu_batch_before(long a, long b)
61{
62 return (a - b) < 0;
63}
64
65/* Is batch a after batch b ? */
66static inline int rcu_batch_after(long a, long b)
67{
68 return (a - b) > 0;
69}
70
71/*
72 * Per-CPU data for Read-Copy Update.
73 * nxtlist - new callbacks are added here
74 * curlist - current batch for which quiescent cycle started if any
75 */
76struct rcu_data {
77 /* 1) quiescent state handling : */
78 long quiescbatch; /* Batch # for grace period */
79 int passed_quiesc; /* User-mode/idle loop etc. */
80 int qs_pending; /* core waits for quiesc state */
81
82 /* 2) batch handling */
83 long batch; /* Batch # for current RCU batch */
84 struct rcu_head *nxtlist;
85 struct rcu_head **nxttail;
86 long qlen; /* # of queued callbacks */
87 struct rcu_head *curlist;
88 struct rcu_head **curtail;
89 struct rcu_head *donelist;
90 struct rcu_head **donetail;
91 long blimit; /* Upper limit on a processed batch */
92 int cpu;
93 struct rcu_head barrier;
94};
95
96DECLARE_PER_CPU(struct rcu_data, rcu_data);
97DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
98
99/*
100 * Increment the quiescent state counter.
101 * The counter is a bit degenerated: We do not need to know
102 * how many quiescent states passed, just if there was at least
103 * one since the start of the grace period. Thus just a flag.
104 */
105static inline void rcu_qsctr_inc(int cpu)
106{
107 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
108 rdp->passed_quiesc = 1;
109}
110static inline void rcu_bh_qsctr_inc(int cpu)
111{
112 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
113 rdp->passed_quiesc = 1;
114}
115
116extern int rcu_pending(int cpu);
117extern int rcu_needs_cpu(int cpu);
118
119#ifdef CONFIG_DEBUG_LOCK_ALLOC
120extern struct lockdep_map rcu_lock_map;
121# define rcu_read_acquire() \
122 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
123# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
124#else
125# define rcu_read_acquire() do { } while (0)
126# define rcu_read_release() do { } while (0)
127#endif
128
129#define __rcu_read_lock() \
130 do { \
131 preempt_disable(); \
132 __acquire(RCU); \
133 rcu_read_acquire(); \
134 } while (0)
135#define __rcu_read_unlock() \
136 do { \
137 rcu_read_release(); \
138 __release(RCU); \
139 preempt_enable(); \
140 } while (0)
141#define __rcu_read_lock_bh() \
142 do { \
143 local_bh_disable(); \
144 __acquire(RCU_BH); \
145 rcu_read_acquire(); \
146 } while (0)
147#define __rcu_read_unlock_bh() \
148 do { \
149 rcu_read_release(); \
150 __release(RCU_BH); \
151 local_bh_enable(); \
152 } while (0)
153
154#define __synchronize_sched() synchronize_rcu()
155
156extern void __rcu_init(void);
157extern void rcu_check_callbacks(int cpu, int user);
158extern void rcu_restart_cpu(int cpu);
159
160extern long rcu_batches_completed(void);
161extern long rcu_batches_completed_bh(void);
162
163#endif /* __KERNEL__ */
164#endif /* __LINUX_RCUCLASSIC_H */
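Under classic RCU the __rcu_read_lock()/__rcu_read_unlock() macros above are just preempt_disable()/preempt_enable() plus lockdep annotations, so a read-side critical section costs essentially nothing. A kernel-style sketch of the canonical reader; struct foo and the gbl_foo pointer are hypothetical, and rcu_dereference() comes from rcupdate.h.

#include <linux/rcupdate.h>

struct foo {
        int val;
        struct rcu_head rcu;
};

static struct foo *gbl_foo;     /* hypothetical RCU-protected pointer */

static int read_foo_val(void)
{
        struct foo *p;
        int v = -1;

        rcu_read_lock();                /* classic RCU: preempt_disable() */
        p = rcu_dereference(gbl_foo);   /* fetch the protected pointer */
        if (p)
                v = p->val;
        rcu_read_unlock();              /* preempt_enable() */
        return v;
}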
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index cc24a01df940..d32c14de270e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -15,7 +15,7 @@
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 * 17 *
18 * Copyright (C) IBM Corporation, 2001 18 * Copyright IBM Corporation, 2001
19 * 19 *
20 * Author: Dipankar Sarma <dipankar@in.ibm.com> 20 * Author: Dipankar Sarma <dipankar@in.ibm.com>
21 * 21 *
@@ -53,96 +53,18 @@ struct rcu_head {
53 void (*func)(struct rcu_head *head); 53 void (*func)(struct rcu_head *head);
54}; 54};
55 55
56#ifdef CONFIG_CLASSIC_RCU
57#include <linux/rcuclassic.h>
58#else /* #ifdef CONFIG_CLASSIC_RCU */
59#include <linux/rcupreempt.h>
60#endif /* #else #ifdef CONFIG_CLASSIC_RCU */
61
56#define RCU_HEAD_INIT { .next = NULL, .func = NULL } 62#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
57#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT 63#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
58#define INIT_RCU_HEAD(ptr) do { \ 64#define INIT_RCU_HEAD(ptr) do { \
59 (ptr)->next = NULL; (ptr)->func = NULL; \ 65 (ptr)->next = NULL; (ptr)->func = NULL; \
60} while (0) 66} while (0)
61 67
62
63
64/* Global control variables for rcupdate callback mechanism. */
65struct rcu_ctrlblk {
66 long cur; /* Current batch number. */
67 long completed; /* Number of the last completed batch */
68 int next_pending; /* Is the next batch already waiting? */
69
70 int signaled;
71
72 spinlock_t lock ____cacheline_internodealigned_in_smp;
73 cpumask_t cpumask; /* CPUs that need to switch in order */
74 /* for current batch to proceed. */
75} ____cacheline_internodealigned_in_smp;
76
77/* Is batch a before batch b ? */
78static inline int rcu_batch_before(long a, long b)
79{
80 return (a - b) < 0;
81}
82
83/* Is batch a after batch b ? */
84static inline int rcu_batch_after(long a, long b)
85{
86 return (a - b) > 0;
87}
88
89/*
90 * Per-CPU data for Read-Copy Update.
91 * nxtlist - new callbacks are added here
92 * curlist - current batch for which quiescent cycle started if any
93 */
94struct rcu_data {
95 /* 1) quiescent state handling : */
96 long quiescbatch; /* Batch # for grace period */
97 int passed_quiesc; /* User-mode/idle loop etc. */
98 int qs_pending; /* core waits for quiesc state */
99
100 /* 2) batch handling */
101 long batch; /* Batch # for current RCU batch */
102 struct rcu_head *nxtlist;
103 struct rcu_head **nxttail;
104 long qlen; /* # of queued callbacks */
105 struct rcu_head *curlist;
106 struct rcu_head **curtail;
107 struct rcu_head *donelist;
108 struct rcu_head **donetail;
109 long blimit; /* Upper limit on a processed batch */
110 int cpu;
111 struct rcu_head barrier;
112};
113
114DECLARE_PER_CPU(struct rcu_data, rcu_data);
115DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
116
117/*
118 * Increment the quiescent state counter.
119 * The counter is a bit degenerated: We do not need to know
120 * how many quiescent states passed, just if there was at least
121 * one since the start of the grace period. Thus just a flag.
122 */
123static inline void rcu_qsctr_inc(int cpu)
124{
125 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
126 rdp->passed_quiesc = 1;
127}
128static inline void rcu_bh_qsctr_inc(int cpu)
129{
130 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
131 rdp->passed_quiesc = 1;
132}
133
134extern int rcu_pending(int cpu);
135extern int rcu_needs_cpu(int cpu);
136
137#ifdef CONFIG_DEBUG_LOCK_ALLOC
138extern struct lockdep_map rcu_lock_map;
139# define rcu_read_acquire() lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
140# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
141#else
142# define rcu_read_acquire() do { } while (0)
143# define rcu_read_release() do { } while (0)
144#endif
145
146/** 68/**
147 * rcu_read_lock - mark the beginning of an RCU read-side critical section. 69 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
148 * 70 *
@@ -172,24 +94,13 @@ extern struct lockdep_map rcu_lock_map;
172 * 94 *
173 * It is illegal to block while in an RCU read-side critical section. 95 * It is illegal to block while in an RCU read-side critical section.
174 */ 96 */
175#define rcu_read_lock() \ 97#define rcu_read_lock() __rcu_read_lock()
176 do { \
177 preempt_disable(); \
178 __acquire(RCU); \
179 rcu_read_acquire(); \
180 } while(0)
181 98
182/** 99/**
183 * rcu_read_unlock - marks the end of an RCU read-side critical section. 100 * rcu_read_unlock - marks the end of an RCU read-side critical section.
184 * 101 *
185 * See rcu_read_lock() for more information. 102 * See rcu_read_lock() for more information.
186 */ 103 */
187#define rcu_read_unlock() \
188 do { \
189 rcu_read_release(); \
190 __release(RCU); \
191 preempt_enable(); \
192 } while(0)
193 104
194/* 105/*
195 * So where is rcu_write_lock()? It does not exist, as there is no 106 * So where is rcu_write_lock()? It does not exist, as there is no
@@ -200,6 +111,7 @@ extern struct lockdep_map rcu_lock_map;
200 * used as well. RCU does not care how the writers keep out of each 111 * used as well. RCU does not care how the writers keep out of each
201 * others' way, as long as they do so. 112 * others' way, as long as they do so.
202 */ 113 */
114#define rcu_read_unlock() __rcu_read_unlock()
203 115
204/** 116/**
205 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section 117 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
@@ -212,24 +124,14 @@ extern struct lockdep_map rcu_lock_map;
212 * can use just rcu_read_lock(). 124 * can use just rcu_read_lock().
213 * 125 *
214 */ 126 */
215#define rcu_read_lock_bh() \ 127#define rcu_read_lock_bh() __rcu_read_lock_bh()
216 do { \
217 local_bh_disable(); \
218 __acquire(RCU_BH); \
219 rcu_read_acquire(); \
220 } while(0)
221 128
222/* 129/*
223 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section 130 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
224 * 131 *
225 * See rcu_read_lock_bh() for more information. 132 * See rcu_read_lock_bh() for more information.
226 */ 133 */
227#define rcu_read_unlock_bh() \ 134#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
228 do { \
229 rcu_read_release(); \
230 __release(RCU_BH); \
231 local_bh_enable(); \
232 } while(0)
233 135
234/* 136/*
235 * Prevent the compiler from merging or refetching accesses. The compiler 137 * Prevent the compiler from merging or refetching accesses. The compiler
@@ -293,21 +195,52 @@ extern struct lockdep_map rcu_lock_map;
293 * In "classic RCU", these two guarantees happen to be one and 195 * In "classic RCU", these two guarantees happen to be one and
294 * the same, but can differ in realtime RCU implementations. 196 * the same, but can differ in realtime RCU implementations.
295 */ 197 */
296#define synchronize_sched() synchronize_rcu() 198#define synchronize_sched() __synchronize_sched()
297 199
298extern void rcu_init(void); 200/**
299extern void rcu_check_callbacks(int cpu, int user); 201 * call_rcu - Queue an RCU callback for invocation after a grace period.
300extern void rcu_restart_cpu(int cpu); 202 * @head: structure to be used for queueing the RCU updates.
301extern long rcu_batches_completed(void); 203 * @func: actual update function to be invoked after the grace period
302extern long rcu_batches_completed_bh(void); 204 *
205 * The update function will be invoked some time after a full grace
206 * period elapses, in other words after all currently executing RCU
207 * read-side critical sections have completed. RCU read-side critical
208 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
209 * and may be nested.
210 */
211extern void call_rcu(struct rcu_head *head,
212 void (*func)(struct rcu_head *head));
303 213
304/* Exported interfaces */ 214/**
305extern void FASTCALL(call_rcu(struct rcu_head *head, 215 * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
306 void (*func)(struct rcu_head *head))); 216 * @head: structure to be used for queueing the RCU updates.
307extern void FASTCALL(call_rcu_bh(struct rcu_head *head, 217 * @func: actual update function to be invoked after the grace period
308 void (*func)(struct rcu_head *head))); 218 *
219 * The update function will be invoked some time after a full grace
220 * period elapses, in other words after all currently executing RCU
221 * read-side critical sections have completed. call_rcu_bh() assumes
222 * that the read-side critical sections end on completion of a softirq
223 * handler. This means that read-side critical sections in process
224 * context must not be interrupted by softirqs. This interface is to be
225 * used when most of the read-side critical sections are in softirq context.
226 * RCU read-side critical sections are delimited by :
227 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
228 * OR
229 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
230 * These may be nested.
231 */
232extern void call_rcu_bh(struct rcu_head *head,
233 void (*func)(struct rcu_head *head));
234
235/* Exported common interfaces */
309extern void synchronize_rcu(void); 236extern void synchronize_rcu(void);
310extern void rcu_barrier(void); 237extern void rcu_barrier(void);
238extern long rcu_batches_completed(void);
239extern long rcu_batches_completed_bh(void);
240
241/* Internal to kernel */
242extern void rcu_init(void);
243extern int rcu_needs_cpu(int cpu);
311 244
312#endif /* __KERNEL__ */ 245#endif /* __KERNEL__ */
313#endif /* __LINUX_RCUPDATE_H */ 246#endif /* __LINUX_RCUPDATE_H */
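The call_rcu() kerneldoc above describes the usual deferred-free pattern: the protected object embeds a struct rcu_head, and the callback recovers the object with container_of() and frees it once all pre-existing readers are done. A kernel-style sketch under those assumptions; struct foo, gbl_foo and foo_update() are hypothetical names, not kernel APIs.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct foo {
        int val;
        struct rcu_head rcu;
};

static struct foo *gbl_foo;             /* RCU-protected pointer (hypothetical) */
static DEFINE_SPINLOCK(foo_lock);       /* serializes updaters only */

static void foo_reclaim(struct rcu_head *head)
{
        /* Runs after a full grace period: no reader still sees the old copy. */
        kfree(container_of(head, struct foo, rcu));
}

static int foo_update(int new_val)
{
        struct foo *new_fp, *old_fp;

        new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
        if (!new_fp)
                return -ENOMEM;
        new_fp->val = new_val;

        spin_lock(&foo_lock);
        old_fp = gbl_foo;
        rcu_assign_pointer(gbl_foo, new_fp);    /* publish the new version */
        spin_unlock(&foo_lock);

        if (old_fp)
                call_rcu(&old_fp->rcu, foo_reclaim);    /* defer the free */
        return 0;
}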
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
new file mode 100644
index 000000000000..ece8eb3e4151
--- /dev/null
+++ b/include/linux/rcupreempt.h
@@ -0,0 +1,86 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion (RT implementation)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2006
19 *
20 * Author: Paul McKenney <paulmck@us.ibm.com>
21 *
22 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24 * Papers:
25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27 *
28 * For detailed explanation of Read-Copy Update mechanism see -
29 * Documentation/RCU
30 *
31 */
32
33#ifndef __LINUX_RCUPREEMPT_H
34#define __LINUX_RCUPREEMPT_H
35
36#ifdef __KERNEL__
37
38#include <linux/cache.h>
39#include <linux/spinlock.h>
40#include <linux/threads.h>
41#include <linux/percpu.h>
42#include <linux/cpumask.h>
43#include <linux/seqlock.h>
44
45#define rcu_qsctr_inc(cpu)
46#define rcu_bh_qsctr_inc(cpu)
47#define call_rcu_bh(head, rcu) call_rcu(head, rcu)
48
49extern void __rcu_read_lock(void);
50extern void __rcu_read_unlock(void);
51extern int rcu_pending(int cpu);
52extern int rcu_needs_cpu(int cpu);
53
54#define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); }
55#define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); }
56
57extern void __synchronize_sched(void);
58
59extern void __rcu_init(void);
60extern void rcu_check_callbacks(int cpu, int user);
61extern void rcu_restart_cpu(int cpu);
62extern long rcu_batches_completed(void);
63
64/*
65 * Return the number of RCU batches processed thus far. Useful for debug
66 * and statistics. The _bh variant is identical to straight RCU
67 */
68static inline long rcu_batches_completed_bh(void)
69{
70 return rcu_batches_completed();
71}
72
73#ifdef CONFIG_RCU_TRACE
74struct rcupreempt_trace;
75extern long *rcupreempt_flipctr(int cpu);
76extern long rcupreempt_data_completed(void);
77extern int rcupreempt_flip_flag(int cpu);
78extern int rcupreempt_mb_flag(int cpu);
79extern char *rcupreempt_try_flip_state_name(void);
80extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
81#endif
82
83struct softirq_action;
84
85#endif /* __KERNEL__ */
86#endif /* __LINUX_RCUPREEMPT_H */
diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h
new file mode 100644
index 000000000000..21cd6b2a5c42
--- /dev/null
+++ b/include/linux/rcupreempt_trace.h
@@ -0,0 +1,99 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion (RT implementation)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2006
19 *
20 * Author: Paul McKenney <paulmck@us.ibm.com>
21 *
22 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24 * Papers:
25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27 *
28 * For detailed explanation of the Preemptible Read-Copy Update mechanism see -
29 * http://lwn.net/Articles/253651/
30 */
31
32#ifndef __LINUX_RCUPREEMPT_TRACE_H
33#define __LINUX_RCUPREEMPT_TRACE_H
34
35#ifdef __KERNEL__
36#include <linux/types.h>
37#include <linux/kernel.h>
38
39#include <asm/atomic.h>
40
41/*
42 * PREEMPT_RCU data structures.
43 */
44
45struct rcupreempt_trace {
46 long next_length;
47 long next_add;
48 long wait_length;
49 long wait_add;
50 long done_length;
51 long done_add;
52 long done_remove;
53 atomic_t done_invoked;
54 long rcu_check_callbacks;
55 atomic_t rcu_try_flip_1;
56 atomic_t rcu_try_flip_e1;
57 long rcu_try_flip_i1;
58 long rcu_try_flip_ie1;
59 long rcu_try_flip_g1;
60 long rcu_try_flip_a1;
61 long rcu_try_flip_ae1;
62 long rcu_try_flip_a2;
63 long rcu_try_flip_z1;
64 long rcu_try_flip_ze1;
65 long rcu_try_flip_z2;
66 long rcu_try_flip_m1;
67 long rcu_try_flip_me1;
68 long rcu_try_flip_m2;
69};
70
71#ifdef CONFIG_RCU_TRACE
72#define RCU_TRACE(fn, arg) fn(arg);
73#else
74#define RCU_TRACE(fn, arg)
75#endif
76
77extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace);
78extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace);
79extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace);
80extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace);
81extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace);
82extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace);
83extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace);
84extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace);
85extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace);
86extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace);
87extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace);
88extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace);
89extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace);
90extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace);
91extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace);
92extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace);
93extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace);
94extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace);
95extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace);
96extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace);
97
98#endif /* __KERNEL__ */
99#endif /* __LINUX_RCUPREEMPT_TRACE_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d6eacda765ca..df5b24ee80b3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -78,7 +78,6 @@ struct sched_param {
78#include <linux/proportions.h> 78#include <linux/proportions.h>
79#include <linux/seccomp.h> 79#include <linux/seccomp.h>
80#include <linux/rcupdate.h> 80#include <linux/rcupdate.h>
81#include <linux/futex.h>
82#include <linux/rtmutex.h> 81#include <linux/rtmutex.h>
83 82
84#include <linux/time.h> 83#include <linux/time.h>
@@ -88,11 +87,13 @@ struct sched_param {
88#include <linux/hrtimer.h> 87#include <linux/hrtimer.h>
89#include <linux/task_io_accounting.h> 88#include <linux/task_io_accounting.h>
90#include <linux/kobject.h> 89#include <linux/kobject.h>
90#include <linux/latencytop.h>
91 91
92#include <asm/processor.h> 92#include <asm/processor.h>
93 93
94struct exec_domain; 94struct exec_domain;
95struct futex_pi_state; 95struct futex_pi_state;
96struct robust_list_head;
96struct bio; 97struct bio;
97 98
98/* 99/*
@@ -230,6 +231,8 @@ static inline int select_nohz_load_balancer(int cpu)
230} 231}
231#endif 232#endif
232 233
234extern unsigned long rt_needs_cpu(int cpu);
235
233/* 236/*
234 * Only dump TASK_* tasks. (0 for all tasks) 237 * Only dump TASK_* tasks. (0 for all tasks)
235 */ 238 */
@@ -257,13 +260,19 @@ extern void trap_init(void);
257extern void account_process_tick(struct task_struct *task, int user); 260extern void account_process_tick(struct task_struct *task, int user);
258extern void update_process_times(int user); 261extern void update_process_times(int user);
259extern void scheduler_tick(void); 262extern void scheduler_tick(void);
263extern void hrtick_resched(void);
264
265extern void sched_show_task(struct task_struct *p);
260 266
261#ifdef CONFIG_DETECT_SOFTLOCKUP 267#ifdef CONFIG_DETECT_SOFTLOCKUP
262extern void softlockup_tick(void); 268extern void softlockup_tick(void);
263extern void spawn_softlockup_task(void); 269extern void spawn_softlockup_task(void);
264extern void touch_softlockup_watchdog(void); 270extern void touch_softlockup_watchdog(void);
265extern void touch_all_softlockup_watchdogs(void); 271extern void touch_all_softlockup_watchdogs(void);
266extern int softlockup_thresh; 272extern unsigned long softlockup_thresh;
273extern unsigned long sysctl_hung_task_check_count;
274extern unsigned long sysctl_hung_task_timeout_secs;
275extern unsigned long sysctl_hung_task_warnings;
267#else 276#else
268static inline void softlockup_tick(void) 277static inline void softlockup_tick(void)
269{ 278{
@@ -822,6 +831,7 @@ struct sched_class {
822 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup); 831 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
823 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); 832 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
824 void (*yield_task) (struct rq *rq); 833 void (*yield_task) (struct rq *rq);
834 int (*select_task_rq)(struct task_struct *p, int sync);
825 835
826 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p); 836 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
827 837
@@ -837,11 +847,25 @@ struct sched_class {
837 int (*move_one_task) (struct rq *this_rq, int this_cpu, 847 int (*move_one_task) (struct rq *this_rq, int this_cpu,
838 struct rq *busiest, struct sched_domain *sd, 848 struct rq *busiest, struct sched_domain *sd,
839 enum cpu_idle_type idle); 849 enum cpu_idle_type idle);
850 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
851 void (*post_schedule) (struct rq *this_rq);
852 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
840#endif 853#endif
841 854
842 void (*set_curr_task) (struct rq *rq); 855 void (*set_curr_task) (struct rq *rq);
843 void (*task_tick) (struct rq *rq, struct task_struct *p); 856 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
844 void (*task_new) (struct rq *rq, struct task_struct *p); 857 void (*task_new) (struct rq *rq, struct task_struct *p);
858 void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
859
860 void (*join_domain)(struct rq *rq);
861 void (*leave_domain)(struct rq *rq);
862
863 void (*switched_from) (struct rq *this_rq, struct task_struct *task,
864 int running);
865 void (*switched_to) (struct rq *this_rq, struct task_struct *task,
866 int running);
867 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
868 int oldprio, int running);
845}; 869};
846 870
847struct load_weight { 871struct load_weight {
@@ -871,6 +895,8 @@ struct sched_entity {
871#ifdef CONFIG_SCHEDSTATS 895#ifdef CONFIG_SCHEDSTATS
872 u64 wait_start; 896 u64 wait_start;
873 u64 wait_max; 897 u64 wait_max;
898 u64 wait_count;
899 u64 wait_sum;
874 900
875 u64 sleep_start; 901 u64 sleep_start;
876 u64 sleep_max; 902 u64 sleep_max;
@@ -909,6 +935,21 @@ struct sched_entity {
909#endif 935#endif
910}; 936};
911 937
938struct sched_rt_entity {
939 struct list_head run_list;
940 unsigned int time_slice;
941 unsigned long timeout;
942 int nr_cpus_allowed;
943
944#ifdef CONFIG_FAIR_GROUP_SCHED
945 struct sched_rt_entity *parent;
946 /* rq on which this entity is (to be) queued: */
947 struct rt_rq *rt_rq;
948 /* rq "owned" by this entity/group: */
949 struct rt_rq *my_q;
950#endif
951};
952
912struct task_struct { 953struct task_struct {
913 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 954 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
914 void *stack; 955 void *stack;
@@ -925,9 +966,9 @@ struct task_struct {
925#endif 966#endif
926 967
927 int prio, static_prio, normal_prio; 968 int prio, static_prio, normal_prio;
928 struct list_head run_list;
929 const struct sched_class *sched_class; 969 const struct sched_class *sched_class;
930 struct sched_entity se; 970 struct sched_entity se;
971 struct sched_rt_entity rt;
931 972
932#ifdef CONFIG_PREEMPT_NOTIFIERS 973#ifdef CONFIG_PREEMPT_NOTIFIERS
933 /* list of struct preempt_notifier: */ 974 /* list of struct preempt_notifier: */
@@ -951,7 +992,11 @@ struct task_struct {
951 992
952 unsigned int policy; 993 unsigned int policy;
953 cpumask_t cpus_allowed; 994 cpumask_t cpus_allowed;
954 unsigned int time_slice; 995
996#ifdef CONFIG_PREEMPT_RCU
997 int rcu_read_lock_nesting;
998 int rcu_flipctr_idx;
999#endif /* #ifdef CONFIG_PREEMPT_RCU */
955 1000
956#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1001#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
957 struct sched_info sched_info; 1002 struct sched_info sched_info;
@@ -1041,6 +1086,11 @@ struct task_struct {
1041/* ipc stuff */ 1086/* ipc stuff */
1042 struct sysv_sem sysvsem; 1087 struct sysv_sem sysvsem;
1043#endif 1088#endif
1089#ifdef CONFIG_DETECT_SOFTLOCKUP
1090/* hung task detection */
1091 unsigned long last_switch_timestamp;
1092 unsigned long last_switch_count;
1093#endif
1044/* CPU-specific state of this task */ 1094/* CPU-specific state of this task */
1045 struct thread_struct thread; 1095 struct thread_struct thread;
1046/* filesystem information */ 1096/* filesystem information */
@@ -1173,6 +1223,10 @@ struct task_struct {
1173 int make_it_fail; 1223 int make_it_fail;
1174#endif 1224#endif
1175 struct prop_local_single dirties; 1225 struct prop_local_single dirties;
1226#ifdef CONFIG_LATENCYTOP
1227 int latency_record_count;
1228 struct latency_record latency_record[LT_SAVECOUNT];
1229#endif
1176}; 1230};
1177 1231
1178/* 1232/*
@@ -1453,6 +1507,12 @@ extern unsigned int sysctl_sched_child_runs_first;
1453extern unsigned int sysctl_sched_features; 1507extern unsigned int sysctl_sched_features;
1454extern unsigned int sysctl_sched_migration_cost; 1508extern unsigned int sysctl_sched_migration_cost;
1455extern unsigned int sysctl_sched_nr_migrate; 1509extern unsigned int sysctl_sched_nr_migrate;
1510extern unsigned int sysctl_sched_rt_period;
1511extern unsigned int sysctl_sched_rt_ratio;
1512#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
1513extern unsigned int sysctl_sched_min_bal_int_shares;
1514extern unsigned int sysctl_sched_max_bal_int_shares;
1515#endif
1456 1516
1457int sched_nr_latency_handler(struct ctl_table *table, int write, 1517int sched_nr_latency_handler(struct ctl_table *table, int write,
1458 struct file *file, void __user *buffer, size_t *length, 1518 struct file *file, void __user *buffer, size_t *length,
@@ -1845,7 +1905,18 @@ static inline int need_resched(void)
1845 * cond_resched_lock() will drop the spinlock before scheduling, 1905 * cond_resched_lock() will drop the spinlock before scheduling,
1846 * cond_resched_softirq() will enable bhs before scheduling. 1906 * cond_resched_softirq() will enable bhs before scheduling.
1847 */ 1907 */
1848extern int cond_resched(void); 1908#ifdef CONFIG_PREEMPT
1909static inline int cond_resched(void)
1910{
1911 return 0;
1912}
1913#else
1914extern int _cond_resched(void);
1915static inline int cond_resched(void)
1916{
1917 return _cond_resched();
1918}
1919#endif
1849extern int cond_resched_lock(spinlock_t * lock); 1920extern int cond_resched_lock(spinlock_t * lock);
1850extern int cond_resched_softirq(void); 1921extern int cond_resched_softirq(void);
1851 1922
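One caller-visible change in the sched.h hunk above: cond_resched() is now a static inline that returns 0 on CONFIG_PREEMPT kernels (preemption points are implicit there) and wraps _cond_resched() otherwise, so existing call sites need no change. A kernel-style sketch of the typical use in a long-running loop; struct item and handle_item() are hypothetical.

#include <linux/sched.h>

struct item {
        int value;
};

static void handle_item(struct item *it)
{
        it->value++;            /* stand-in for real per-item work */
}

static void process_items(struct item *items, unsigned long count)
{
        unsigned long i;

        for (i = 0; i < count; i++) {
                handle_item(&items[i]);
                cond_resched(); /* returns 0 immediately under CONFIG_PREEMPT,
                                 * may voluntarily schedule otherwise */
        }
}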
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 58962c51dee1..aab3a4cff4e1 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -17,22 +17,10 @@ extern void __lockfunc __release_kernel_lock(void);
17 __release_kernel_lock(); \ 17 __release_kernel_lock(); \
18} while (0) 18} while (0)
19 19
20/*
21 * Non-SMP kernels will never block on the kernel lock,
22 * so we are better off returning a constant zero from
23 * reacquire_kernel_lock() so that the compiler can see
24 * it at compile-time.
25 */
26#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
27# define return_value_on_smp return
28#else
29# define return_value_on_smp
30#endif
31
32static inline int reacquire_kernel_lock(struct task_struct *task) 20static inline int reacquire_kernel_lock(struct task_struct *task)
33{ 21{
34 if (unlikely(task->lock_depth >= 0)) 22 if (unlikely(task->lock_depth >= 0))
35 return_value_on_smp __reacquire_kernel_lock(); 23 return __reacquire_kernel_lock();
36 return 0; 24 return 0;
37} 25}
38 26
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index e7fa657d0c49..5da9794b2d78 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -9,10 +9,13 @@ struct stack_trace {
9}; 9};
10 10
11extern void save_stack_trace(struct stack_trace *trace); 11extern void save_stack_trace(struct stack_trace *trace);
12extern void save_stack_trace_tsk(struct task_struct *tsk,
13 struct stack_trace *trace);
12 14
13extern void print_stack_trace(struct stack_trace *trace, int spaces); 15extern void print_stack_trace(struct stack_trace *trace, int spaces);
14#else 16#else
15# define save_stack_trace(trace) do { } while (0) 17# define save_stack_trace(trace) do { } while (0)
18# define save_stack_trace_tsk(tsk, trace) do { } while (0)
16# define print_stack_trace(trace, spaces) do { } while (0) 19# define print_stack_trace(trace, spaces) do { } while (0)
17#endif 20#endif
18 21
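save_stack_trace_tsk() extends the stacktrace API so a caller (the new hung-task/softlockup code is the intended user) can capture another task's stack. A kernel-style sketch of filling a struct stack_trace for a given task; the nr_entries/max_entries/entries/skip fields are assumed from the struct definition elided in the hunk above.

#include <linux/stacktrace.h>
#include <linux/sched.h>
#include <linux/kernel.h>

static void dump_task_stack(struct task_struct *tsk)
{
        unsigned long entries[32];
        struct stack_trace trace = {
                .nr_entries  = 0,
                .max_entries = ARRAY_SIZE(entries),
                .entries     = entries,
                .skip        = 0,               /* keep all frames */
        };

        save_stack_trace_tsk(tsk, &trace);      /* capture tsk's kernel stack */
        print_stack_trace(&trace, 2);           /* indent two spaces */
}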
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 47729f18bfdf..2352f46160d3 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright (C) 2002, IBM Corp. 6 * Copyright (C) 2002, IBM Corp.
7 * 7 *
8 * All rights reserved. 8 * All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -103,6 +103,7 @@
103 .forkexec_idx = 0, \ 103 .forkexec_idx = 0, \
104 .flags = SD_LOAD_BALANCE \ 104 .flags = SD_LOAD_BALANCE \
105 | SD_BALANCE_NEWIDLE \ 105 | SD_BALANCE_NEWIDLE \
106 | SD_BALANCE_FORK \
106 | SD_BALANCE_EXEC \ 107 | SD_BALANCE_EXEC \
107 | SD_WAKE_AFFINE \ 108 | SD_WAKE_AFFINE \
108 | SD_WAKE_IDLE \ 109 | SD_WAKE_IDLE \
@@ -134,6 +135,7 @@
134 .forkexec_idx = 1, \ 135 .forkexec_idx = 1, \
135 .flags = SD_LOAD_BALANCE \ 136 .flags = SD_LOAD_BALANCE \
136 | SD_BALANCE_NEWIDLE \ 137 | SD_BALANCE_NEWIDLE \
138 | SD_BALANCE_FORK \
137 | SD_BALANCE_EXEC \ 139 | SD_BALANCE_EXEC \
138 | SD_WAKE_AFFINE \ 140 | SD_WAKE_AFFINE \
139 | SD_WAKE_IDLE \ 141 | SD_WAKE_IDLE \
@@ -165,6 +167,7 @@
165 .forkexec_idx = 1, \ 167 .forkexec_idx = 1, \
166 .flags = SD_LOAD_BALANCE \ 168 .flags = SD_LOAD_BALANCE \
167 | SD_BALANCE_NEWIDLE \ 169 | SD_BALANCE_NEWIDLE \
170 | SD_BALANCE_FORK \
168 | SD_BALANCE_EXEC \ 171 | SD_BALANCE_EXEC \
169 | SD_WAKE_AFFINE \ 172 | SD_WAKE_AFFINE \
170 | BALANCE_FOR_PKG_POWER,\ 173 | BALANCE_FOR_PKG_POWER,\