author     Ingo Molnar <mingo@elte.hu>  2008-07-18 16:00:54 -0400
committer  Ingo Molnar <mingo@elte.hu>  2008-07-18 16:00:54 -0400
commit     bb2c018b09b681d43f5e08124b83e362647ea82b (patch)
tree       d794902c78f9fdd04ed88a4b8d451ed6f9292ec0 /include/linux
parent     82638844d9a8581bbf33201cc209a14876eca167 (diff)
parent     5b664cb235e97afbf34db9c4d77f08ebd725335e (diff)
Merge branch 'linus' into cpus4096
Conflicts:
drivers/acpi/processor_throttling.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux')
37 files changed, 773 insertions, 209 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 0601075d09a1..a17177639376 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -235,6 +235,9 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
 			  const char *name);
 
+#ifdef CONFIG_PM_SLEEP
+void __init acpi_old_suspend_ordering(void);
+#endif /* CONFIG_PM_SLEEP */
 #else	/* CONFIG_ACPI */
 
 static inline int early_acpi_boot_init(void)
diff --git a/include/linux/adb.h b/include/linux/adb.h
index 64d8878e1444..63bca502fa55 100644
--- a/include/linux/adb.h
+++ b/include/linux/adb.h
@@ -84,7 +84,6 @@ enum adb_message {
 	ADB_MSG_PRE_RESET,	/* Called before resetting the bus */
 	ADB_MSG_POST_RESET	/* Called after resetting the bus (re-do init & register) */
 };
-extern struct adb_driver *adb_controller;
 extern struct blocking_notifier_head adb_client_list;
 
 int adb_request(struct adb_request *req, void (*done)(struct adb_request *),
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 32a441b05fd5..88d68081a0f1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -985,6 +985,9 @@ static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
 
 static inline int blk_integrity_rq(struct request *rq)
 {
+	if (rq->bio == NULL)
+		return 0;
+
 	return bio_integrity(rq->bio);
 }
 
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 3ae65b1bf90f..0488f937634a 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -165,8 +165,8 @@ struct configfs_item_operations {
 };
 
 struct configfs_group_operations {
-	struct config_item *(*make_item)(struct config_group *group, const char *name);
-	struct config_group *(*make_group)(struct config_group *group, const char *name);
+	int (*make_item)(struct config_group *group, const char *name, struct config_item **new_item);
+	int (*make_group)(struct config_group *group, const char *name, struct config_group **new_group);
 	int (*commit_item)(struct config_item *item);
 	void (*disconnect_notify)(struct config_group *group, struct config_item *item);
 	void (*drop_item)(struct config_group *group, struct config_item *item);
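With the prototype change above, make_item() and make_group() now report errors through their return value and hand the new object back through an output pointer. A minimal sketch of a make_item implementation written against that convention (struct my_item and my_item_type are illustrative names, not part of this patch):

/* Illustrative only: make_item for the new int-returning prototype. */
static int my_make_item(struct config_group *group, const char *name,
			struct config_item **new_item)
{
	struct my_item *it = kzalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return -ENOMEM;

	config_item_init_type_name(&it->item, name, &my_item_type);
	*new_item = &it->item;
	return 0;
}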
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
new file mode 100644
index 000000000000..a9c96d865ee7
--- /dev/null
+++ b/include/linux/crc-t10dif.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_CRC_T10DIF_H
+#define _LINUX_CRC_T10DIF_H
+
+#include <linux/types.h>
+
+__u16 crc_t10dif(unsigned char const *, size_t);
+
+#endif
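The new header exports the CRC-16 used for the guard tag of the T10 Data Integrity Field. A minimal usage sketch (the 512-byte sector length is an assumption for illustration; the header itself does not fix a buffer size):

#include <linux/crc-t10dif.h>

/* Illustrative only: compute the DIF guard tag over one 512-byte sector. */
static __u16 example_guard_tag(const unsigned char *sector)
{
	return crc_t10dif(sector, 512);
}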
diff --git a/include/linux/device.h b/include/linux/device.h
index 6a2d04c011bc..f71a78d123ae 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -68,6 +68,8 @@ struct bus_type {
 	int (*resume_early)(struct device *dev);
 	int (*resume)(struct device *dev);
 
+	struct pm_ext_ops *pm;
+
 	struct bus_type_private *p;
 };
 
@@ -131,6 +133,8 @@ struct device_driver {
 	int (*resume) (struct device *dev);
 	struct attribute_group **groups;
 
+	struct pm_ops *pm;
+
 	struct driver_private *p;
 };
 
@@ -197,6 +201,8 @@ struct class {
 
 	int (*suspend)(struct device *dev, pm_message_t state);
 	int (*resume)(struct device *dev);
+
+	struct pm_ops *pm;
 };
 
 extern int __must_check class_register(struct class *class);
@@ -248,8 +254,11 @@ struct device_type {
 	struct attribute_group **groups;
 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
 	void (*release)(struct device *dev);
+
 	int (*suspend)(struct device *dev, pm_message_t state);
 	int (*resume)(struct device *dev);
+
+	struct pm_ops *pm;
 };
 
 /* interface for exporting device attributes */
diff --git a/include/linux/elf.h b/include/linux/elf.h
index ff9fbed90123..edc3dac3f02f 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -358,6 +358,7 @@ typedef struct elf64_shdr {
 #define NT_PRXFPREG     0x46e62b7f      /* copied from gdb5.1/include/elf/common.h */
 #define NT_PPC_VMX	0x100		/* PowerPC Altivec/VMX registers */
 #define NT_PPC_SPE	0x101		/* PowerPC SPE/EVR registers */
+#define NT_PPC_VSX	0x102		/* PowerPC VSX registers */
 #define NT_386_TLS	0x200		/* i386 TLS slots (struct user_desc) */
 
 
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 08934995c7ab..deddeedf3257 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -128,6 +128,15 @@ static inline void set_freezable(void)
 }
 
 /*
+ * Tell the freezer that the current task should be frozen by it and that it
+ * should send a fake signal to the task to freeze it.
+ */
+static inline void set_freezable_with_signal(void)
+{
+	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
+}
+
+/*
  * Freezer-friendly wrappers around wait_event_interruptible() and
  * wait_event_interruptible_timeout(), originally defined in <linux/wait.h>
  */
@@ -174,6 +183,7 @@ static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
 static inline void set_freezable(void) {}
+static inline void set_freezable_with_signal(void) {}
 
 #define wait_event_freezable(wq, condition)		\
 		wait_event_interruptible(wq, condition)
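The new helper lets a kernel thread mark itself freezable and ask the freezer to deliver a fake signal. A rough sketch of how a kthread might use it (the loop body and timeout are illustrative, not taken from this patch):

/* Illustrative only: a freezable kernel thread that wants the fake signal. */
static int example_kthread(void *data)
{
	set_freezable_with_signal();

	while (!kthread_should_stop()) {
		try_to_freeze();
		/* ... do the thread's periodic work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}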
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 52e510a0aec2..c6455dadb21b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1729,6 +1729,8 @@ static inline void invalidate_remote_inode(struct inode *inode)
 extern int invalidate_inode_pages2(struct address_space *mapping);
 extern int invalidate_inode_pages2_range(struct address_space *mapping,
 					 pgoff_t start, pgoff_t end);
+extern void generic_sync_sb_inodes(struct super_block *sb,
+				struct writeback_control *wbc);
 extern int write_inode_now(struct inode *, int);
 extern int filemap_fdatawrite(struct address_space *);
 extern int filemap_flush(struct address_space *);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index ac4eeb2932ef..4726126f5a59 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -139,6 +139,12 @@ struct ide_io_ports {
 #define WAIT_MIN_SLEEP	(2*HZ/100)	/* 20msec - minimum sleep time */
 
 /*
+ * Op codes for special requests to be handled by ide_special_rq().
+ * Values should be in the range of 0x20 to 0x3f.
+ */
+#define REQ_DRIVE_RESET		0x20
+
+/*
  * Check for an interrupt and acknowledge the interrupt status
  */
 struct hwif_s;
@@ -171,7 +177,7 @@ typedef struct hw_regs_s {
 	int		irq;			/* our irq number */
 	ide_ack_intr_t	*ack_intr;		/* acknowledge interrupt */
 	hwif_chipset_t  chipset;
-	struct device	*dev;
+	struct device	*dev, *parent;
 } hw_regs_t;
 
 void ide_init_port_data(struct hwif_s *, unsigned int);
@@ -405,8 +411,8 @@ typedef struct ide_drive_s {
 struct ide_port_info;
 
 struct ide_port_ops {
-	/* host specific initialization of devices on a port */
-	void	(*port_init_devs)(struct hwif_s *);
+	/* host specific initialization of a device */
+	void	(*init_dev)(ide_drive_t *);
 	/* routine to program host for PIO mode */
 	void	(*set_pio_mode)(ide_drive_t *, const u8);
 	/* routine to program host for DMA mode */
@@ -565,8 +571,6 @@ typedef struct hwgroup_s {
 		unsigned int sleeping	: 1;
 	/* BOOL: polling active & poll_timeout field valid */
 		unsigned int polling	: 1;
-	/* BOOL: in a polling reset situation. Must not trigger another reset yet */
-		unsigned int resetting : 1;
 
 	/* current drive */
 	ide_drive_t *drive;
@@ -786,7 +790,6 @@ struct ide_driver_s {
 	ide_startstop_t	(*do_request)(ide_drive_t *, struct request *, sector_t);
 	int		(*end_request)(ide_drive_t *, int, int);
 	ide_startstop_t	(*error)(ide_drive_t *, struct request *rq, u8, u8);
-	ide_startstop_t	(*abort)(ide_drive_t *, struct request *rq);
 	struct device_driver	gen_driver;
 	int		(*probe)(ide_drive_t *);
 	void		(*remove)(ide_drive_t *);
@@ -801,18 +804,6 @@ struct ide_driver_s {
 
 int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long);
 
-/*
- * ide_hwifs[] is the master data structure used to keep track
- * of just about everything in ide.c.  Whenever possible, routines
- * should be using pointers to a drive (ide_drive_t *) or
- * pointers to a hwif (ide_hwif_t *), rather than indexing this
- * structure directly (the allocation/layout may change!).
- *
- */
-#ifndef _IDE_C
-extern	ide_hwif_t	ide_hwifs[];		/* master data repository */
-#endif
-
 extern int ide_vlb_clk;
 extern int ide_pci_clk;
 
@@ -840,10 +831,6 @@ ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8);
 
 ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat);
 
-ide_startstop_t __ide_abort(ide_drive_t *, struct request *);
-
-extern ide_startstop_t ide_abort(ide_drive_t *, const char *);
-
 extern void ide_fix_driveid(struct hd_driveid *);
 
 extern void ide_fixstring(u8 *, const int, const int);
@@ -1271,16 +1258,43 @@ static inline int ide_dev_is_sata(struct hd_driveid *id)
 u64 ide_get_lba_addr(struct ide_taskfile *, int);
 u8 ide_dump_status(ide_drive_t *, const char *, u8);
 
-typedef struct ide_pio_timings_s {
-	int	setup_time;	/* Address setup (ns) minimum */
-	int	active_time;	/* Active pulse (ns) minimum */
-	int	cycle_time;	/* Cycle time (ns) minimum = */
-				/* active + recovery (+ setup for some chips) */
-} ide_pio_timings_t;
+struct ide_timing {
+	u8  mode;
+	u8  setup;	/* t1 */
+	u16 act8b;	/* t2 for 8-bit io */
+	u16 rec8b;	/* t2i for 8-bit io */
+	u16 cyc8b;	/* t0 for 8-bit io */
+	u16 active;	/* t2 or tD */
+	u16 recover;	/* t2i or tK */
+	u16 cycle;	/* t0 */
+	u16 udma;	/* t2CYCTYP/2 */
+};
+
+enum {
+	IDE_TIMING_SETUP	= (1 << 0),
+	IDE_TIMING_ACT8B	= (1 << 1),
+	IDE_TIMING_REC8B	= (1 << 2),
+	IDE_TIMING_CYC8B	= (1 << 3),
+	IDE_TIMING_8BIT		= IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
+				  IDE_TIMING_CYC8B,
+	IDE_TIMING_ACTIVE	= (1 << 4),
+	IDE_TIMING_RECOVER	= (1 << 5),
+	IDE_TIMING_CYCLE	= (1 << 6),
+	IDE_TIMING_UDMA		= (1 << 7),
+	IDE_TIMING_ALL		= IDE_TIMING_SETUP | IDE_TIMING_8BIT |
+				  IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
+				  IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
+};
+
+struct ide_timing *ide_timing_find_mode(u8);
+u16 ide_pio_cycle_time(ide_drive_t *, u8);
+void ide_timing_merge(struct ide_timing *, struct ide_timing *,
+		      struct ide_timing *, unsigned int);
+int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
+
+int ide_scan_pio_blacklist(char *);
 
-unsigned int ide_pio_cycle_time(ide_drive_t *, u8);
 u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8);
-extern const ide_pio_timings_t ide_pio_timings[6];
 
 int ide_set_pio_mode(ide_drive_t *, u8);
 int ide_set_dma_mode(ide_drive_t *, u8);
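The ide_timing helpers above mirror the libata timing calculator. A rough sketch of how a host driver's set_dma_mode hook could use ide_timing_compute (the bus-clock value and the example_program_timings helper are assumptions for illustration, not from this diff):

/* Illustrative only: derive and program timings for a transfer mode. */
static void example_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
	struct ide_timing t;
	int T = 30;	/* assumed cycle time of a ~33 MHz bus clock, in ns */

	if (ide_timing_compute(drive, speed, &t, T, T) == 0)
		example_program_timings(drive, t.active, t.recover);
}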
diff --git a/include/linux/inet.h b/include/linux/inet.h
index 1354080cf8cf..4cca05c9678e 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -44,6 +44,13 @@
 
 #include <linux/types.h>
 
+/*
+ * These mimic similar macros defined in user-space for inet_ntop(3).
+ * See /usr/include/netinet/in.h .
+ */
+#define INET_ADDRSTRLEN		(16)
+#define INET6_ADDRSTRLEN	(48)
+
 extern __be32 in_aton(const char *str);
 extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
 extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
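As in user space, the two length macros are meant for sizing text buffers that hold printed addresses. A minimal sketch under that assumption (the logging helper is illustrative, not part of this patch):

#include <linux/inet.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Illustrative only: a buffer guaranteed to fit any printed IPv4/IPv6 address. */
static void example_log_peer(const char *addr_text)
{
	char buf[INET6_ADDRSTRLEN];

	strlcpy(buf, addr_text, sizeof(buf));
	printk(KERN_DEBUG "peer address: %s\n", buf);
}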
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9927a88674a3..93c45acf249a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -140,8 +140,8 @@ extern struct group_info init_groups;
 	.nr_cpus_allowed = NR_CPUS,					\
 	},								\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
-	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children),		\
-	.ptrace_list	= LIST_HEAD_INIT(tsk.ptrace_list),		\
+	.ptraced	= LIST_HEAD_INIT(tsk.ptraced),			\
+	.ptrace_entry	= LIST_HEAD_INIT(tsk.ptrace_entry),		\
 	.real_parent	= &tsk,						\
 	.parent		= &tsk,						\
 	.children	= LIST_HEAD_INIT(tsk.children),			\
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index c6801bffe76d..2cd07cc29687 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -59,6 +59,7 @@ struct resource_list {
 #define IORESOURCE_IRQ_HIGHLEVEL	(1<<2)
 #define IORESOURCE_IRQ_LOWLEVEL	(1<<3)
 #define IORESOURCE_IRQ_SHAREABLE	(1<<4)
+#define IORESOURCE_IRQ_OPTIONAL 	(1<<5)
 
 /* PnP DMA specific bits (IORESOURCE_BITS) */
 #define IORESOURCE_DMA_TYPE_MASK	(3<<0)
@@ -88,6 +89,10 @@ struct resource_list {
 #define IORESOURCE_MEM_SHADOWABLE	(1<<5)	/* dup: IORESOURCE_SHADOWABLE */
 #define IORESOURCE_MEM_EXPANSIONROM	(1<<6)
 
+/* PnP I/O specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_IO_16BIT_ADDR	(1<<0)
+#define IORESOURCE_IO_FIXED		(1<<1)
+
 /* PCI ROM control bits (IORESOURCE_BITS) */
 #define IORESOURCE_ROM_ENABLE		(1<<0)	/* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
 #define IORESOURCE_ROM_SHADOW		(1<<1)	/* ROM is copy at C000:0 */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cf1cd3a2ed78..2128ef7780c6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -108,6 +108,7 @@ extern unsigned int kobjsize(const void *objp);
 
 #define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
 #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
+#define VM_SAO		0x20000000	/* Strong Access Ordering (powerpc) */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
diff --git a/include/linux/mman.h b/include/linux/mman.h
index dab8892e6ff1..30d1073bac3b 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -34,6 +34,32 @@ static inline void vm_unacct_memory(long pages)
 }
 
 /*
+ * Allow architectures to handle additional protection bits
+ */
+
+#ifndef arch_calc_vm_prot_bits
+#define arch_calc_vm_prot_bits(prot) 0
+#endif
+
+#ifndef arch_vm_get_page_prot
+#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
+#endif
+
+#ifndef arch_validate_prot
+/*
+ * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
+ * already been masked out.
+ *
+ * Returns true if the prot flags are valid
+ */
+static inline int arch_validate_prot(unsigned long prot)
+{
+	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
+}
+#define arch_validate_prot arch_validate_prot
+#endif
+
+/*
  * Optimisation macro.  It is equivalent to:
  *   (x & bit1) ? bit2 : 0
  * but this version is faster.
@@ -51,7 +77,8 @@ calc_vm_prot_bits(unsigned long prot)
 {
 	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
 	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
-	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC );
+	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
+	       arch_calc_vm_prot_bits(prot);
 }
 
 /*
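Together with the VM_SAO flag added to mm.h above, these hooks let an architecture translate its own mprotect() bits into VMA flags and page-protection bits. A hedged sketch of what an override in an architecture's asm/mman.h could look like (PROT_SAO and _PAGE_SAO are powerpc-style names used here purely for illustration; the real powerpc definitions are not part of this hunk):

/* Illustrative only: mapping an arch-specific PROT_* bit to a VM_* flag. */
#define arch_calc_vm_prot_bits(prot)	((prot) & PROT_SAO ? VM_SAO : 0)
#define arch_vm_get_page_prot(vm_flags)	__pgprot((vm_flags) & VM_SAO ? _PAGE_SAO : 0)

static inline int arch_validate_prot(unsigned long prot)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC |
			 PROT_SEM | PROT_SAO)) == 0;
}
#define arch_validate_prot arch_validate_prot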
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index d0c3abed74c2..143cebf0586f 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -135,6 +135,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
 	struct mmc_command *, int);
 
 extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
+extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);
 
 extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
 extern void mmc_release_host(struct mmc_host *host);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 7ab962fa1d73..10a2080086ca 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -51,8 +51,30 @@ struct mmc_ios {
 
 struct mmc_host_ops {
 	void	(*request)(struct mmc_host *host, struct mmc_request *req);
+	/*
+	 * Avoid calling these three functions too often or in a "fast path",
+	 * since underlaying controller might implement them in an expensive
+	 * and/or slow way.
+	 *
+	 * Also note that these functions might sleep, so don't call them
+	 * in the atomic contexts!
+	 *
+	 * Return values for the get_ro callback should be:
+	 *   0 for a read/write card
+	 *   1 for a read-only card
+	 *   -ENOSYS when not supported (equal to NULL callback)
+	 *   or a negative errno value when something bad happened
+	 *
+	 * Return values for the get_ro callback should be:
+	 *   0 for a absent card
+	 *   1 for a present card
+	 *   -ENOSYS when not supported (equal to NULL callback)
+	 *   or a negative errno value when something bad happened
+	 */
 	void	(*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
 	int	(*get_ro)(struct mmc_host *host);
+	int	(*get_cd)(struct mmc_host *host);
+
 	void	(*enable_sdio_irq)(struct mmc_host *host, int enable);
 };
 
@@ -89,11 +111,11 @@ struct mmc_host {
 	unsigned long		caps;		/* Host capabilities */
 
 #define MMC_CAP_4_BIT_DATA	(1 << 0)	/* Can the host do 4 bit transfers */
-#define MMC_CAP_MULTIWRITE	(1 << 1)	/* Can accurately report bytes sent to card on error */
-#define MMC_CAP_MMC_HIGHSPEED	(1 << 2)	/* Can do MMC high-speed timing */
-#define MMC_CAP_SD_HIGHSPEED	(1 << 3)	/* Can do SD high-speed timing */
-#define MMC_CAP_SDIO_IRQ	(1 << 4)	/* Can signal pending SDIO IRQs */
-#define MMC_CAP_SPI		(1 << 5)	/* Talks only SPI protocols */
+#define MMC_CAP_MMC_HIGHSPEED	(1 << 1)	/* Can do MMC high-speed timing */
+#define MMC_CAP_SD_HIGHSPEED	(1 << 2)	/* Can do SD high-speed timing */
+#define MMC_CAP_SDIO_IRQ	(1 << 3)	/* Can signal pending SDIO IRQs */
+#define MMC_CAP_SPI		(1 << 4)	/* Talks only SPI protocols */
+#define MMC_CAP_NEEDS_POLL	(1 << 5)	/* Needs polling for card-detection */
 
 /* host specific block data */
 	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
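The new get_cd callback reports card presence the same way get_ro reports write protection (the second comment block above evidently describes get_cd even though it repeats "get_ro"). A hedged sketch of GPIO-based implementations in a host driver (struct example_host, its gpio fields, and the active-low polarity are illustrative assumptions):

/* Illustrative only: card-detect and write-protect callbacks backed by GPIOs. */
static int example_mmc_get_cd(struct mmc_host *mmc)
{
	struct example_host *host = mmc_priv(mmc);

	if (!gpio_is_valid(host->cd_gpio))
		return -ENOSYS;				/* same as a NULL callback */
	return gpio_get_value(host->cd_gpio) ? 0 : 1;	/* assumed active-low */
}

static int example_mmc_get_ro(struct mmc_host *mmc)
{
	struct example_host *host = mmc_priv(mmc);

	if (!gpio_is_valid(host->wp_gpio))
		return -ENOSYS;
	return gpio_get_value(host->wp_gpio) ? 1 : 0;
}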
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 4236fbf0b6fb..14b81f3e5232 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -16,7 +16,6 @@
  * Based strongly on code by:
  *
  * Author: Yong-iL Joh <tolkien@mizi.com>
- * Date  : $Date: 2002/06/18 12:37:30 $
 *
 * Author:  Andrew Christian
 *          15 May 2002
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index b050f4d7b41f..07bee4a0d457 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -1,7 +1,7 @@
 /*
  *  include/linux/mmc/sdio_func.h
  *
- *  Copyright 2007 Pierre Ossman
+ *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -46,6 +46,8 @@ struct sdio_func {
 	unsigned		max_blksize;	/* maximum block size */
 	unsigned		cur_blksize;	/* current block size */
 
+	unsigned		enable_timeout;	/* max enable timeout in msec */
+
 	unsigned int		state;		/* function state */
 #define SDIO_STATE_PRESENT	(1<<0)		/* present in sysfs */
 
@@ -120,23 +122,22 @@ extern int sdio_set_block_size(struct sdio_func *func, unsigned blksz);
 extern int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler);
 extern int sdio_release_irq(struct sdio_func *func);
 
-extern unsigned char sdio_readb(struct sdio_func *func,
-	unsigned int addr, int *err_ret);
-extern unsigned short sdio_readw(struct sdio_func *func,
-	unsigned int addr, int *err_ret);
-extern unsigned long sdio_readl(struct sdio_func *func,
-	unsigned int addr, int *err_ret);
+extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
+
+extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);
 
 extern int sdio_memcpy_fromio(struct sdio_func *func, void *dst,
 	unsigned int addr, int count);
 extern int sdio_readsb(struct sdio_func *func, void *dst,
 	unsigned int addr, int count);
 
-extern void sdio_writeb(struct sdio_func *func, unsigned char b,
+extern void sdio_writeb(struct sdio_func *func, u8 b,
 	unsigned int addr, int *err_ret);
-extern void sdio_writew(struct sdio_func *func, unsigned short b,
+extern void sdio_writew(struct sdio_func *func, u16 b,
 	unsigned int addr, int *err_ret);
-extern void sdio_writel(struct sdio_func *func, unsigned long b,
+extern void sdio_writel(struct sdio_func *func, u32 b,
 	unsigned int addr, int *err_ret);
 
 extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr,
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 27d6a8d98cef..29d261918734 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -12,9 +12,19 @@
 #include <linux/magic.h>
 
 /* Default timeout values */
+#define NFS_DEF_UDP_TIMEO	(11)
+#define NFS_DEF_UDP_RETRANS	(3)
+#define NFS_DEF_TCP_TIMEO	(600)
+#define NFS_DEF_TCP_RETRANS	(2)
+
 #define NFS_MAX_UDP_TIMEOUT	(60*HZ)
 #define NFS_MAX_TCP_TIMEOUT	(600*HZ)
 
+#define NFS_DEF_ACREGMIN	(3)
+#define NFS_DEF_ACREGMAX	(60)
+#define NFS_DEF_ACDIRMIN	(30)
+#define NFS_DEF_ACDIRMAX	(60)
+
 /*
  * When flushing a cluster of dirty pages, there can be different
  * strategies:
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
new file mode 100644
index 000000000000..1cb9a3fed2b3
--- /dev/null
+++ b/include/linux/nfs_iostat.h
@@ -0,0 +1,119 @@
+/*
+ *  User-space visible declarations for NFS client per-mount
+ *  point statistics
+ *
+ *  Copyright (C) 2005, 2006 Chuck Lever <cel@netapp.com>
+ *
+ *  NFS client per-mount statistics provide information about the
+ *  health of the NFS client and the health of each NFS mount point.
+ *  Generally these are not for detailed problem diagnosis, but
+ *  simply to indicate that there is a problem.
+ *
+ *  These counters are not meant to be human-readable, but are meant
+ *  to be integrated into system monitoring tools such as "sar" and
+ *  "iostat".  As such, the counters are sampled by the tools over
+ *  time, and are never zeroed after a file system is mounted.
+ *  Moving averages can be computed by the tools by taking the
+ *  difference between two instantaneous samples and dividing that
+ *  by the time between the samples.
+ */
+
+#ifndef _LINUX_NFS_IOSTAT
+#define _LINUX_NFS_IOSTAT
+
+#define NFS_IOSTAT_VERS		"1.0"
+
+/*
+ * NFS byte counters
+ *
+ * 1.  SERVER - the number of payload bytes read from or written
+ *     to the server by the NFS client via an NFS READ or WRITE
+ *     request.
+ *
+ * 2.  NORMAL - the number of bytes read or written by applications
+ *     via the read(2) and write(2) system call interfaces.
+ *
+ * 3.  DIRECT - the number of bytes read or written from files
+ *     opened with the O_DIRECT flag.
+ *
+ * These counters give a view of the data throughput into and out
+ * of the NFS client.  Comparing the number of bytes requested by
+ * an application with the number of bytes the client requests from
+ * the server can provide an indication of client efficiency
+ * (per-op, cache hits, etc).
+ *
+ * These counters can also help characterize which access methods
+ * are in use.  DIRECT by itself shows whether there is any O_DIRECT
+ * traffic.  NORMAL + DIRECT shows how much data is going through
+ * the system call interface.  A large amount of SERVER traffic
+ * without much NORMAL or DIRECT traffic shows that applications
+ * are using mapped files.
+ *
+ * NFS page counters
+ *
+ * These count the number of pages read or written via nfs_readpage(),
+ * nfs_readpages(), or their write equivalents.
+ *
+ * NB: When adding new byte counters, please include the measured
+ * units in the name of each byte counter to help users of this
+ * interface determine what exactly is being counted.
+ */
+enum nfs_stat_bytecounters {
+	NFSIOS_NORMALREADBYTES = 0,
+	NFSIOS_NORMALWRITTENBYTES,
+	NFSIOS_DIRECTREADBYTES,
+	NFSIOS_DIRECTWRITTENBYTES,
+	NFSIOS_SERVERREADBYTES,
+	NFSIOS_SERVERWRITTENBYTES,
+	NFSIOS_READPAGES,
+	NFSIOS_WRITEPAGES,
+	__NFSIOS_BYTESMAX,
+};
+
+/*
+ * NFS event counters
+ *
+ * These counters provide a low-overhead way of monitoring client
+ * activity without enabling NFS trace debugging.  The counters
+ * show the rate at which VFS requests are made, and how often the
+ * client invalidates its data and attribute caches.  This allows
+ * system administrators to monitor such things as how close-to-open
+ * is working, and answer questions such as "why are there so many
+ * GETATTR requests on the wire?"
+ *
+ * They also count anamolous events such as short reads and writes,
+ * silly renames due to close-after-delete, and operations that
+ * change the size of a file (such operations can often be the
+ * source of data corruption if applications aren't using file
+ * locking properly).
+ */
+enum nfs_stat_eventcounters {
+	NFSIOS_INODEREVALIDATE = 0,
+	NFSIOS_DENTRYREVALIDATE,
+	NFSIOS_DATAINVALIDATE,
+	NFSIOS_ATTRINVALIDATE,
+	NFSIOS_VFSOPEN,
+	NFSIOS_VFSLOOKUP,
+	NFSIOS_VFSACCESS,
+	NFSIOS_VFSUPDATEPAGE,
+	NFSIOS_VFSREADPAGE,
+	NFSIOS_VFSREADPAGES,
+	NFSIOS_VFSWRITEPAGE,
+	NFSIOS_VFSWRITEPAGES,
+	NFSIOS_VFSGETDENTS,
+	NFSIOS_VFSSETATTR,
+	NFSIOS_VFSFLUSH,
+	NFSIOS_VFSFSYNC,
+	NFSIOS_VFSLOCK,
+	NFSIOS_VFSRELEASE,
+	NFSIOS_CONGESTIONWAIT,
+	NFSIOS_SETATTRTRUNC,
+	NFSIOS_EXTENDWRITE,
+	NFSIOS_SILLYRENAME,
+	NFSIOS_SHORTREAD,
+	NFSIOS_SHORTWRITE,
+	NFSIOS_DELAY,
+	__NFSIOS_COUNTSMAX,
+};
+
+#endif	/* _LINUX_NFS_IOSTAT */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index a1676e19e491..3c60685d972b 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -27,9 +27,12 @@
 /*
  * Valid flags for a dirty buffer
  */
-#define PG_BUSY			0
-#define PG_NEED_COMMIT		1
-#define PG_NEED_RESCHED		2
+enum {
+	PG_BUSY = 0,
+	PG_CLEAN,
+	PG_NEED_COMMIT,
+	PG_NEED_RESCHED,
+};
 
 struct nfs_inode;
 struct nfs_page {
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 24263bb8e0be..8c77c11224d1 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -829,9 +829,8 @@ struct nfs_rpc_ops {
 	int	(*write_done)  (struct rpc_task *, struct nfs_write_data *);
 	void	(*commit_setup) (struct nfs_write_data *, struct rpc_message *);
 	int	(*commit_done) (struct rpc_task *, struct nfs_write_data *);
-	int	(*file_open)   (struct inode *, struct file *);
-	int	(*file_release) (struct inode *, struct file *);
 	int	(*lock)(struct file *, int, struct file_lock *);
+	int	(*lock_check_bounds)(const struct file_lock *);
 	void	(*clear_acl_cache)(struct inode *);
 };
 
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index afe338217d91..d3a74e00a3e1 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -24,4 +24,7 @@ static inline void of_device_free(struct of_device *dev)
 	of_release_dev(&dev->dev);
 }
 
+extern ssize_t of_device_get_modalias(struct of_device *ofdev,
+					char *str, ssize_t len);
+
 #endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d18b1dd49fab..a6a088e1a804 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -17,8 +17,7 @@
 #ifndef LINUX_PCI_H
 #define LINUX_PCI_H
 
-/* Include the pci register defines */
-#include <linux/pci_regs.h>
+#include <linux/pci_regs.h>	/* The pci register defines */
 
 /*
  * The PCI interface treats multi-function devices as independent
@@ -49,12 +48,22 @@
 #include <linux/list.h>
 #include <linux/compiler.h>
 #include <linux/errno.h>
+#include <linux/kobject.h>
 #include <asm/atomic.h>
 #include <linux/device.h>
 
 /* Include the ID list */
 #include <linux/pci_ids.h>
 
+/* pci_slot represents a physical slot */
+struct pci_slot {
+	struct pci_bus *bus;		/* The bus this slot is on */
+	struct list_head list;		/* node in list of slots on this bus */
+	struct hotplug_slot *hotplug;	/* Hotplug info (migrate over time) */
+	unsigned char number;		/* PCI_SLOT(pci_dev->devfn) */
+	struct kobject kobj;
+};
+
 /* File state for mmap()s on /proc/bus/pci/X/Y */
 enum pci_mmap_state {
 	pci_mmap_io,
@@ -142,6 +151,7 @@ struct pci_dev {
 
 	void		*sysdata;	/* hook for sys-specific extension */
 	struct proc_dir_entry *procent;	/* device entry in /proc/bus/pci */
+	struct pci_slot	*slot;		/* Physical slot this device is in */
 
 	unsigned int	devfn;		/* encoded device & function index */
 	unsigned short	vendor;
@@ -167,6 +177,13 @@ struct pci_dev {
 	pci_power_t     current_state;  /* Current operating state. In ACPI-speak,
 					   this is D0-D3, D0 being fully functional,
 					   and D3 being off. */
+	int		pm_cap;		/* PM capability offset in the
+					   configuration space */
+	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
+					   can be generated */
+	unsigned int	d1_support:1;	/* Low power state D1 is supported */
+	unsigned int	d2_support:1;	/* Low power state D2 is supported */
+	unsigned int	no_d1d2:1;	/* Only allow D0 and D3 */
 
 #ifdef	CONFIG_PCIEASPM
 	struct pcie_link_state	*link_state;	/* ASPM link state. */
@@ -191,7 +208,6 @@ struct pci_dev {
 	unsigned int	is_added:1;
 	unsigned int	is_busmaster:1; /* device is busmaster */
 	unsigned int	no_msi:1;	/* device may not use msi */
-	unsigned int	no_d1d2:1;	/* only allow d0 or d3 */
 	unsigned int	block_ucfg_access:1;	/* userspace config space access is blocked */
 	unsigned int	broken_parity_status:1;	/* Device generates false positive parity */
 	unsigned int	msi_enabled:1;
@@ -267,6 +283,7 @@ struct pci_bus {
 	struct list_head children;	/* list of child buses */
 	struct list_head devices;	/* list of devices on this bus */
 	struct pci_dev	*self;		/* bridge device as seen by parent */
+	struct list_head slots;		/* list of slots on this bus */
 	struct resource	*resource[PCI_BUS_NUM_RESOURCES];
 					/* address space routed to this bus */
 
@@ -328,7 +345,7 @@ struct pci_bus_region {
 struct pci_dynids {
 	spinlock_t lock;            /* protects list, index */
 	struct list_head list;      /* for IDs added at runtime */
-	unsigned int use_driver_data:1; /* pci_driver->driver_data is used */
+	unsigned int use_driver_data:1; /* pci_device_id->driver_data is used */
 };
 
 /* ---------------------------------------------------------------- */
@@ -390,7 +407,7 @@ struct pci_driver {
 	int  (*resume_early) (struct pci_dev *dev);
 	int  (*resume) (struct pci_dev *dev);	                /* Device woken up */
 	void (*shutdown) (struct pci_dev *dev);
-
+	struct pm_ext_ops *pm;
 	struct pci_error_handlers *err_handler;
 	struct device_driver	driver;
 	struct pci_dynids dynids;
@@ -489,6 +506,10 @@ struct pci_bus *pci_create_bus(struct device *parent, int bus,
 				      struct pci_ops *ops, void *sysdata);
 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
 				int busnr);
+struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
+				 const char *name);
+void pci_destroy_slot(struct pci_slot *slot);
+void pci_update_slot_number(struct pci_slot *slot, int slot_nr);
 int pci_scan_slot(struct pci_bus *bus, int devfn);
 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -618,6 +639,8 @@ int pci_restore_state(struct pci_dev *dev);
 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
+int pci_prepare_to_sleep(struct pci_dev *dev);
+int pci_back_from_sleep(struct pci_dev *dev);
 
 /* Functions for PCI Hotplug drivers to use */
 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
@@ -839,6 +862,11 @@ static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
 	return -EIO;
 }
 
+static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{
+	return -EIO;
+}
+
 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
 					unsigned int size)
 {
@@ -977,9 +1005,9 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
 /* If you want to know what to call your pci_dev, ask this function.
  * Again, it's a wrapper around the generic device.
  */
-static inline char *pci_name(struct pci_dev *pdev)
+static inline const char *pci_name(struct pci_dev *pdev)
 {
-	return pdev->dev.bus_id;
+	return dev_name(&pdev->dev);
 }
 
 
@@ -1014,7 +1042,9 @@ enum pci_fixup_pass {
 	pci_fixup_header,	/* After reading configuration header */
 	pci_fixup_final,	/* Final phase of device fixups */
 	pci_fixup_enable,	/* pci_enable_device() time */
-	pci_fixup_resume,	/* pci_enable_device() time */
+	pci_fixup_resume,	/* pci_device_resume() */
+	pci_fixup_suspend,	/* pci_device_suspend */
+	pci_fixup_resume_early, /* pci_device_resume_early() */
 };
 
 /* Anonymous variables would be nice... */
@@ -1036,6 +1066,12 @@ enum pci_fixup_pass {
 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
 		resume##vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
+	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
+		resume_early##vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
+	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
+		suspend##vendor##device##hook, vendor, device, hook)
 
 
 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
@@ -1060,7 +1096,10 @@ extern int pci_pci_problems;
 extern unsigned long pci_cardbus_io_size;
 extern unsigned long pci_cardbus_mem_size;
 
-extern int pcibios_add_platform_entries(struct pci_dev *dev);
+int pcibios_add_platform_entries(struct pci_dev *dev);
+void pcibios_disable_device(struct pci_dev *dev);
+int pcibios_set_pcie_reset_state(struct pci_dev *dev,
+				 enum pcie_reset_state state);
 
 #ifdef CONFIG_PCI_MMCONFIG
 extern void __init pci_mmcfg_early_init(void);
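The new fixup passes get their own declaration macros. A minimal sketch of registering a suspend-time quirk with them (the quirk body, the config-space offset, and the Intel/PCI_ANY_ID match are illustrative, not from this commit):

/* Illustrative only: run a quirk from the new suspend fixup pass. */
static void quirk_example_before_suspend(struct pci_dev *dev)
{
	/* e.g. mask a device-specific wakeup source before the system sleeps */
	pci_write_config_byte(dev, 0x40, 0x00);	/* register offset is made up */
}
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			  quirk_example_before_suspend);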
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 8f67e8f2a3cc..a08cd06b541a 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h | |||
@@ -95,9 +95,6 @@ struct hotplug_slot_attribute { | |||
95 | * @get_adapter_status: Called to get see if an adapter is present in the slot or not. | 95 | * @get_adapter_status: Called to get see if an adapter is present in the slot or not. |
96 | * If this field is NULL, the value passed in the struct hotplug_slot_info | 96 | * If this field is NULL, the value passed in the struct hotplug_slot_info |
97 | * will be used when this value is requested by a user. | 97 | * will be used when this value is requested by a user. |
98 | * @get_address: Called to get pci address of a slot. | ||
99 | * If this field is NULL, the value passed in the struct hotplug_slot_info | ||
100 | * will be used when this value is requested by a user. | ||
101 | * @get_max_bus_speed: Called to get the max bus speed for a slot. | 98 | * @get_max_bus_speed: Called to get the max bus speed for a slot. |
102 | * If this field is NULL, the value passed in the struct hotplug_slot_info | 99 | * If this field is NULL, the value passed in the struct hotplug_slot_info |
103 | * will be used when this value is requested by a user. | 100 | * will be used when this value is requested by a user. |
@@ -120,7 +117,6 @@ struct hotplug_slot_ops { | |||
120 | int (*get_attention_status) (struct hotplug_slot *slot, u8 *value); | 117 | int (*get_attention_status) (struct hotplug_slot *slot, u8 *value); |
121 | int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); | 118 | int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); |
122 | int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); | 119 | int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); |
123 | int (*get_address) (struct hotplug_slot *slot, u32 *value); | ||
124 | int (*get_max_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value); | 120 | int (*get_max_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value); |
125 | int (*get_cur_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value); | 121 | int (*get_cur_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value); |
126 | }; | 122 | }; |
@@ -140,7 +136,6 @@ struct hotplug_slot_info { | |||
140 | u8 attention_status; | 136 | u8 attention_status; |
141 | u8 latch_status; | 137 | u8 latch_status; |
142 | u8 adapter_status; | 138 | u8 adapter_status; |
143 | u32 address; | ||
144 | enum pci_bus_speed max_bus_speed; | 139 | enum pci_bus_speed max_bus_speed; |
145 | enum pci_bus_speed cur_bus_speed; | 140 | enum pci_bus_speed cur_bus_speed; |
146 | }; | 141 | }; |
@@ -166,15 +161,14 @@ struct hotplug_slot { | |||
166 | 161 | ||
167 | /* Variables below this are for use only by the hotplug pci core. */ | 162 | /* Variables below this are for use only by the hotplug pci core. */ |
168 | struct list_head slot_list; | 163 | struct list_head slot_list; |
169 | struct kobject kobj; | 164 | struct pci_slot *pci_slot; |
170 | }; | 165 | }; |
171 | #define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) | 166 | #define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) |
172 | 167 | ||
173 | extern int pci_hp_register (struct hotplug_slot *slot); | 168 | extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr); |
174 | extern int pci_hp_deregister (struct hotplug_slot *slot); | 169 | extern int pci_hp_deregister(struct hotplug_slot *slot); |
175 | extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot, | 170 | extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot, |
176 | struct hotplug_slot_info *info); | 171 | struct hotplug_slot_info *info); |
177 | extern struct kset *pci_hotplug_slots_kset; | ||
178 | 172 | ||
179 | /* PCI Setting Record (Type 0) */ | 173 | /* PCI Setting Record (Type 0) */ |
180 | struct hpp_type0 { | 174 | struct hpp_type0 { |
@@ -227,9 +221,9 @@ struct hotplug_params { | |||
227 | #include <acpi/acpi.h> | 221 | #include <acpi/acpi.h> |
228 | #include <acpi/acpi_bus.h> | 222 | #include <acpi/acpi_bus.h> |
229 | #include <acpi/actypes.h> | 223 | #include <acpi/actypes.h> |
230 | extern acpi_status acpi_run_oshp(acpi_handle handle); | ||
231 | extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, | 224 | extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, |
232 | struct hotplug_params *hpp); | 225 | struct hotplug_params *hpp); |
226 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); | ||
233 | int acpi_root_bridge(acpi_handle handle); | 227 | int acpi_root_bridge(acpi_handle handle); |
234 | #endif | 228 | #endif |
235 | #endif | 229 | #endif |
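The pci_hotplug.h changes bind each hotplug slot to a struct pci_slot owned by the PCI core: the per-slot kobject and the ->get_address() callback are gone, and pci_hp_register() now takes the parent bus and a device number. A minimal sketch of how a hotplug driver might register a slot against the new interface; the ops structure, slot name and info are hypothetical and error handling is reduced to the essentials:

#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

/* Hypothetical callbacks; note there is no ->get_address() member any more,
 * the slot's location is implied by the pci_slot the core creates. */
static struct hotplug_slot_ops example_slot_ops;

static int example_register_slot(struct pci_bus *bus, int device_nr,
				 struct hotplug_slot_info *info, char *name)
{
	struct hotplug_slot *slot;
	int ret;

	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return -ENOMEM;

	slot->ops = &example_slot_ops;
	slot->info = info;
	slot->name = name;

	/* New signature: the core ties the slot to bus/device_nr and fills
	 * in slot->pci_slot; the driver no longer manages its own kobject. */
	ret = pci_hp_register(slot, bus, device_nr);
	if (ret)
		kfree(slot);
	return ret;
}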
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 65953822c9cb..6be6a7943d8b 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2171,6 +2171,8 @@ | |||
2171 | #define PCI_DEVICE_ID_MPC8544 0x0033 | 2171 | #define PCI_DEVICE_ID_MPC8544 0x0033 |
2172 | #define PCI_DEVICE_ID_MPC8572E 0x0040 | 2172 | #define PCI_DEVICE_ID_MPC8572E 0x0040 |
2173 | #define PCI_DEVICE_ID_MPC8572 0x0041 | 2173 | #define PCI_DEVICE_ID_MPC8572 0x0041 |
2174 | #define PCI_DEVICE_ID_MPC8536E 0x0050 | ||
2175 | #define PCI_DEVICE_ID_MPC8536 0x0051 | ||
2174 | #define PCI_DEVICE_ID_MPC8641 0x7010 | 2176 | #define PCI_DEVICE_ID_MPC8641 0x7010 |
2175 | #define PCI_DEVICE_ID_MPC8641D 0x7011 | 2177 | #define PCI_DEVICE_ID_MPC8641D 0x7011 |
2176 | #define PCI_DEVICE_ID_MPC8610 0x7018 | 2178 | #define PCI_DEVICE_ID_MPC8610 0x7018 |
@@ -2188,6 +2190,7 @@ | |||
2188 | #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 | 2190 | #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 |
2189 | #define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 | 2191 | #define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 |
2190 | #define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 | 2192 | #define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 |
2193 | #define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 | ||
2191 | #define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 | 2194 | #define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 |
2192 | 2195 | ||
2193 | #define PCI_VENDOR_ID_KORENIX 0x1982 | 2196 | #define PCI_VENDOR_ID_KORENIX 0x1982 |
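The pci_ids.h hunk only adds new device ID constants. Purely as an illustration, a PCI driver would pick such an ID up in its match table; the table name below is made up:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MMC) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_ids);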
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index c0c1223c9194..19958b929905 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h | |||
@@ -231,6 +231,7 @@ | |||
231 | #define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */ | 231 | #define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */ |
232 | #define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */ | 232 | #define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */ |
233 | #define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */ | 233 | #define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */ |
234 | #define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */ | ||
234 | #define PCI_PM_CTRL 4 /* PM control and status register */ | 235 | #define PCI_PM_CTRL 4 /* PM control and status register */ |
235 | #define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ | 236 | #define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ |
236 | #define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */ | 237 | #define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */ |
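PCI_PM_CAP_PME_SHIFT names the bit position of the PME_Support field within the PMC register, so code that masks with PCI_PM_CAP_PME_MASK can shift the field down without a magic number. A small sketch under that assumption; the helper name is invented:

#include <linux/pci.h>
#include <linux/pci_regs.h>

/* Return the PME_Support bits (one per power state, D0..D3cold). */
static u16 example_pme_support(struct pci_dev *dev, int pm_cap)
{
	u16 pmc;

	pci_read_config_word(dev, pm_cap + PCI_PM_PMC, &pmc);
	return (pmc & PCI_PM_CAP_PME_MASK) >> PCI_PM_CAP_PME_SHIFT;
}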
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 3261681c82a4..95ac21ab3a09 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -53,6 +53,7 @@ struct platform_driver { | |||
53 | int (*suspend_late)(struct platform_device *, pm_message_t state); | 53 | int (*suspend_late)(struct platform_device *, pm_message_t state); |
54 | int (*resume_early)(struct platform_device *); | 54 | int (*resume_early)(struct platform_device *); |
55 | int (*resume)(struct platform_device *); | 55 | int (*resume)(struct platform_device *); |
56 | struct pm_ext_ops *pm; | ||
56 | struct device_driver driver; | 57 | struct device_driver driver; |
57 | }; | 58 | }; |
58 | 59 | ||
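The new pm member lets a platform driver hand the core a set of struct pm_ext_ops callbacks (defined in the pm.h hunk below) alongside, or instead of, the legacy suspend/resume methods. A hypothetical driver might wire it up as follows; the driver name and the example_pm object are placeholders:

#include <linux/platform_device.h>
#include <linux/pm.h>

static struct pm_ext_ops example_pm;	/* callbacks filled in as in the pm.h sketches below */

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",	/* hypothetical platform device name */
	},
	.pm = &example_pm,
	/* the legacy .suspend/.resume members may still be used instead */
};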
diff --git a/include/linux/pm.h b/include/linux/pm.h index 39a7ee859b67..4ad9de94449a 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -112,7 +112,9 @@ typedef struct pm_message { | |||
112 | int event; | 112 | int event; |
113 | } pm_message_t; | 113 | } pm_message_t; |
114 | 114 | ||
115 | /* | 115 | /** |
116 | * struct pm_ops - device PM callbacks | ||
117 | * | ||
116 | * Several driver power state transitions are externally visible, affecting | 118 | * Several driver power state transitions are externally visible, affecting |
117 | * the state of pending I/O queues and (for drivers that touch hardware) | 119 | * the state of pending I/O queues and (for drivers that touch hardware) |
118 | * interrupts, wakeups, DMA, and other hardware state. There may also be | 120 | * interrupts, wakeups, DMA, and other hardware state. There may also be |
@@ -120,6 +122,284 @@ typedef struct pm_message { | |||
120 | * to the rest of the driver stack (such as a driver that's ON gating off | 122 | * to the rest of the driver stack (such as a driver that's ON gating off |
121 | * clocks which are not in active use). | 123 | * clocks which are not in active use). |
122 | * | 124 | * |
125 | * The externally visible transitions are handled with the help of the following | ||
126 | * callbacks included in this structure: | ||
127 | * | ||
128 | * @prepare: Prepare the device for the upcoming transition, but do NOT change | ||
129 | * its hardware state. Prevent new children of the device from being | ||
130 | * registered after @prepare() returns (the driver's subsystem and | ||
131 | * generally the rest of the kernel is supposed to prevent new calls to the | ||
132 | * probe method from being made, too, once @prepare() has succeeded). If | ||
133 | * @prepare() detects a situation it cannot handle (e.g. registration of a | ||
134 | * child already in progress), it may return -EAGAIN, so that the PM core | ||
135 | * can execute it once again (e.g. after the new child has been registered) | ||
136 | * to recover from the race condition. This method is executed for all | ||
137 | * kinds of suspend transitions and is followed by one of the suspend | ||
138 | * callbacks: @suspend(), @freeze(), or @poweroff(). | ||
139 | * The PM core executes @prepare() for all devices before starting to | ||
140 | * execute suspend callbacks for any of them, so drivers may assume all of | ||
141 | * the other devices to be present and functional while @prepare() is being | ||
142 | * executed. In particular, it is safe to make GFP_KERNEL memory | ||
143 | * allocations from within @prepare(). However, drivers may NOT assume | ||
144 | * anything about the availability of the user space at that time and it | ||
145 | * is not correct to request firmware from within @prepare() (it's too | ||
146 | * late to do that). [To work around this limitation, drivers may | ||
147 | * register suspend and hibernation notifiers that are executed before the | ||
148 | * freezing of tasks.] | ||
149 | * | ||
150 | * @complete: Undo the changes made by @prepare(). This method is executed for | ||
151 | * all kinds of resume transitions, following one of the resume callbacks: | ||
152 | * @resume(), @thaw(), @restore(). Also called if the state transition | ||
153 | * fails before the driver's suspend callback (@suspend(), @freeze(), | ||
154 | * @poweroff()) can be executed (e.g. if the suspend callback fails for one | ||
155 | * of the other devices that the PM core has unsuccessfully attempted to | ||
156 | * suspend earlier). | ||
157 | * The PM core executes @complete() after it has executed the appropriate | ||
158 | * resume callback for all devices. | ||
159 | * | ||
160 | * @suspend: Executed before putting the system into a sleep state in which the | ||
161 | * contents of main memory are preserved. Quiesce the device, put it into | ||
162 | * a low power state appropriate for the upcoming system state (such as | ||
163 | * PCI_D3hot), and enable wakeup events as appropriate. | ||
164 | * | ||
165 | * @resume: Executed after waking the system up from a sleep state in which the | ||
166 | * contents of main memory were preserved. Put the device into the | ||
167 | * appropriate state, according to the information saved in memory by the | ||
168 | * preceding @suspend(). The driver starts working again, responding to | ||
169 | * hardware events and software requests. The hardware may have gone | ||
170 | * through a power-off reset, or it may have maintained state from the | ||
171 | * previous suspend() which the driver may rely on while resuming. On most | ||
172 | * platforms, there are no restrictions on availability of resources like | ||
173 | * clocks during @resume(). | ||
174 | * | ||
175 | * @freeze: Hibernation-specific, executed before creating a hibernation image. | ||
176 | * Quiesce operations so that a consistent image can be created, but do NOT | ||
177 | * otherwise put the device into a low power device state and do NOT emit | ||
178 | * system wakeup events. Save in main memory the device settings to be | ||
179 | * used by @restore() during the subsequent resume from hibernation or by | ||
180 | * the subsequent @thaw(), if the creation of the image or the restoration | ||
181 | * of main memory contents from it fails. | ||
182 | * | ||
183 | * @thaw: Hibernation-specific, executed after creating a hibernation image OR | ||
184 | * if the creation of the image fails. Also executed after a failing | ||
185 | * attempt to restore the contents of main memory from such an image. | ||
186 | * Undo the changes made by the preceding @freeze(), so the device can be | ||
187 | * operated in the same way as immediately before the call to @freeze(). | ||
188 | * | ||
189 | * @poweroff: Hibernation-specific, executed after saving a hibernation image. | ||
190 | * Quiesce the device, put it into a low power state appropriate for the | ||
191 | * upcoming system state (such as PCI_D3hot), and enable wakeup events as | ||
192 | * appropriate. | ||
193 | * | ||
194 | * @restore: Hibernation-specific, executed after restoring the contents of main | ||
195 | * memory from a hibernation image. Driver starts working again, | ||
196 | * responding to hardware events and software requests. Drivers may NOT | ||
197 | * make ANY assumptions about the hardware state right prior to @restore(). | ||
198 | * On most platforms, there are no restrictions on availability of | ||
199 | * resources like clocks during @restore(). | ||
200 | * | ||
201 | * All of the above callbacks, except for @complete(), return error codes. | ||
202 | * However, the error codes returned by the resume operations, @resume(), | ||
203 | * @thaw(), and @restore(), do not cause the PM core to abort the resume | ||
204 | * transition during which they are returned. The error codes returned in | ||
205 | * those cases are only printed by the PM core to the system logs for debugging | ||
206 | * purposes. Still, it is recommended that drivers only return error codes | ||
207 | * from their resume methods in case of an unrecoverable failure (i.e. when the | ||
208 | * device being handled refuses to resume and becomes unusable) to allow us to | ||
209 | * modify the PM core in the future, so that it can avoid attempting to handle | ||
210 | * devices that failed to resume and their children. | ||
211 | * | ||
212 | * It is allowed to unregister devices while the above callbacks are being | ||
213 | * executed. However, it is not allowed to unregister a device from within any | ||
214 | * of its own callbacks. | ||
215 | */ | ||
216 | |||
217 | struct pm_ops { | ||
218 | int (*prepare)(struct device *dev); | ||
219 | void (*complete)(struct device *dev); | ||
220 | int (*suspend)(struct device *dev); | ||
221 | int (*resume)(struct device *dev); | ||
222 | int (*freeze)(struct device *dev); | ||
223 | int (*thaw)(struct device *dev); | ||
224 | int (*poweroff)(struct device *dev); | ||
225 | int (*restore)(struct device *dev); | ||
226 | }; | ||
227 | |||
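A driver implements only the phases it needs; the kernel-doc above maps each member of struct pm_ops to one phase of a suspend or hibernation cycle. The sketch below, with invented names and empty bodies, shows the shape of a suspend-to-RAM-only implementation:

#include <linux/pm.h>

static int example_prepare(struct device *dev)
{
	/* Keep new children from being registered; no hardware access here. */
	return 0;
}

static void example_complete(struct device *dev)
{
	/* Undo whatever example_prepare() set up. */
}

static int example_suspend(struct device *dev)
{
	/* Quiesce the device, save its state, enter a low power state. */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* Reprogram the device; it may have gone through a power-off reset. */
	return 0;
}

static struct pm_ops example_pm_ops = {
	.prepare	= example_prepare,
	.complete	= example_complete,
	.suspend	= example_suspend,
	.resume		= example_resume,
};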
228 | /** | ||
229 | * struct pm_ext_ops - extended device PM callbacks | ||
230 | * | ||
231 | * Some devices require certain operations related to suspend and hibernation | ||
232 | * to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below | ||
233 | * is defined, adding callbacks to be executed with interrupts disabled to | ||
234 | * 'struct pm_ops'. | ||
235 | * | ||
236 | * The following callbacks included in 'struct pm_ext_ops' are executed with | ||
237 | * the nonboot CPUs switched off and with interrupts disabled on the only | ||
238 | * functional CPU. They also are executed with the PM core list of devices | ||
239 | * locked, so they must NOT unregister any devices. | ||
240 | * | ||
241 | * @suspend_noirq: Complete the operations of ->suspend() by carrying out any | ||
242 | * actions required for suspending the device that need interrupts to be | ||
243 | * disabled | ||
244 | * | ||
245 | * @resume_noirq: Prepare for the execution of ->resume() by carrying out any | ||
246 | * actions required for resuming the device that need interrupts to be | ||
247 | * disabled | ||
248 | * | ||
249 | * @freeze_noirq: Complete the operations of ->freeze() by carrying out any | ||
250 | * actions required for freezing the device that need interrupts to be | ||
251 | * disabled | ||
252 | * | ||
253 | * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any | ||
254 | * actions required for thawing the device that need interrupts to be | ||
255 | * disabled | ||
256 | * | ||
257 | * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any | ||
258 | * actions required for handling the device that need interrupts to be | ||
259 | * disabled | ||
260 | * | ||
261 | * @restore_noirq: Prepare for the execution of ->restore() by carrying out any | ||
262 | * actions required for restoring the operations of the device that need | ||
263 | * interrupts to be disabled | ||
264 | * | ||
265 | * All of the above callbacks return error codes, but the error codes returned | ||
266 | * by the resume operations, @resume_noirq(), @thaw_noirq(), and | ||
267 | * @restore_noirq(), do not cause the PM core to abort the resume transition | ||
268 | * during which they are returned. The error codes returned in those cases are | ||
269 | * only printed by the PM core to the system logs for debugging purposes. | ||
270 | * Still, as stated above, it is recommended that drivers only return error | ||
271 | * codes from their resume methods if the device being handled fails to resume | ||
272 | * and is not usable any more. | ||
273 | */ | ||
274 | |||
275 | struct pm_ext_ops { | ||
276 | struct pm_ops base; | ||
277 | int (*suspend_noirq)(struct device *dev); | ||
278 | int (*resume_noirq)(struct device *dev); | ||
279 | int (*freeze_noirq)(struct device *dev); | ||
280 | int (*thaw_noirq)(struct device *dev); | ||
281 | int (*poweroff_noirq)(struct device *dev); | ||
282 | int (*restore_noirq)(struct device *dev); | ||
283 | }; | ||
284 | |||
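The _noirq callbacks run with the nonboot CPUs offline and interrupts disabled, which makes them the natural place for the final register accesses that must not race with the device's interrupt handler. A sketch with hypothetical names, complementing the pm_ops example above:

#include <linux/pm.h>

static int example_suspend_noirq(struct device *dev)
{
	/* Interrupts are already off: mask the device's interrupt sources
	 * and arm its wakeup logic as the very last suspend step. */
	return 0;
}

static int example_resume_noirq(struct device *dev)
{
	/* Runs before ->resume(): restore interrupt generation so the
	 * ordinary resume callback can talk to the device safely. */
	return 0;
}

static struct pm_ext_ops example_pm_ext = {
	/* .base would carry the regular ->suspend()/->resume() callbacks,
	 * as in the pm_ops sketch above. */
	.suspend_noirq	= example_suspend_noirq,
	.resume_noirq	= example_resume_noirq,
};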
285 | /** | ||
286 | * PM_EVENT_ messages | ||
287 | * | ||
288 | * The following PM_EVENT_ messages are defined for the internal use of the PM | ||
289 | * core, in order to provide a mechanism allowing the high level suspend and | ||
290 | * hibernation code to convey the necessary information to the device PM core | ||
291 | * code: | ||
292 | * | ||
293 | * ON No transition. | ||
294 | * | ||
295 | * FREEZE System is going to hibernate, call ->prepare() and ->freeze() | ||
296 | * for all devices. | ||
297 | * | ||
298 | * SUSPEND System is going to suspend, call ->prepare() and ->suspend() | ||
299 | * for all devices. | ||
300 | * | ||
301 | * HIBERNATE Hibernation image has been saved, call ->prepare() and | ||
302 | * ->poweroff() for all devices. | ||
303 | * | ||
304 | * QUIESCE Contents of main memory are going to be restored from a (loaded) | ||
305 | * hibernation image, call ->prepare() and ->freeze() for all | ||
306 | * devices. | ||
307 | * | ||
308 | * RESUME System is resuming, call ->resume() and ->complete() for all | ||
309 | * devices. | ||
310 | * | ||
311 | * THAW Hibernation image has been created, call ->thaw() and | ||
312 | * ->complete() for all devices. | ||
313 | * | ||
314 | * RESTORE Contents of main memory have been restored from a hibernation | ||
315 | * image, call ->restore() and ->complete() for all devices. | ||
316 | * | ||
317 | * RECOVER Creation of a hibernation image or restoration of the main | ||
318 | * memory contents from a hibernation image has failed, call | ||
319 | * ->thaw() and ->complete() for all devices. | ||
320 | */ | ||
321 | |||
322 | #define PM_EVENT_ON 0x0000 | ||
323 | #define PM_EVENT_FREEZE 0x0001 | ||
324 | #define PM_EVENT_SUSPEND 0x0002 | ||
325 | #define PM_EVENT_HIBERNATE 0x0004 | ||
326 | #define PM_EVENT_QUIESCE 0x0008 | ||
327 | #define PM_EVENT_RESUME 0x0010 | ||
328 | #define PM_EVENT_THAW 0x0020 | ||
329 | #define PM_EVENT_RESTORE 0x0040 | ||
330 | #define PM_EVENT_RECOVER 0x0080 | ||
331 | |||
332 | #define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) | ||
333 | |||
334 | #define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) | ||
335 | #define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) | ||
336 | #define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) | ||
337 | #define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) | ||
338 | #define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, }) | ||
339 | #define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) | ||
340 | #define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) | ||
341 | #define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) | ||
342 | #define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) | ||
343 | |||
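Drivers still using the single legacy ->suspend(dev, state) callback can key their behaviour off these event codes, with PM_EVENT_SLEEP grouping the two cases in which power may actually be removed. An illustrative, hypothetical legacy handler:

#include <linux/platform_device.h>

static int example_legacy_suspend(struct platform_device *pdev, pm_message_t state)
{
	if (state.event & PM_EVENT_SLEEP) {
		/* SUSPEND or HIBERNATE: power may go away, arm wakeup sources. */
	} else if (state.event == PM_EVENT_FREEZE ||
		   state.event == PM_EVENT_QUIESCE) {
		/* Only quiesce for image creation/restore; keep power settings. */
	}
	return 0;
}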
344 | /** | ||
345 | * Device power management states | ||
346 | * | ||
347 | * These state labels are used internally by the PM core to indicate the current | ||
348 | * status of a device with respect to the PM core operations. | ||
349 | * | ||
350 | * DPM_ON Device is regarded as operational. Set this way | ||
351 | * initially and when ->complete() is about to be called. | ||
352 | * Also set when ->prepare() fails. | ||
353 | * | ||
354 | * DPM_PREPARING Device is going to be prepared for a PM transition. Set | ||
355 | * when ->prepare() is about to be called. | ||
356 | * | ||
357 | * DPM_RESUMING Device is going to be resumed. Set when ->resume(), | ||
358 | * ->thaw(), or ->restore() is about to be called. | ||
359 | * | ||
360 | * DPM_SUSPENDING Device has been prepared for a power transition. Set | ||
361 | * when ->prepare() has just succeeded. | ||
362 | * | ||
363 | * DPM_OFF Device is regarded as inactive. Set immediately after | ||
364 | * ->suspend(), ->freeze(), or ->poweroff() has succeeded. | ||
365 | * Also set when ->resume_noirq(), ->thaw_noirq(), or | ||
366 | * ->restore_noirq() is about to be called. | ||
367 | * | ||
368 | * DPM_OFF_IRQ Device is in a "deep sleep". Set immediately after | ||
369 | * ->suspend_noirq(), ->freeze_noirq(), or | ||
370 | * ->poweroff_noirq() has just succeeded. | ||
371 | */ | ||
372 | |||
373 | enum dpm_state { | ||
374 | DPM_INVALID, | ||
375 | DPM_ON, | ||
376 | DPM_PREPARING, | ||
377 | DPM_RESUMING, | ||
378 | DPM_SUSPENDING, | ||
379 | DPM_OFF, | ||
380 | DPM_OFF_IRQ, | ||
381 | }; | ||
382 | |||
383 | struct dev_pm_info { | ||
384 | pm_message_t power_state; | ||
385 | unsigned can_wakeup:1; | ||
386 | unsigned should_wakeup:1; | ||
387 | enum dpm_state status; /* Owned by the PM core */ | ||
388 | #ifdef CONFIG_PM_SLEEP | ||
389 | struct list_head entry; | ||
390 | #endif | ||
391 | }; | ||
392 | |||
393 | /* | ||
394 | * The PM_EVENT_ messages are also used by drivers implementing the legacy | ||
395 | * suspend framework, based on the ->suspend() and ->resume() callbacks common | ||
396 | * for suspend and hibernation transitions, according to the rules below. | ||
397 | */ | ||
398 | |||
399 | /* Necessary, because several drivers use PM_EVENT_PRETHAW */ | ||
400 | #define PM_EVENT_PRETHAW PM_EVENT_QUIESCE | ||
401 | |||
402 | /* | ||
123 | * One transition is triggered by resume(), after a suspend() call; the | 403 | * One transition is triggered by resume(), after a suspend() call; the |
124 | * message is implicit: | 404 | * message is implicit: |
125 | * | 405 | * |
@@ -164,35 +444,13 @@ typedef struct pm_message { | |||
164 | * or from system low-power states such as standby or suspend-to-RAM. | 444 | * or from system low-power states such as standby or suspend-to-RAM. |
165 | */ | 445 | */ |
166 | 446 | ||
167 | #define PM_EVENT_ON 0 | 447 | #ifdef CONFIG_PM_SLEEP |
168 | #define PM_EVENT_FREEZE 1 | 448 | extern void device_pm_lock(void); |
169 | #define PM_EVENT_SUSPEND 2 | 449 | extern void device_power_up(pm_message_t state); |
170 | #define PM_EVENT_HIBERNATE 4 | 450 | extern void device_resume(pm_message_t state); |
171 | #define PM_EVENT_PRETHAW 8 | ||
172 | |||
173 | #define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) | ||
174 | |||
175 | #define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) | ||
176 | #define PMSG_PRETHAW ((struct pm_message){ .event = PM_EVENT_PRETHAW, }) | ||
177 | #define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) | ||
178 | #define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) | ||
179 | #define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) | ||
180 | |||
181 | struct dev_pm_info { | ||
182 | pm_message_t power_state; | ||
183 | unsigned can_wakeup:1; | ||
184 | unsigned should_wakeup:1; | ||
185 | bool sleeping:1; /* Owned by the PM core */ | ||
186 | #ifdef CONFIG_PM_SLEEP | ||
187 | struct list_head entry; | ||
188 | #endif | ||
189 | }; | ||
190 | 451 | ||
452 | extern void device_pm_unlock(void); | ||
191 | extern int device_power_down(pm_message_t state); | 453 | extern int device_power_down(pm_message_t state); |
192 | extern void device_power_up(void); | ||
193 | extern void device_resume(void); | ||
194 | |||
195 | #ifdef CONFIG_PM_SLEEP | ||
196 | extern int device_suspend(pm_message_t state); | 454 | extern int device_suspend(pm_message_t state); |
197 | extern int device_prepare_suspend(pm_message_t state); | 455 | extern int device_prepare_suspend(pm_message_t state); |
198 | 456 | ||
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index f0d0b2cb8d20..0aae7776185e 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h | |||
@@ -35,6 +35,11 @@ static inline void device_init_wakeup(struct device *dev, int val) | |||
35 | dev->power.can_wakeup = dev->power.should_wakeup = !!val; | 35 | dev->power.can_wakeup = dev->power.should_wakeup = !!val; |
36 | } | 36 | } |
37 | 37 | ||
38 | static inline void device_set_wakeup_capable(struct device *dev, int val) | ||
39 | { | ||
40 | dev->power.can_wakeup = !!val; | ||
41 | } | ||
42 | |||
38 | static inline int device_can_wakeup(struct device *dev) | 43 | static inline int device_can_wakeup(struct device *dev) |
39 | { | 44 | { |
40 | return dev->power.can_wakeup; | 45 | return dev->power.can_wakeup; |
@@ -47,21 +52,7 @@ static inline void device_set_wakeup_enable(struct device *dev, int val) | |||
47 | 52 | ||
48 | static inline int device_may_wakeup(struct device *dev) | 53 | static inline int device_may_wakeup(struct device *dev) |
49 | { | 54 | { |
50 | return dev->power.can_wakeup & dev->power.should_wakeup; | 55 | return dev->power.can_wakeup && dev->power.should_wakeup; |
51 | } | ||
52 | |||
53 | /* | ||
54 | * Platform hook to activate device wakeup capability, if that's not already | ||
55 | * handled by enable_irq_wake() etc. | ||
56 | * Returns zero on success, else negative errno | ||
57 | */ | ||
58 | extern int (*platform_enable_wakeup)(struct device *dev, int is_on); | ||
59 | |||
60 | static inline int call_platform_enable_wakeup(struct device *dev, int is_on) | ||
61 | { | ||
62 | if (platform_enable_wakeup) | ||
63 | return (*platform_enable_wakeup)(dev, is_on); | ||
64 | return 0; | ||
65 | } | 56 | } |
66 | 57 | ||
67 | #else /* !CONFIG_PM */ | 58 | #else /* !CONFIG_PM */ |
@@ -72,6 +63,8 @@ static inline void device_init_wakeup(struct device *dev, int val) | |||
72 | dev->power.can_wakeup = !!val; | 63 | dev->power.can_wakeup = !!val; |
73 | } | 64 | } |
74 | 65 | ||
66 | static inline void device_set_wakeup_capable(struct device *dev, int val) { } | ||
67 | |||
75 | static inline int device_can_wakeup(struct device *dev) | 68 | static inline int device_can_wakeup(struct device *dev) |
76 | { | 69 | { |
77 | return dev->power.can_wakeup; | 70 | return dev->power.can_wakeup; |
@@ -80,11 +73,6 @@ static inline int device_can_wakeup(struct device *dev) | |||
80 | #define device_set_wakeup_enable(dev, val) do {} while (0) | 73 | #define device_set_wakeup_enable(dev, val) do {} while (0) |
81 | #define device_may_wakeup(dev) 0 | 74 | #define device_may_wakeup(dev) 0 |
82 | 75 | ||
83 | static inline int call_platform_enable_wakeup(struct device *dev, int is_on) | ||
84 | { | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | #endif /* !CONFIG_PM */ | 76 | #endif /* !CONFIG_PM */ |
89 | 77 | ||
90 | #endif /* _LINUX_PM_WAKEUP_H */ | 78 | #endif /* _LINUX_PM_WAKEUP_H */ |
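device_set_wakeup_capable() lets bus or platform code declare that a device is able to wake the system without also deciding whether it should; the should_wakeup bit stays under user-space control. Probe-time usage might look like this (the helper and its has_wake_pin argument are invented):

#include <linux/device.h>

static void example_setup_wakeup(struct device *dev, int has_wake_pin)
{
	/* Declare the capability only; policy is left to
	 * device_set_wakeup_enable() and user space. */
	device_set_wakeup_capable(dev, has_wake_pin);

	if (device_may_wakeup(dev)) {
		/* can_wakeup && should_wakeup: arm the wakeup source here. */
	}
}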
diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 63b128d512fb..1ce54b63085d 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h | |||
@@ -1,6 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Linux Plug and Play Support | 2 | * Linux Plug and Play Support |
3 | * Copyright by Adam Belay <ambx1@neo.rr.com> | 3 | * Copyright by Adam Belay <ambx1@neo.rr.com> |
4 | * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. | ||
5 | * Bjorn Helgaas <bjorn.helgaas@hp.com> | ||
4 | */ | 6 | */ |
5 | 7 | ||
6 | #ifndef _LINUX_PNP_H | 8 | #ifndef _LINUX_PNP_H |
@@ -15,7 +17,6 @@ | |||
15 | 17 | ||
16 | struct pnp_protocol; | 18 | struct pnp_protocol; |
17 | struct pnp_dev; | 19 | struct pnp_dev; |
18 | struct pnp_resource_table; | ||
19 | 20 | ||
20 | /* | 21 | /* |
21 | * Resource Management | 22 | * Resource Management |
@@ -24,7 +25,14 @@ struct resource *pnp_get_resource(struct pnp_dev *, unsigned int, unsigned int); | |||
24 | 25 | ||
25 | static inline int pnp_resource_valid(struct resource *res) | 26 | static inline int pnp_resource_valid(struct resource *res) |
26 | { | 27 | { |
27 | if (res && !(res->flags & IORESOURCE_UNSET)) | 28 | if (res) |
29 | return 1; | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | static inline int pnp_resource_enabled(struct resource *res) | ||
34 | { | ||
35 | if (res && !(res->flags & IORESOURCE_DISABLED)) | ||
28 | return 1; | 36 | return 1; |
29 | return 0; | 37 | return 0; |
30 | } | 38 | } |
@@ -40,19 +48,31 @@ static inline resource_size_t pnp_resource_len(struct resource *res) | |||
40 | static inline resource_size_t pnp_port_start(struct pnp_dev *dev, | 48 | static inline resource_size_t pnp_port_start(struct pnp_dev *dev, |
41 | unsigned int bar) | 49 | unsigned int bar) |
42 | { | 50 | { |
43 | return pnp_get_resource(dev, IORESOURCE_IO, bar)->start; | 51 | struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); |
52 | |||
53 | if (pnp_resource_valid(res)) | ||
54 | return res->start; | ||
55 | return 0; | ||
44 | } | 56 | } |
45 | 57 | ||
46 | static inline resource_size_t pnp_port_end(struct pnp_dev *dev, | 58 | static inline resource_size_t pnp_port_end(struct pnp_dev *dev, |
47 | unsigned int bar) | 59 | unsigned int bar) |
48 | { | 60 | { |
49 | return pnp_get_resource(dev, IORESOURCE_IO, bar)->end; | 61 | struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); |
62 | |||
63 | if (pnp_resource_valid(res)) | ||
64 | return res->end; | ||
65 | return 0; | ||
50 | } | 66 | } |
51 | 67 | ||
52 | static inline unsigned long pnp_port_flags(struct pnp_dev *dev, | 68 | static inline unsigned long pnp_port_flags(struct pnp_dev *dev, |
53 | unsigned int bar) | 69 | unsigned int bar) |
54 | { | 70 | { |
55 | return pnp_get_resource(dev, IORESOURCE_IO, bar)->flags; | 71 | struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); |
72 | |||
73 | if (pnp_resource_valid(res)) | ||
74 | return res->flags; | ||
75 | return IORESOURCE_IO | IORESOURCE_AUTO; | ||
56 | } | 76 | } |
57 | 77 | ||
58 | static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar) | 78 | static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar) |
@@ -63,25 +83,41 @@ static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar) | |||
63 | static inline resource_size_t pnp_port_len(struct pnp_dev *dev, | 83 | static inline resource_size_t pnp_port_len(struct pnp_dev *dev, |
64 | unsigned int bar) | 84 | unsigned int bar) |
65 | { | 85 | { |
66 | return pnp_resource_len(pnp_get_resource(dev, IORESOURCE_IO, bar)); | 86 | struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); |
87 | |||
88 | if (pnp_resource_valid(res)) | ||
89 | return pnp_resource_len(res); | ||
90 | return 0; | ||
67 | } | 91 | } |
68 | 92 | ||
69 | 93 | ||
70 | static inline resource_size_t pnp_mem_start(struct pnp_dev *dev, | 94 | static inline resource_size_t pnp_mem_start(struct pnp_dev *dev, |
71 | unsigned int bar) | 95 | unsigned int bar) |
72 | { | 96 | { |
73 | return pnp_get_resource(dev, IORESOURCE_MEM, bar)->start; | 97 | struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); |
98 | |||
99 | if (pnp_resource_valid(res)) | ||
100 | return res->start; | ||
101 | return 0; | ||
74 | } | 102 | } |
75 | 103 | ||
76 | static inline resource_size_t pnp_mem_end(struct pnp_dev *dev, | 104 | static inline resource_size_t pnp_mem_end(struct pnp_dev *dev, |
77 | unsigned int bar) | 105 | unsigned int bar) |
78 | { | 106 | { |
79 | return pnp_get_resource(dev, IORESOURCE_MEM, bar)->end; | 107 | struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); |
108 | |||
109 | if (pnp_resource_valid(res)) | ||
110 | return res->end; | ||
111 | return 0; | ||
80 | } | 112 | } |
81 | 113 | ||
82 | static inline unsigned long pnp_mem_flags(struct pnp_dev *dev, unsigned int bar) | 114 | static inline unsigned long pnp_mem_flags(struct pnp_dev *dev, unsigned int bar) |
83 | { | 115 | { |
84 | return pnp_get_resource(dev, IORESOURCE_MEM, bar)->flags; | 116 | struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); |
117 | |||
118 | if (pnp_resource_valid(res)) | ||
119 | return res->flags; | ||
120 | return IORESOURCE_MEM | IORESOURCE_AUTO; | ||
85 | } | 121 | } |
86 | 122 | ||
87 | static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar) | 123 | static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar) |
@@ -92,18 +128,30 @@ static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar) | |||
92 | static inline resource_size_t pnp_mem_len(struct pnp_dev *dev, | 128 | static inline resource_size_t pnp_mem_len(struct pnp_dev *dev, |
93 | unsigned int bar) | 129 | unsigned int bar) |
94 | { | 130 | { |
95 | return pnp_resource_len(pnp_get_resource(dev, IORESOURCE_MEM, bar)); | 131 | struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); |
132 | |||
133 | if (pnp_resource_valid(res)) | ||
134 | return pnp_resource_len(res); | ||
135 | return 0; | ||
96 | } | 136 | } |
97 | 137 | ||
98 | 138 | ||
99 | static inline resource_size_t pnp_irq(struct pnp_dev *dev, unsigned int bar) | 139 | static inline resource_size_t pnp_irq(struct pnp_dev *dev, unsigned int bar) |
100 | { | 140 | { |
101 | return pnp_get_resource(dev, IORESOURCE_IRQ, bar)->start; | 141 | struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar); |
142 | |||
143 | if (pnp_resource_valid(res)) | ||
144 | return res->start; | ||
145 | return -1; | ||
102 | } | 146 | } |
103 | 147 | ||
104 | static inline unsigned long pnp_irq_flags(struct pnp_dev *dev, unsigned int bar) | 148 | static inline unsigned long pnp_irq_flags(struct pnp_dev *dev, unsigned int bar) |
105 | { | 149 | { |
106 | return pnp_get_resource(dev, IORESOURCE_IRQ, bar)->flags; | 150 | struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar); |
151 | |||
152 | if (pnp_resource_valid(res)) | ||
153 | return res->flags; | ||
154 | return IORESOURCE_IRQ | IORESOURCE_AUTO; | ||
107 | } | 155 | } |
108 | 156 | ||
109 | static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar) | 157 | static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar) |
@@ -114,12 +162,20 @@ static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar) | |||
114 | 162 | ||
115 | static inline resource_size_t pnp_dma(struct pnp_dev *dev, unsigned int bar) | 163 | static inline resource_size_t pnp_dma(struct pnp_dev *dev, unsigned int bar) |
116 | { | 164 | { |
117 | return pnp_get_resource(dev, IORESOURCE_DMA, bar)->start; | 165 | struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar); |
166 | |||
167 | if (pnp_resource_valid(res)) | ||
168 | return res->start; | ||
169 | return -1; | ||
118 | } | 170 | } |
119 | 171 | ||
120 | static inline unsigned long pnp_dma_flags(struct pnp_dev *dev, unsigned int bar) | 172 | static inline unsigned long pnp_dma_flags(struct pnp_dev *dev, unsigned int bar) |
121 | { | 173 | { |
122 | return pnp_get_resource(dev, IORESOURCE_DMA, bar)->flags; | 174 | struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar); |
175 | |||
176 | if (pnp_resource_valid(res)) | ||
177 | return res->flags; | ||
178 | return IORESOURCE_DMA | IORESOURCE_AUTO; | ||
123 | } | 179 | } |
124 | 180 | ||
125 | static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar) | 181 | static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar) |
@@ -128,57 +184,6 @@ static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar) | |||
128 | } | 184 | } |
129 | 185 | ||
130 | 186 | ||
131 | #define PNP_PORT_FLAG_16BITADDR (1<<0) | ||
132 | #define PNP_PORT_FLAG_FIXED (1<<1) | ||
133 | |||
134 | struct pnp_port { | ||
135 | unsigned short min; /* min base number */ | ||
136 | unsigned short max; /* max base number */ | ||
137 | unsigned char align; /* align boundary */ | ||
138 | unsigned char size; /* size of range */ | ||
139 | unsigned char flags; /* port flags */ | ||
140 | unsigned char pad; /* pad */ | ||
141 | struct pnp_port *next; /* next port */ | ||
142 | }; | ||
143 | |||
144 | #define PNP_IRQ_NR 256 | ||
145 | struct pnp_irq { | ||
146 | DECLARE_BITMAP(map, PNP_IRQ_NR); /* bitmask for IRQ lines */ | ||
147 | unsigned char flags; /* IRQ flags */ | ||
148 | unsigned char pad; /* pad */ | ||
149 | struct pnp_irq *next; /* next IRQ */ | ||
150 | }; | ||
151 | |||
152 | struct pnp_dma { | ||
153 | unsigned char map; /* bitmask for DMA channels */ | ||
154 | unsigned char flags; /* DMA flags */ | ||
155 | struct pnp_dma *next; /* next port */ | ||
156 | }; | ||
157 | |||
158 | struct pnp_mem { | ||
159 | unsigned int min; /* min base number */ | ||
160 | unsigned int max; /* max base number */ | ||
161 | unsigned int align; /* align boundary */ | ||
162 | unsigned int size; /* size of range */ | ||
163 | unsigned char flags; /* memory flags */ | ||
164 | unsigned char pad; /* pad */ | ||
165 | struct pnp_mem *next; /* next memory resource */ | ||
166 | }; | ||
167 | |||
168 | #define PNP_RES_PRIORITY_PREFERRED 0 | ||
169 | #define PNP_RES_PRIORITY_ACCEPTABLE 1 | ||
170 | #define PNP_RES_PRIORITY_FUNCTIONAL 2 | ||
171 | #define PNP_RES_PRIORITY_INVALID 65535 | ||
172 | |||
173 | struct pnp_option { | ||
174 | unsigned short priority; /* priority */ | ||
175 | struct pnp_port *port; /* first port */ | ||
176 | struct pnp_irq *irq; /* first IRQ */ | ||
177 | struct pnp_dma *dma; /* first DMA */ | ||
178 | struct pnp_mem *mem; /* first memory resource */ | ||
179 | struct pnp_option *next; /* used to chain dependent resources */ | ||
180 | }; | ||
181 | |||
182 | /* | 187 | /* |
183 | * Device Management | 188 | * Device Management |
184 | */ | 189 | */ |
@@ -246,9 +251,9 @@ struct pnp_dev { | |||
246 | 251 | ||
247 | int active; | 252 | int active; |
248 | int capabilities; | 253 | int capabilities; |
249 | struct pnp_option *independent; | 254 | unsigned int num_dependent_sets; |
250 | struct pnp_option *dependent; | 255 | struct list_head resources; |
251 | struct pnp_resource_table *res; | 256 | struct list_head options; |
252 | 257 | ||
253 | char name[PNP_NAME_LEN]; /* contains a human-readable name */ | 258 | char name[PNP_NAME_LEN]; /* contains a human-readable name */ |
254 | int flags; /* used by protocols */ | 259 | int flags; /* used by protocols */ |
@@ -425,6 +430,8 @@ void pnp_unregister_card_driver(struct pnp_card_driver *drv); | |||
425 | extern struct list_head pnp_cards; | 430 | extern struct list_head pnp_cards; |
426 | 431 | ||
427 | /* resource management */ | 432 | /* resource management */ |
433 | int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t base, | ||
434 | resource_size_t size); | ||
428 | int pnp_auto_config_dev(struct pnp_dev *dev); | 435 | int pnp_auto_config_dev(struct pnp_dev *dev); |
429 | int pnp_start_dev(struct pnp_dev *dev); | 436 | int pnp_start_dev(struct pnp_dev *dev); |
430 | int pnp_stop_dev(struct pnp_dev *dev); | 437 | int pnp_stop_dev(struct pnp_dev *dev); |
@@ -452,6 +459,9 @@ static inline int pnp_register_card_driver(struct pnp_card_driver *drv) { return | |||
452 | static inline void pnp_unregister_card_driver(struct pnp_card_driver *drv) { } | 459 | static inline void pnp_unregister_card_driver(struct pnp_card_driver *drv) { } |
453 | 460 | ||
454 | /* resource management */ | 461 | /* resource management */ |
462 | static inline int pnp_possible_config(struct pnp_dev *dev, int type, | ||
463 | resource_size_t base, | ||
464 | resource_size_t size) { return 0; } | ||
455 | static inline int pnp_auto_config_dev(struct pnp_dev *dev) { return -ENODEV; } | 465 | static inline int pnp_auto_config_dev(struct pnp_dev *dev) { return -ENODEV; } |
456 | static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; } | 466 | static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; } |
457 | static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; } | 467 | static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; } |
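With the accessors now checking the result of pnp_get_resource(), a missing resource no longer dereferences a NULL pointer: ports and memory read back as 0, IRQs and DMAs as -1, and the flags default to an IORESOURCE_*|IORESOURCE_AUTO value. A driver probe can therefore stay simple; the function below is illustrative only:

#include <linux/pnp.h>

static int example_pnp_probe_resources(struct pnp_dev *dev)
{
	resource_size_t iobase;
	int irq;

	if (!pnp_port_valid(dev, 0) || !pnp_irq_valid(dev, 0))
		return -ENODEV;

	iobase = pnp_port_start(dev, 0);	/* would be 0 if the slot were absent */
	irq = pnp_irq(dev, 0);			/* would be -1 if the slot were absent */

	/* ... request_region()/request_irq() using the values above ... */
	return 0;
}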
diff --git a/include/linux/sched.h b/include/linux/sched.h index 21349173d148..1941d8b5cf11 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1062,12 +1062,6 @@ struct task_struct { | |||
1062 | #endif | 1062 | #endif |
1063 | 1063 | ||
1064 | struct list_head tasks; | 1064 | struct list_head tasks; |
1065 | /* | ||
1066 | * ptrace_list/ptrace_children forms the list of my children | ||
1067 | * that were stolen by a ptracer. | ||
1068 | */ | ||
1069 | struct list_head ptrace_children; | ||
1070 | struct list_head ptrace_list; | ||
1071 | 1065 | ||
1072 | struct mm_struct *mm, *active_mm; | 1066 | struct mm_struct *mm, *active_mm; |
1073 | 1067 | ||
@@ -1089,18 +1083,25 @@ struct task_struct { | |||
1089 | /* | 1083 | /* |
1090 | * pointers to (original) parent process, youngest child, younger sibling, | 1084 | * pointers to (original) parent process, youngest child, younger sibling, |
1091 | * older sibling, respectively. (p->father can be replaced with | 1085 | * older sibling, respectively. (p->father can be replaced with |
1092 | * p->parent->pid) | 1086 | * p->real_parent->pid) |
1093 | */ | 1087 | */ |
1094 | struct task_struct *real_parent; /* real parent process (when being debugged) */ | 1088 | struct task_struct *real_parent; /* real parent process */ |
1095 | struct task_struct *parent; /* parent process */ | 1089 | struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */ |
1096 | /* | 1090 | /* |
1097 | * children/sibling forms the list of my children plus the | 1091 | * children/sibling forms the list of my natural children |
1098 | * tasks I'm ptracing. | ||
1099 | */ | 1092 | */ |
1100 | struct list_head children; /* list of my children */ | 1093 | struct list_head children; /* list of my children */ |
1101 | struct list_head sibling; /* linkage in my parent's children list */ | 1094 | struct list_head sibling; /* linkage in my parent's children list */ |
1102 | struct task_struct *group_leader; /* threadgroup leader */ | 1095 | struct task_struct *group_leader; /* threadgroup leader */ |
1103 | 1096 | ||
1097 | /* | ||
1098 | * ptraced is the list of tasks this task is using ptrace on. | ||
1099 | * This includes both natural children and PTRACE_ATTACH targets. | ||
1100 | * p->ptrace_entry is p's link on the p->parent->ptraced list. | ||
1101 | */ | ||
1102 | struct list_head ptraced; | ||
1103 | struct list_head ptrace_entry; | ||
1104 | |||
1104 | /* PID/PID hash table linkage. */ | 1105 | /* PID/PID hash table linkage. */ |
1105 | struct pid_link pids[PIDTYPE_MAX]; | 1106 | struct pid_link pids[PIDTYPE_MAX]; |
1106 | struct list_head thread_group; | 1107 | struct list_head thread_group; |
@@ -1494,6 +1495,7 @@ static inline void put_task_struct(struct task_struct *t) | |||
1494 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ | 1495 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ |
1495 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1496 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
1496 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ | 1497 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ |
1498 | #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ | ||
1497 | 1499 | ||
1498 | /* | 1500 | /* |
1499 | * Only the _current_ task can read/write to tsk->flags, but other | 1501 | * Only the _current_ task can read/write to tsk->flags, but other |
@@ -1875,9 +1877,6 @@ extern void wait_task_inactive(struct task_struct * p); | |||
1875 | #define wait_task_inactive(p) do { } while (0) | 1877 | #define wait_task_inactive(p) do { } while (0) |
1876 | #endif | 1878 | #endif |
1877 | 1879 | ||
1878 | #define remove_parent(p) list_del_init(&(p)->sibling) | ||
1879 | #define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children) | ||
1880 | |||
1881 | #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) | 1880 | #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) |
1882 | 1881 | ||
1883 | #define for_each_process(p) \ | 1882 | #define for_each_process(p) \ |
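task_struct now keeps a single ->ptraced list of tracees (natural children and PTRACE_ATTACH targets alike), linked through each tracee's ->ptrace_entry, replacing the old ptrace_children/ptrace_list pair and the remove_parent()/add_parent() helpers. Walking a tracer's tracees becomes a plain list iteration; the function below is a sketch and assumes the caller already holds tasklist_lock:

#include <linux/sched.h>
#include <linux/list.h>

static void example_walk_tracees(struct task_struct *tracer)
{
	struct task_struct *child;

	/* Assumes tasklist_lock is held by the caller. */
	list_for_each_entry(child, &tracer->ptraced, ptrace_entry) {
		/* inspect or signal 'child' here */
	}
}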
diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h index d5ca78b93a3b..a3626aedaec9 100644 --- a/include/linux/spi/mmc_spi.h +++ b/include/linux/spi/mmc_spi.h | |||
@@ -23,6 +23,15 @@ struct mmc_spi_platform_data { | |||
23 | /* sense switch on sd cards */ | 23 | /* sense switch on sd cards */ |
24 | int (*get_ro)(struct device *); | 24 | int (*get_ro)(struct device *); |
25 | 25 | ||
26 | /* | ||
27 | * If board does not use CD interrupts, driver can optimize polling | ||
28 | * using this function. | ||
29 | */ | ||
30 | int (*get_cd)(struct device *); | ||
31 | |||
32 | /* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). */ | ||
33 | unsigned long caps; | ||
34 | |||
26 | /* how long to debounce card detect, in msecs */ | 35 | /* how long to debounce card detect, in msecs */ |
27 | u16 detect_delay; | 36 | u16 detect_delay; |
28 | 37 | ||
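Boards without a card-detect interrupt can now pass a get_cd() hook and ask the MMC core to poll by setting MMC_CAP_NEEDS_POLL in caps. A hypothetical board-file entry (the GPIO-reading callback is a stand-in):

#include <linux/spi/mmc_spi.h>
#include <linux/mmc/host.h>

static int example_mmc_get_cd(struct device *dev)
{
	/* Hypothetical: sample a GPIO wired to the card-detect switch. */
	return 1;
}

static struct mmc_spi_platform_data example_mmc_pdata = {
	.get_cd		= example_mmc_get_cd,
	.caps		= MMC_CAP_NEEDS_POLL,	/* no CD interrupt on this board */
	.detect_delay	= 100,			/* debounce, in msecs */
};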
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 6fff7f82ef12..e5bfe01ee305 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -42,7 +42,8 @@ struct rpc_clnt { | |||
42 | 42 | ||
43 | unsigned int cl_softrtry : 1,/* soft timeouts */ | 43 | unsigned int cl_softrtry : 1,/* soft timeouts */ |
44 | cl_discrtry : 1,/* disconnect before retry */ | 44 | cl_discrtry : 1,/* disconnect before retry */ |
45 | cl_autobind : 1;/* use getport() */ | 45 | cl_autobind : 1,/* use getport() */ |
46 | cl_chatty : 1;/* be verbose */ | ||
46 | 47 | ||
47 | struct rpc_rtt * cl_rtt; /* RTO estimator data */ | 48 | struct rpc_rtt * cl_rtt; /* RTO estimator data */ |
48 | const struct rpc_timeout *cl_timeout; /* Timeout strategy */ | 49 | const struct rpc_timeout *cl_timeout; /* Timeout strategy */ |
@@ -114,6 +115,7 @@ struct rpc_create_args { | |||
114 | #define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3) | 115 | #define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3) |
115 | #define RPC_CLNT_CREATE_NOPING (1UL << 4) | 116 | #define RPC_CLNT_CREATE_NOPING (1UL << 4) |
116 | #define RPC_CLNT_CREATE_DISCRTRY (1UL << 5) | 117 | #define RPC_CLNT_CREATE_DISCRTRY (1UL << 5) |
118 | #define RPC_CLNT_CREATE_QUIET (1UL << 6) | ||
117 | 119 | ||
118 | struct rpc_clnt *rpc_create(struct rpc_create_args *args); | 120 | struct rpc_clnt *rpc_create(struct rpc_create_args *args); |
119 | struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, | 121 | struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, |
@@ -123,6 +125,9 @@ void rpc_shutdown_client(struct rpc_clnt *); | |||
123 | void rpc_release_client(struct rpc_clnt *); | 125 | void rpc_release_client(struct rpc_clnt *); |
124 | 126 | ||
125 | int rpcb_register(u32, u32, int, unsigned short, int *); | 127 | int rpcb_register(u32, u32, int, unsigned short, int *); |
128 | int rpcb_v4_register(const u32 program, const u32 version, | ||
129 | const struct sockaddr *address, | ||
130 | const char *netid, int *result); | ||
126 | int rpcb_getport_sync(struct sockaddr_in *, u32, u32, int); | 131 | int rpcb_getport_sync(struct sockaddr_in *, u32, u32, int); |
127 | void rpcb_getport_async(struct rpc_task *); | 132 | void rpcb_getport_async(struct rpc_task *); |
128 | 133 | ||
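cl_chatty and RPC_CLNT_CREATE_QUIET let a caller suppress the usual "server not responding" console messages, and rpcb_v4_register() extends rpcbind registration beyond AF_INET. A hedged sketch of requesting a quiet client, assuming the caller has already filled in an rpc_create_args structure:

#include <linux/sunrpc/clnt.h>

static struct rpc_clnt *example_create_quiet_client(struct rpc_create_args *args)
{
	/* Suppress "server not responding" style messages for this client. */
	args->flags |= RPC_CLNT_CREATE_QUIET;
	return rpc_create(args);
}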
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index d1a5c8c1a0f1..64981a2f1cae 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
@@ -135,7 +135,6 @@ struct rpc_task_setup { | |||
135 | #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) | 135 | #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) |
136 | #define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS) | 136 | #define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS) |
137 | #define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) | 137 | #define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) |
138 | #define RPC_DO_CALLBACK(t) ((t)->tk_callback != NULL) | ||
139 | #define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT) | 138 | #define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT) |
140 | 139 | ||
141 | #define RPC_TASK_RUNNING 0 | 140 | #define RPC_TASK_RUNNING 0 |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index a6977423baf7..e8e69159af71 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -86,6 +86,11 @@ typedef int __bitwise suspend_state_t; | |||
86 | * that implement @begin(), but platforms implementing @begin() should | 86 | * that implement @begin(), but platforms implementing @begin() should |
87 | * also provide a @end() which cleans up transitions aborted before | 87 | * also provide a @end() which cleans up transitions aborted before |
88 | * @enter(). | 88 | * @enter(). |
89 | * | ||
90 | * @recover: Recover the platform from a suspend failure. | ||
91 | * Called by the PM core if the suspending of devices fails. | ||
92 | * This callback is optional and should only be implemented by platforms | ||
93 | * which require special recovery actions in that situation. | ||
89 | */ | 94 | */ |
90 | struct platform_suspend_ops { | 95 | struct platform_suspend_ops { |
91 | int (*valid)(suspend_state_t state); | 96 | int (*valid)(suspend_state_t state); |
@@ -94,6 +99,7 @@ struct platform_suspend_ops { | |||
94 | int (*enter)(suspend_state_t state); | 99 | int (*enter)(suspend_state_t state); |
95 | void (*finish)(void); | 100 | void (*finish)(void); |
96 | void (*end)(void); | 101 | void (*end)(void); |
102 | void (*recover)(void); | ||
97 | }; | 103 | }; |
98 | 104 | ||
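The optional @recover() hook (mirrored below for hibernation) gives a platform a place to undo its own preparations when suspending devices fails. A platform that needed it might wire it up roughly as follows; everything except suspend_valid_only_mem() and the structure layout is hypothetical:

#include <linux/suspend.h>

static int example_enter(suspend_state_t state)
{
	/* Hypothetical platform-specific entry into the target state. */
	return 0;
}

static void example_recover(void)
{
	/* Undo platform preparations after a failed device suspend, e.g.
	 * re-enable firmware notifications turned off in ->begin(). */
}

static struct platform_suspend_ops example_suspend_ops = {
	.valid	 = suspend_valid_only_mem,
	.enter	 = example_enter,
	.recover = example_recover,	/* optional */
};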
99 | #ifdef CONFIG_SUSPEND | 105 | #ifdef CONFIG_SUSPEND |
@@ -149,7 +155,7 @@ extern void mark_free_pages(struct zone *zone); | |||
149 | * The methods in this structure allow a platform to carry out special | 155 | * The methods in this structure allow a platform to carry out special |
150 | * operations required by it during a hibernation transition. | 156 | * operations required by it during a hibernation transition. |
151 | * | 157 | * |
152 | * All the methods below must be implemented. | 158 | * All the methods below, except for @recover(), must be implemented. |
153 | * | 159 | * |
154 | * @begin: Tell the platform driver that we're starting hibernation. | 160 | * @begin: Tell the platform driver that we're starting hibernation. |
155 | * Called right after shrinking memory and before freezing devices. | 161 | * Called right after shrinking memory and before freezing devices. |
@@ -189,6 +195,11 @@ extern void mark_free_pages(struct zone *zone); | |||
189 | * @restore_cleanup: Clean up after a failing image restoration. | 195 | * @restore_cleanup: Clean up after a failing image restoration. |
190 | * Called right after the nonboot CPUs have been enabled and before | 196 | * Called right after the nonboot CPUs have been enabled and before |
191 | * thawing devices (runs with IRQs on). | 197 | * thawing devices (runs with IRQs on). |
198 | * | ||
199 | * @recover: Recover the platform from a failure to suspend devices. | ||
200 | * Called by the PM core if the suspending of devices during hibernation | ||
201 | * fails. This callback is optional and should only be implemented by | ||
202 | * platforms which require special recovery actions in that situation. | ||
192 | */ | 203 | */ |
193 | struct platform_hibernation_ops { | 204 | struct platform_hibernation_ops { |
194 | int (*begin)(void); | 205 | int (*begin)(void); |
@@ -200,6 +211,7 @@ struct platform_hibernation_ops { | |||
200 | void (*leave)(void); | 211 | void (*leave)(void); |
201 | int (*pre_restore)(void); | 212 | int (*pre_restore)(void); |
202 | void (*restore_cleanup)(void); | 213 | void (*restore_cleanup)(void); |
214 | void (*recover)(void); | ||
203 | }; | 215 | }; |
204 | 216 | ||
205 | #ifdef CONFIG_HIBERNATION | 217 | #ifdef CONFIG_HIBERNATION |