Diffstat (limited to 'include/linux')

 include/linux/blkdev.h         |   1
 include/linux/dcache.h         |   1
 include/linux/firmware.h       |  23
 include/linux/fs.h             |   2
 include/linux/i2c-algo-pcf.h   |   8
 include/linux/i2c-id.h         |   3
 include/linux/i2c.h            |  46
 include/linux/i2c/at24.h       |  28
 include/linux/ide.h            |  55
 include/linux/ihex.h           |  74
 include/linux/interrupt.h      |   5
 include/linux/irq.h            |   9
 include/linux/jbd2.h           |  73
 include/linux/libata.h         |  55
 include/linux/list.h           | 367
 include/linux/lm_interface.h   |   6
 include/linux/mlx4/device.h    |   3
 include/linux/mpage.h          |  10
 include/linux/percpu_counter.h |  12
 include/linux/rcuclassic.h     |   3
 include/linux/rculist.h        | 369
 include/linux/rcupdate.h       |  26
 include/linux/rcupreempt.h     |  42
 include/linux/smp.h            |  46
 include/linux/topology.h       |  13
 include/linux/writeback.h      |   1

 26 files changed, 798 insertions, 483 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1ffd8bfdc4c9..32a441b05fd5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -651,7 +651,6 @@ extern void generic_make_request(struct bio *bio);
 extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
-extern void blk_end_sync_rq(struct request *rq, int error);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d982eb89c77d..98202c672fde 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -3,6 +3,7 @@
 
 #include <asm/atomic.h>
 #include <linux/list.h>
+#include <linux/rculist.h>
 #include <linux/spinlock.h>
 #include <linux/cache.h>
 #include <linux/rcupdate.h>
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 6c7eff2ebada..c8ecf5b2a207 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -1,18 +1,39 @@
 #ifndef _LINUX_FIRMWARE_H
 #define _LINUX_FIRMWARE_H
+
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/compiler.h>
+
 #define FIRMWARE_NAME_MAX 30
 #define FW_ACTION_NOHOTPLUG 0
 #define FW_ACTION_HOTPLUG 1
 
 struct firmware {
 	size_t size;
-	u8 *data;
+	const u8 *data;
 };
 
 struct device;
 
+struct builtin_fw {
+	char *name;
+	void *data;
+	unsigned long size;
+};
+
+/* We have to play tricks here much like stringify() to get the
+   __COUNTER__ macro to be expanded as we want it */
+#define __fw_concat1(x, y) x##y
+#define __fw_concat(x, y) __fw_concat1(x, y)
+
+#define DECLARE_BUILTIN_FIRMWARE(name, blob)				     \
+	DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob))
+
+#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size)			     \
+	static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
+	__used __section(.builtin_fw) = { name, blob, size }
+
 #if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
 int request_firmware(const struct firmware **fw, const char *name,
 		     struct device *device);
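Example (not part of the patch): a minimal sketch of how a driver might use the new DECLARE_BUILTIN_FIRMWARE() macro to embed a blob in the kernel image and then load it through the usual request_firmware() path. The "acme/board-init.fw" name, the array contents and acme_probe() are invented for illustration; whether the builtin table is consulted also depends on the firmware loader and linker script support added alongside this header change.

#include <linux/firmware.h>
#include <linux/device.h>

/* Placeholder firmware bytes; a real image would be generated at build time. */
static u8 acme_init_fw[] = { 0x01, 0x02, 0x03, 0x04 };

/* Registers the blob under a name, in the .builtin_fw section. */
DECLARE_BUILTIN_FIRMWARE("acme/board-init.fw", acme_init_fw);

static int acme_probe(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* Looks the image up by name; no userspace helper is needed
	 * for a builtin blob. */
	err = request_firmware(&fw, "acme/board-init.fw", dev);
	if (err)
		return err;

	/* fw->data is const u8 * after this change; treat it as read-only. */
	/* ... program the device from fw->data / fw->size ... */

	release_firmware(fw);
	return 0;
}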
diff --git a/include/linux/fs.h b/include/linux/fs.h
index faac13e2cc5c..52e510a0aec2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1740,6 +1740,8 @@ extern int wait_on_page_writeback_range(struct address_space *mapping,
 				pgoff_t start, pgoff_t end);
 extern int __filemap_fdatawrite_range(struct address_space *mapping,
 				loff_t start, loff_t end, int sync_mode);
+extern int filemap_fdatawrite_range(struct address_space *mapping,
+				loff_t start, loff_t end);
 
 extern long do_fsync(struct file *file, int datasync);
 extern void sync_supers(void);
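Example (not part of the patch): a hedged sketch of how a caller might combine the newly exported filemap_fdatawrite_range() with the existing wait_on_page_writeback_range() declaration shown above to flush just a byte range of a file. The inode and range are illustrative; the sketch assumes <linux/fs.h> and <linux/pagemap.h>.

static int example_flush_range(struct inode *inode, loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	int err;

	/* Start writeback for the [start, end] byte range only. */
	err = filemap_fdatawrite_range(mapping, start, end);
	if (err)
		return err;

	/* Then wait for the pages covering that range to finish. */
	return wait_on_page_writeback_range(mapping,
					    start >> PAGE_CACHE_SHIFT,
					    end >> PAGE_CACHE_SHIFT);
}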
diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h
index 77afbb60fd11..0177d280f733 100644
--- a/include/linux/i2c-algo-pcf.h
+++ b/include/linux/i2c-algo-pcf.h
@@ -33,9 +33,11 @@ struct i2c_algo_pcf_data {
 	int  (*getclock) (void *data);
 	void (*waitforpin) (void);
 
-	/* local settings */
-	int udelay;
-	int timeout;
+	/* Multi-master lost arbitration back-off delay (msecs)
+	 * This should be set by the bus adapter or knowledgable client
+	 * if bus is multi-mastered, else zero
+	 */
+	unsigned long lab_mdelay;
 };
 
 int i2c_pcf_add_bus(struct i2c_adapter *);
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 580acc93903e..ef13b7c66df3 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -91,8 +91,6 @@
 #define I2C_DRIVERID_M52790	95	/* Mitsubishi M52790SP/FP AV switch */
 #define I2C_DRIVERID_CS5345	96	/* cs5345 audio processor */
 
-#define I2C_DRIVERID_I2CDEV	900
-
 #define I2C_DRIVERID_OV7670	1048	/* Omnivision 7670 camera */
 
 /*
@@ -111,7 +109,6 @@
 #define I2C_HW_B_RIVA		0x010010 /* Riva based graphics cards */
 #define I2C_HW_B_IOC		0x010011 /* IOC bit-wiggling */
 #define I2C_HW_B_IXP2000	0x010016 /* GPIO on IXP2000 systems */
-#define I2C_HW_B_S3VIA		0x010018 /* S3Via ProSavage adapter */
 #define I2C_HW_B_ZR36067	0x010019 /* Zoran-36057/36067 based boards */
 #define I2C_HW_B_PCILYNX	0x01001a /* TI PCILynx I2C adapter */
 #define I2C_HW_B_CX2388x	0x01001b /* connexant 2388x based tv cards */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 8dc730132192..08be0d21864c 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -35,6 +35,8 @@
 #include <linux/sched.h>	/* for completion */
 #include <linux/mutex.h>
 
+extern struct bus_type i2c_bus_type;
+
 /* --- General options ------------------------------------------------ */
 
 struct i2c_msg;
@@ -43,6 +45,7 @@ struct i2c_adapter;
 struct i2c_client;
 struct i2c_driver;
 union i2c_smbus_data;
+struct i2c_board_info;
 
 /*
  * The master routines are the ones normally used to transmit data to devices
@@ -69,9 +72,8 @@ extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr,
 			   union i2c_smbus_data * data);
 
 /* Now follow the 'nice' access routines. These also document the calling
-   conventions of smbus_access. */
+   conventions of i2c_smbus_xfer. */
 
-extern s32 i2c_smbus_write_quick(struct i2c_client * client, u8 value);
 extern s32 i2c_smbus_read_byte(struct i2c_client * client);
 extern s32 i2c_smbus_write_byte(struct i2c_client * client, u8 value);
 extern s32 i2c_smbus_read_byte_data(struct i2c_client * client, u8 command);
@@ -93,15 +95,33 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client,
 					  u8 command, u8 length,
 					  const u8 *values);
 
-/*
- * A driver is capable of handling one or more physical devices present on
- * I2C adapters. This information is used to inform the driver of adapter
- * events.
+/**
+ * struct i2c_driver - represent an I2C device driver
+ * @class: What kind of i2c device we instantiate (for detect)
+ * @detect: Callback for device detection
+ * @address_data: The I2C addresses to probe, ignore or force (for detect)
+ * @clients: List of detected clients we created (for i2c-core use only)
  *
  * The driver.owner field should be set to the module owner of this driver.
  * The driver.name field should be set to the name of this driver.
+ *
+ * For automatic device detection, both @detect and @address_data must
+ * be defined. @class should also be set, otherwise only devices forced
+ * with module parameters will be created. The detect function must
+ * fill at least the name field of the i2c_board_info structure it is
+ * handed upon successful detection, and possibly also the flags field.
+ *
+ * If @detect is missing, the driver will still work fine for enumerated
+ * devices. Detected devices simply won't be supported. This is expected
+ * for the many I2C/SMBus devices which can't be detected reliably, and
+ * the ones which can always be enumerated in practice.
+ *
+ * The i2c_client structure which is handed to the @detect callback is
+ * not a real i2c_client. It is initialized just enough so that you can
+ * call i2c_smbus_read_byte_data and friends on it. Don't do anything
+ * else with it. In particular, calling dev_dbg and friends on it is
+ * not allowed.
  */
-
 struct i2c_driver {
 	int id;
 	unsigned int class;
@@ -141,6 +161,11 @@ struct i2c_driver {
 
 	struct device_driver driver;
 	const struct i2c_device_id *id_table;
+
+	/* Device detection callback for automatic device creation */
+	int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *);
+	const struct i2c_client_address_data *address_data;
+	struct list_head clients;
 };
 #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
 
@@ -156,6 +181,7 @@ struct i2c_driver {
 * @dev: Driver model device node for the slave.
 * @irq: indicates the IRQ generated by this device (if any)
 * @list: list of active/busy clients (DEPRECATED)
+ * @detected: member of an i2c_driver.clients list
 * @released: used to synchronize client releases & detaches and references
 *
 * An i2c_client identifies a single device (i.e. chip) connected to an
@@ -173,6 +199,7 @@ struct i2c_client {
 	struct device dev;		/* the device structure		*/
 	int irq;			/* irq issued by device		*/
 	struct list_head list;		/* DEPRECATED */
+	struct list_head detected;
 	struct completion released;
 };
 #define to_i2c_client(d) container_of(d, struct i2c_client, dev)
@@ -350,10 +377,11 @@ static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data)
 #define I2C_CLASS_HWMON		(1<<0)	/* lm_sensors, ... */
 #define I2C_CLASS_TV_ANALOG	(1<<1)	/* bttv + friends */
 #define I2C_CLASS_TV_DIGITAL	(1<<2)	/* dvb cards */
-#define I2C_CLASS_DDC		(1<<3)	/* i2c-matroxfb ? */
+#define I2C_CLASS_DDC		(1<<3)	/* DDC bus on graphics adapters */
 #define I2C_CLASS_CAM_ANALOG	(1<<4)	/* camera with analog CCD */
 #define I2C_CLASS_CAM_DIGITAL	(1<<5)	/* most webcams */
 #define I2C_CLASS_SOUND		(1<<6)	/* sound devices */
+#define I2C_CLASS_SPD		(1<<7)	/* SPD EEPROMs and similar */
 #define I2C_CLASS_ALL		(UINT_MAX) /* all of the above */
 
 /* i2c_client_address_data is the struct for holding default client
@@ -537,7 +565,7 @@ union i2c_smbus_data {
 	       /* and one more for user-space compatibility */
 };
 
-/* smbus_access read or write markers */
+/* i2c_smbus_xfer read or write markers */
 #define I2C_SMBUS_READ	1
 #define I2C_SMBUS_WRITE	0
 
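Example (not part of the patch): a hedged sketch of the new @detect/@address_data mechanism documented in the kerneldoc above. The "foochip" name, the register at 0x00 and the ID value 0x42 are invented; addr_data is assumed to come from the legacy I2C_CLIENT_INSMOD_* macros, and .probe/.remove/.id_table are assumed to be defined elsewhere in the driver.

static int foochip_detect(struct i2c_client *client, int kind,
			  struct i2c_board_info *info)
{
	/* The temporary client handed in here is only good for
	 * i2c_smbus_*() transfers, as the kerneldoc above explains. */
	if (kind < 0 && i2c_smbus_read_byte_data(client, 0x00) != 0x42)
		return -ENODEV;

	/* Required on success: name the device to be instantiated. */
	strlcpy(info->type, "foochip", I2C_NAME_SIZE);
	return 0;
}

static struct i2c_driver foochip_driver = {
	.driver		= { .name = "foochip" },
	.class		= I2C_CLASS_HWMON,
	.detect		= foochip_detect,
	.address_data	= &addr_data,	/* from I2C_CLIENT_INSMOD_1(foochip) */
	/* .probe, .remove, .id_table omitted from this sketch */
};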
diff --git a/include/linux/i2c/at24.h b/include/linux/i2c/at24.h
new file mode 100644
index 000000000000..f6edd522a929
--- /dev/null
+++ b/include/linux/i2c/at24.h
@@ -0,0 +1,28 @@
+#ifndef _LINUX_AT24_H
+#define _LINUX_AT24_H
+
+#include <linux/types.h>
+
+/*
+ * As seen through Linux I2C, differences between the most common types of I2C
+ * memory include:
+ * - How much memory is available (usually specified in bit)?
+ * - What write page size does it support?
+ * - Special flags (16 bit addresses, read_only, world readable...)?
+ *
+ * If you set up a custom eeprom type, please double-check the parameters.
+ * Especially page_size needs extra care, as you risk data loss if your value
+ * is bigger than what the chip actually supports!
+ */
+
+struct at24_platform_data {
+	u32	byte_len;		/* size (sum of all addr) */
+	u16	page_size;		/* for writes */
+	u8	flags;
+#define AT24_FLAG_ADDR16	0x80	/* address pointer is 16 bit */
+#define AT24_FLAG_READONLY	0x40	/* sysfs-entry will be read-only */
+#define AT24_FLAG_IRUGO		0x20	/* sysfs-entry will be world-readable */
+#define AT24_FLAG_TAKE8ADDR	0x10	/* take always 8 addresses (24c00) */
+};
+
+#endif /* _LINUX_AT24_H */
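Example (not part of the patch): a hedged sketch of board code describing a small 24C02-style EEPROM to the new at24 driver. The bus number, slave address, sizes and array names are illustrative only; as the header warns, page_size must be checked against the datasheet.

#include <linux/i2c.h>
#include <linux/i2c/at24.h>

static struct at24_platform_data board_eeprom = {
	.byte_len	= 256,			/* 24c02: 2 kbit */
	.page_size	= 8,			/* check the datasheet! */
	.flags		= AT24_FLAG_READONLY,
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("24c02", 0x50),
		.platform_data	= &board_eeprom,
	},
};

/* Registered from board init code, e.g.:
 *	i2c_register_board_info(0, board_i2c_devices,
 *				ARRAY_SIZE(board_i2c_devices));
 */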
diff --git a/include/linux/ide.h b/include/linux/ide.h
index eddb6daadf4a..ac4eeb2932ef 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -364,7 +364,6 @@ typedef struct ide_drive_s {
 	u8	wcache;		/* status of write cache */
 	u8	acoustic;	/* acoustic management */
 	u8	media;		/* disk, cdrom, tape, floppy, ... */
-	u8	ctl;		/* "normal" value for Control register */
 	u8	ready_stat;	/* min status value for drive ready */
 	u8	mult_count;	/* current multiple sector setting */
 	u8	mult_req;	/* requested multiple sector setting */
@@ -493,7 +492,7 @@ typedef struct hwif_s {
 	void (*ide_dma_clear_irq)(ide_drive_t *drive);
 
 	void (*OUTB)(u8 addr, unsigned long port);
-	void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port);
+	void (*OUTBSYNC)(struct hwif_s *hwif, u8 addr, unsigned long port);
 
 	u8  (*INB)(unsigned long port);
 
@@ -532,7 +531,6 @@ typedef struct hwif_s {
 	unsigned	serialized : 1;	/* serialized all channel operation */
 	unsigned	sharing_irq: 1;	/* 1 = sharing irq with another hwif */
 	unsigned	sg_mapped  : 1;	/* sg_table and sg_nents are ready */
-	unsigned	mmio       : 1;	/* host uses MMIO */
 
 	struct device		gendev;
 	struct device		*portdev;
@@ -604,12 +602,13 @@ enum {
 	PC_FLAG_SUPPRESS_ERROR		= (1 << 1),
 	PC_FLAG_WAIT_FOR_DSC		= (1 << 2),
 	PC_FLAG_DMA_OK			= (1 << 3),
-	PC_FLAG_DMA_RECOMMENDED		= (1 << 4),
-	PC_FLAG_DMA_IN_PROGRESS		= (1 << 5),
-	PC_FLAG_DMA_ERROR		= (1 << 6),
-	PC_FLAG_WRITING			= (1 << 7),
+	PC_FLAG_DMA_IN_PROGRESS		= (1 << 4),
+	PC_FLAG_DMA_ERROR		= (1 << 5),
+	PC_FLAG_WRITING			= (1 << 6),
 	/* command timed out */
-	PC_FLAG_TIMEDOUT		= (1 << 8),
+	PC_FLAG_TIMEDOUT		= (1 << 7),
+	PC_FLAG_ZIP_DRIVE		= (1 << 8),
+	PC_FLAG_DRQ_INTERRUPT		= (1 << 9),
 };
 
 struct ide_atapi_pc {
@@ -642,8 +641,8 @@
 	 * to change/removal later.
 	 */
 	u8 pc_buf[256];
-	void (*idefloppy_callback) (ide_drive_t *);
-	ide_startstop_t (*idetape_callback) (ide_drive_t *);
+
+	void (*callback)(ide_drive_t *);
 
 	/* idetape only */
 	struct idetape_bh *bh;
@@ -813,10 +812,6 @@ int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsig
 #ifndef _IDE_C
 extern	ide_hwif_t	ide_hwifs[];	/* master data repository */
 #endif
-extern int ide_noacpi;
-extern int ide_acpigtf;
-extern int ide_acpionboot;
-extern int noautodma;
 
 extern int ide_vlb_clk;
 extern int ide_pci_clk;
@@ -857,25 +852,12 @@ int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
 
 extern ide_startstop_t ide_do_reset (ide_drive_t *);
 
-extern void ide_init_drive_cmd (struct request *rq);
-
-/*
- * "action" parameter type for ide_do_drive_cmd() below.
- */
-typedef enum {
-	ide_wait,	/* insert rq at end of list, and wait for it */
-	ide_preempt,	/* insert rq in front of current request */
-	ide_head_wait,	/* insert rq in front of current request and wait for it */
-	ide_end		/* insert rq at end of list, but don't wait for it */
-} ide_action_t;
-
-extern int ide_do_drive_cmd(ide_drive_t *, struct request *, ide_action_t);
+extern void ide_do_drive_cmd(ide_drive_t *, struct request *);
 
 extern void ide_end_drive_cmd(ide_drive_t *, u8, u8);
 
 enum {
 	IDE_TFLAG_LBA48			= (1 << 0),
-	IDE_TFLAG_NO_SELECT_MASK	= (1 << 1),
 	IDE_TFLAG_FLAGGED		= (1 << 2),
 	IDE_TFLAG_OUT_DATA		= (1 << 3),
 	IDE_TFLAG_OUT_HOB_FEATURE	= (1 << 4),
@@ -980,11 +962,23 @@ typedef struct ide_task_s {
 void ide_tf_dump(const char *, struct ide_taskfile *);
 
 extern void SELECT_DRIVE(ide_drive_t *);
+void SELECT_MASK(ide_drive_t *, int);
 
 extern int drive_is_ready(ide_drive_t *);
 
 void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
 
+ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
+	ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry,
+	void (*update_buffers)(ide_drive_t *, struct ide_atapi_pc *),
+	void (*retry_pc)(ide_drive_t *), void (*dsc_handle)(ide_drive_t *),
+	void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned int,
+			   int));
+ide_startstop_t ide_transfer_pc(ide_drive_t *, struct ide_atapi_pc *,
+				ide_handler_t *, unsigned int, ide_expiry_t *);
+ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_atapi_pc *,
+			     ide_handler_t *, unsigned int, ide_expiry_t *);
+
 ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);
 
 void task_end_request(ide_drive_t *, struct request *, u8);
@@ -996,8 +990,6 @@ int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long);
 int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long);
 int ide_task_ioctl(ide_drive_t *, unsigned int, unsigned long);
 
-extern int system_bus_clock(void);
-
 extern int ide_driveid_update(ide_drive_t *);
 extern int ide_config_drive_speed(ide_drive_t *, u8);
 extern u8 eighty_ninty_three (ide_drive_t *);
@@ -1349,7 +1341,8 @@ static inline void ide_set_irq(ide_drive_t *drive, int on)
 {
 	ide_hwif_t *hwif = drive->hwif;
 
-	hwif->OUTB(drive->ctl | (on ? 0 : 2), hwif->io_ports.ctl_addr);
+	hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | (on ? 0 : 2),
+		       hwif->io_ports.ctl_addr);
 }
 
 static inline u8 ide_read_status(ide_drive_t *drive)
diff --git a/include/linux/ihex.h b/include/linux/ihex.h
new file mode 100644
index 000000000000..2baace2788a7
--- /dev/null
+++ b/include/linux/ihex.h
@@ -0,0 +1,74 @@
+/*
+ * Compact binary representation of ihex records. Some devices need their
+ * firmware loaded in strange orders rather than a single big blob, but
+ * actually parsing ihex-as-text within the kernel seems silly. Thus,...
+ */
+
+#ifndef __LINUX_IHEX_H__
+#define __LINUX_IHEX_H__
+
+#include <linux/types.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+
+/* Intel HEX files actually limit the length to 256 bytes, but we have
+   drivers which would benefit from using separate records which are
+   longer than that, so we extend to 16 bits of length */
+struct ihex_binrec {
+	__be32 addr;
+	__be16 len;
+	uint8_t data[0];
+} __attribute__((aligned(4)));
+
+/* Find the next record, taking into account the 4-byte alignment */
+static inline const struct ihex_binrec *
+ihex_next_binrec(const struct ihex_binrec *rec)
+{
+	int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2;
+	rec = (void *)&rec->data[next];
+
+	return be16_to_cpu(rec->len) ? rec : NULL;
+}
+
+/* Check that ihex_next_binrec() won't take us off the end of the image... */
+static inline int ihex_validate_fw(const struct firmware *fw)
+{
+	const struct ihex_binrec *rec;
+	size_t ofs = 0;
+
+	while (ofs <= fw->size - sizeof(*rec)) {
+		rec = (void *)&fw->data[ofs];
+
+		/* Zero length marks end of records */
+		if (!be16_to_cpu(rec->len))
+			return 0;
+
+		/* Point to next record... */
+		ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3;
+	}
+	return -EINVAL;
+}
+
+/* Request firmware and validate it so that we can trust we won't
+ * run off the end while reading records... */
+static inline int request_ihex_firmware(const struct firmware **fw,
+					const char *fw_name,
+					struct device *dev)
+{
+	const struct firmware *lfw;
+	int ret;
+
+	ret = request_firmware(&lfw, fw_name, dev);
+	if (ret)
+		return ret;
+	ret = ihex_validate_fw(lfw);
+	if (ret) {
+		dev_err(dev, "Firmware \"%s\" not valid IHEX records\n",
+			fw_name);
+		release_firmware(lfw);
+		return ret;
+	}
+	*fw = lfw;
+	return 0;
+}
+#endif /* __LINUX_IHEX_H__ */
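Example (not part of the patch): a hedged sketch of a driver consuming IHEX-converted firmware with the helpers above. request_ihex_firmware() validates the image, so the record walk cannot run past the end of the blob. The firmware name and example_write_block() are placeholders for a real device download routine.

#include <linux/ihex.h>

/* Placeholder for the device-specific block download. */
static void example_write_block(struct device *dev, u32 addr,
				const u8 *buf, unsigned int len);

static int example_load_records(struct device *dev)
{
	const struct firmware *fw;
	const struct ihex_binrec *rec;
	int ret;

	ret = request_ihex_firmware(&fw, "example/device.fw", dev);
	if (ret)
		return ret;

	/* Each record carries its own big-endian load address and length. */
	for (rec = (const struct ihex_binrec *)fw->data; rec;
	     rec = ihex_next_binrec(rec))
		example_write_block(dev, be32_to_cpu(rec->addr),
				    rec->data, be16_to_cpu(rec->len));

	release_firmware(fw);
	return 0;
}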
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a86186dd0474..62aa4f895abe 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -104,8 +104,11 @@ extern void enable_irq(unsigned int irq);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
 
+extern cpumask_t irq_default_affinity;
+
 extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
 extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
 
 #else /* CONFIG_SMP */
 
@@ -119,6 +122,8 @@ static inline int irq_can_set_affinity(unsigned int irq)
 	return 0;
 }
 
+static inline int irq_select_affinity(unsigned int irq) { return 0; }
+
 #endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
 
 #ifdef CONFIG_GENERIC_HARDIRQS
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 552e0ec269c9..8ccb462ea42c 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -244,15 +244,6 @@ static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
 }
 #endif
 
-#ifdef CONFIG_AUTO_IRQ_AFFINITY
-extern int select_smp_affinity(unsigned int irq);
-#else
-static inline int select_smp_affinity(unsigned int irq)
-{
-	return 1;
-}
-#endif
-
 extern int no_irq_affinity;
 
 static inline int irq_balancing_disabled(unsigned int irq)
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index d147f0f90360..3dd209007098 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -168,6 +168,8 @@ struct commit_header {
 	unsigned char   h_chksum_size;
 	unsigned char 	h_padding[2];
 	__be32 		h_chksum[JBD2_CHECKSUM_BYTES];
+	__be64		h_commit_sec;
+	__be32		h_commit_nsec;
 };
 
 /*
@@ -379,6 +381,38 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
 	bit_spin_unlock(BH_JournalHead, &bh->b_state);
 }
 
+/* Flags in jbd_inode->i_flags */
+#define __JI_COMMIT_RUNNING 0
+/* Commit of the inode data in progress. We use this flag to protect us from
+ * concurrent deletion of inode. We cannot use reference to inode for this
+ * since we cannot afford doing last iput() on behalf of kjournald
+ */
+#define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING)
+
+/**
+ * struct jbd_inode is the structure linking inodes in ordered mode
+ *   present in a transaction so that we can sync them during commit.
+ */
+struct jbd2_inode {
+	/* Which transaction does this inode belong to? Either the running
+	 * transaction or the committing one. [j_list_lock] */
+	transaction_t *i_transaction;
+
+	/* Pointer to the running transaction modifying inode's data in case
+	 * there is already a committing transaction touching it. [j_list_lock] */
+	transaction_t *i_next_transaction;
+
+	/* List of inodes in the i_transaction [j_list_lock] */
+	struct list_head i_list;
+
+	/* VFS inode this inode belongs to [constant during the lifetime
+	 * of the structure] */
+	struct inode *i_vfs_inode;
+
+	/* Flags of inode [j_list_lock] */
+	unsigned int i_flags;
+};
+
 struct jbd2_revoke_table_s;
 
 /**
@@ -509,24 +543,12 @@ struct transaction_s
 	struct journal_head	*t_reserved_list;
 
 	/*
-	 * Doubly-linked circular list of all buffers under writeout during
-	 * commit [j_list_lock]
-	 */
-	struct journal_head	*t_locked_list;
-
-	/*
 	 * Doubly-linked circular list of all metadata buffers owned by this
 	 * transaction [j_list_lock]
 	 */
 	struct journal_head	*t_buffers;
 
 	/*
-	 * Doubly-linked circular list of all data buffers still to be
-	 * flushed before this transaction can be committed [j_list_lock]
-	 */
-	struct journal_head	*t_sync_datalist;
-
-	/*
 	 * Doubly-linked circular list of all forget buffers (superseded
 	 * buffers which we can un-checkpoint once this transaction commits)
 	 * [j_list_lock]
@@ -565,6 +587,12 @@
 	struct journal_head	*t_log_list;
 
 	/*
+	 * List of inodes whose data we've modified in data=ordered mode.
+	 * [j_list_lock]
+	 */
+	struct list_head	t_inode_list;
+
+	/*
 	 * Protects info related to handles
 	 */
 	spinlock_t		t_handle_lock;
@@ -1004,7 +1032,6 @@ extern int jbd2_journal_extend (handle_t *, int nblocks);
 extern int	 jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
 extern int	 jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
 extern int	 jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int	 jbd2_journal_dirty_data (handle_t *, struct buffer_head *);
 extern int	 jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
 extern void	 jbd2_journal_release_buffer (handle_t *, struct buffer_head *);
 extern int	 jbd2_journal_forget (handle_t *, struct buffer_head *);
@@ -1044,6 +1071,10 @@ extern void jbd2_journal_ack_err (journal_t *);
 extern int	 jbd2_journal_clear_err  (journal_t *);
 extern int	 jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
 extern int	 jbd2_journal_force_commit(journal_t *);
+extern int	 jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode);
+extern int	 jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode, loff_t new_size);
+extern void	 jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
+extern void	 jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode);
 
 /*
  * journal_head management
@@ -1179,15 +1210,13 @@ static inline int jbd_space_needed(journal_t *journal)
 
 /* journaling buffer types */
 #define BJ_None		0	/* Not journaled */
-#define BJ_SyncData	1	/* Normal data: flush before commit */
-#define BJ_Metadata	2	/* Normal journaled metadata */
-#define BJ_Forget	3	/* Buffer superseded by this transaction */
-#define BJ_IO		4	/* Buffer is for temporary IO use */
-#define BJ_Shadow	5	/* Buffer contents being shadowed to the log */
-#define BJ_LogCtl	6	/* Buffer contains log descriptors */
-#define BJ_Reserved	7	/* Buffer is reserved for access by journal */
-#define BJ_Locked	8	/* Locked for I/O during commit */
-#define BJ_Types	9
+#define BJ_Metadata	1	/* Normal journaled metadata */
+#define BJ_Forget	2	/* Buffer superseded by this transaction */
+#define BJ_IO		3	/* Buffer is for temporary IO use */
+#define BJ_Shadow	4	/* Buffer contents being shadowed to the log */
+#define BJ_LogCtl	5	/* Buffer contains log descriptors */
+#define BJ_Reserved	6	/* Buffer is reserved for access by journal */
+#define BJ_Types	7
 
 extern int jbd_blocks_per_page(struct inode *inode);
 
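Example (not part of the patch): a hedged sketch of how a filesystem might tie its inodes into the new jbd2_inode/data=ordered machinery declared above. "myfs_inode_info" and its jinode field are invented stand-ins for what a real filesystem (e.g. ext4) keeps in its per-inode structure; error handling and locking are omitted.

struct myfs_inode_info {
	struct jbd2_inode	jinode;
	struct inode		vfs_inode;
};

static void myfs_init_jbd_inode(struct myfs_inode_info *ei)
{
	/* Bind the jbd2_inode to its VFS inode when the inode is set up;
	 * jbd2_journal_release_jbd_inode() would be the teardown side. */
	jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
}

static int myfs_order_data(handle_t *handle, struct myfs_inode_info *ei)
{
	/* File the inode on the running transaction's t_inode_list so its
	 * dirty data is written out before the transaction commits. */
	return jbd2_journal_file_inode(handle, &ei->jinode);
}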
diff --git a/include/linux/libata.h b/include/linux/libata.h
index e57e5d08312d..5b247b8a6b3b 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -27,6 +27,7 @@
 #define __LINUX_LIBATA_H__
 
 #include <linux/delay.h>
+#include <linux/jiffies.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
@@ -115,7 +116,7 @@ enum {
 	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
 	ATA_MAX_QUEUE		= 32,
 	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE - 1,
-	ATA_SHORT_PAUSE		= (HZ >> 6) + 1,
+	ATA_SHORT_PAUSE		= 16,
 
 	ATAPI_MAX_DRAIN		= 16 << 10,
 
@@ -168,6 +169,7 @@ enum {
 	ATA_LFLAG_ASSUME_CLASS	= ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB,
 	ATA_LFLAG_NO_RETRY	= (1 << 5), /* don't retry this link */
 	ATA_LFLAG_DISABLED	= (1 << 6), /* link is disabled */
+	ATA_LFLAG_SW_ACTIVITY	= (1 << 7), /* keep activity stats */
 
 	/* struct ata_port flags */
 	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
@@ -190,6 +192,10 @@ enum {
 	ATA_FLAG_AN		= (1 << 18), /* controller supports AN */
 	ATA_FLAG_PMP		= (1 << 19), /* controller supports PMP */
 	ATA_FLAG_IPM		= (1 << 20), /* driver can handle IPM */
+	ATA_FLAG_EM		= (1 << 21), /* driver supports enclosure
+					      * management */
+	ATA_FLAG_SW_ACTIVITY	= (1 << 22), /* driver supports sw activity
+					      * led */
 
 	/* The following flag belongs to ap->pflags but is kept in
 	 * ap->flags because it's referenced in many LLDs and will be
@@ -234,17 +240,16 @@ enum {
 	/* bits 24:31 of host->flags are reserved for LLD specific flags */
 
 	/* various lengths of time */
-	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
-	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
-	ATA_TMOUT_INTERNAL	= 30 * HZ,
-	ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
+	ATA_TMOUT_BOOT		= 30000,	/* heuristic */
+	ATA_TMOUT_BOOT_QUICK	= 7000,		/* heuristic */
+	ATA_TMOUT_INTERNAL_QUICK = 5000,
 
 	/* FIXME: GoVault needs 2s but we can't afford that without
 	 * parallel probing. 800ms is enough for iVDR disk
 	 * HHD424020F7SV00. Increase to 2secs when parallel probing
 	 * is in place.
 	 */
-	ATA_TMOUT_FF_WAIT	= 4 * HZ / 5,
+	ATA_TMOUT_FF_WAIT	= 800,
 
 	/* Spec mandates to wait for ">= 2ms" before checking status
 	 * after reset. We wait 150ms, because that was the magic
@@ -256,14 +261,14 @@
 	 *
 	 * Old drivers/ide uses the 2mS rule and then waits for ready.
 	 */
-	ATA_WAIT_AFTER_RESET_MSECS = 150,
+	ATA_WAIT_AFTER_RESET	= 150,
 
 	/* If PMP is supported, we have to do follow-up SRST. As some
 	 * PMPs don't send D2H Reg FIS after hardreset, LLDs are
 	 * advised to wait only for the following duration before
 	 * doing SRST.
 	 */
-	ATA_TMOUT_PMP_SRST_WAIT	= 1 * HZ,
+	ATA_TMOUT_PMP_SRST_WAIT	= 1000,
 
 	/* ATA bus states */
 	BUS_UNKNOWN		= 0,
@@ -340,6 +345,11 @@ enum {
 
 	SATA_PMP_RW_TIMEOUT	= 3000,		/* PMP read/write timeout */
 
+	/* This should match the actual table size of
+	 * ata_eh_cmd_timeout_table in libata-eh.c.
+	 */
+	ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 5,
+
 	/* Horkage types. May be set by libata or controller on drives
 	   (some horkage may be drive/controller pair dependant */
 
@@ -441,6 +451,15 @@ enum link_pm {
 	MEDIUM_POWER,
 };
 extern struct device_attribute dev_attr_link_power_management_policy;
+extern struct device_attribute dev_attr_em_message_type;
+extern struct device_attribute dev_attr_em_message;
+extern struct device_attribute dev_attr_sw_activity;
+
+enum sw_activity {
+	OFF,
+	BLINK_ON,
+	BLINK_OFF,
+};
 
 #ifdef CONFIG_ATA_SFF
 struct ata_ioports {
@@ -597,10 +616,14 @@ struct ata_eh_info {
 struct ata_eh_context {
 	struct ata_eh_info	i;
 	int			tries[ATA_MAX_DEVICES];
+	int			cmd_timeout_idx[ATA_MAX_DEVICES]
+					[ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
 	unsigned int		classes[ATA_MAX_DEVICES];
 	unsigned int		did_probe_mask;
 	unsigned int		saved_ncq_enabled;
 	u8			saved_xfer_mode[ATA_MAX_DEVICES];
+	/* timestamp for the last reset attempt or success */
+	unsigned long		last_reset;
 };
 
 struct ata_acpi_drive
@@ -692,6 +715,7 @@ struct ata_port {
 	struct timer_list	fastdrain_timer;
 	unsigned long		fastdrain_cnt;
 
+	int			em_message_type;
 	void			*private_data;
 
 #ifdef CONFIG_ATA_ACPI
@@ -783,6 +807,12 @@ struct ata_port_operations {
 	u8   (*bmdma_status)(struct ata_port *ap);
 #endif /* CONFIG_ATA_SFF */
 
+	ssize_t (*em_show)(struct ata_port *ap, char *buf);
+	ssize_t (*em_store)(struct ata_port *ap, const char *message,
+			    size_t size);
+	ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf);
+	ssize_t (*sw_activity_store)(struct ata_device *dev,
+				     enum sw_activity val);
 	/*
 	 * Obsolete
 	 */
@@ -895,8 +925,7 @@ extern void ata_host_resume(struct ata_host *host);
 #endif
 extern int ata_ratelimit(void);
 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
-			     unsigned long interval_msec,
-			     unsigned long timeout_msec);
+			     unsigned long interval, unsigned long timeout);
 extern int atapi_cmd_type(u8 opcode);
 extern void ata_tf_to_fis(const struct ata_taskfile *tf,
 			  u8 pmp, int is_cmd, u8 *fis);
@@ -1389,6 +1418,12 @@ static inline int ata_check_ready(u8 status)
 	return 0;
 }
 
+static inline unsigned long ata_deadline(unsigned long from_jiffies,
+					 unsigned long timeout_msecs)
+{
+	return from_jiffies + msecs_to_jiffies(timeout_msecs);
+}
+
 
 /**************************************************************************
  * PMP - drivers/ata/libata-pmp.c
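Example (not part of the patch): the timeout constants above move from jiffies expressions to plain milliseconds, and ata_deadline() converts such a value into a jiffies deadline. A hedged sketch of the pattern is below; example_device_ready() is a placeholder for a real status check.

#include <linux/jiffies.h>
#include <linux/delay.h>

/* Placeholder for a driver-specific readiness test. */
static bool example_device_ready(void);

static int example_wait_for_device(void)
{
	/* "now" plus 800ms; ATA_TMOUT_FF_WAIT is a millisecond count after
	 * this change, no longer a jiffies expression. */
	unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_FF_WAIT);

	while (!example_device_ready()) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}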
diff --git a/include/linux/list.h b/include/linux/list.h
index 08cf4f651889..139ec41d9c2e 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -85,65 +85,6 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
 }
 
 /*
- * Insert a new entry between two known consecutive entries.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-static inline void __list_add_rcu(struct list_head * new,
-		struct list_head * prev, struct list_head * next)
-{
-	new->next = next;
-	new->prev = prev;
-	smp_wmb();
-	next->prev = new;
-	prev->next = new;
-}
-
-/**
- * list_add_rcu - add a new entry to rcu-protected list
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as list_add_rcu()
- * or list_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * list_for_each_entry_rcu().
- */
-static inline void list_add_rcu(struct list_head *new, struct list_head *head)
-{
-	__list_add_rcu(new, head, head->next);
-}
-
-/**
- * list_add_tail_rcu - add a new entry to rcu-protected list
- * @new: new entry to be added
- * @head: list head to add it before
- *
- * Insert a new entry before the specified head.
- * This is useful for implementing queues.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as list_add_tail_rcu()
- * or list_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * list_for_each_entry_rcu().
- */
-static inline void list_add_tail_rcu(struct list_head *new,
-					struct list_head *head)
-{
-	__list_add_rcu(new, head->prev, head);
-}
-
-/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
@@ -174,36 +115,6 @@ extern void list_del(struct list_head *entry);
 #endif
 
 /**
- * list_del_rcu - deletes entry from list without re-initialization
- * @entry: the element to delete from the list.
- *
- * Note: list_empty() on entry does not return true after this,
- * the entry is in an undefined state. It is useful for RCU based
- * lockfree traversal.
- *
- * In particular, it means that we can not poison the forward
- * pointers that may still be used for walking the list.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as list_del_rcu()
- * or list_add_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * list_for_each_entry_rcu().
- *
- * Note that the caller is not permitted to immediately free
- * the newly deleted entry. Instead, either synchronize_rcu()
- * or call_rcu() must be used to defer freeing until an RCU
- * grace period has elapsed.
- */
-static inline void list_del_rcu(struct list_head *entry)
-{
-	__list_del(entry->prev, entry->next);
-	entry->prev = LIST_POISON2;
-}
-
-/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
@@ -227,25 +138,6 @@ static inline void list_replace_init(struct list_head *old,
 }
 
 /**
- * list_replace_rcu - replace old entry by new one
- * @old : the element to be replaced
- * @new : the new element to insert
- *
- * The @old entry will be replaced with the @new entry atomically.
- * Note: @old should not be empty.
- */
-static inline void list_replace_rcu(struct list_head *old,
-				struct list_head *new)
-{
-	new->next = old->next;
-	new->prev = old->prev;
-	smp_wmb();
-	new->next->prev = new;
-	new->prev->next = new;
-	old->prev = LIST_POISON2;
-}
-
-/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
@@ -369,62 +261,6 @@ static inline void list_splice_init(struct list_head *list,
 }
 
 /**
- * list_splice_init_rcu - splice an RCU-protected list into an existing list.
- * @list: the RCU-protected list to splice
- * @head: the place in the list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
- *
- * @head can be RCU-read traversed concurrently with this function.
- *
- * Note that this function blocks.
- *
- * Important note: the caller must take whatever action is necessary to
- * prevent any other updates to @head. In principle, it is possible
- * to modify the list as soon as sync() begins execution.
- * If this sort of thing becomes necessary, an alternative version
- * based on call_rcu() could be created. But only if -really-
- * needed -- there is no shortage of RCU API members.
- */
-static inline void list_splice_init_rcu(struct list_head *list,
-					struct list_head *head,
-					void (*sync)(void))
-{
-	struct list_head *first = list->next;
-	struct list_head *last = list->prev;
-	struct list_head *at = head->next;
-
-	if (list_empty(head))
-		return;
-
-	/* "first" and "last" tracking list, so initialize it. */
-
-	INIT_LIST_HEAD(list);
-
-	/*
-	 * At this point, the list body still points to the source list.
-	 * Wait for any readers to finish using the list before splicing
-	 * the list body into the new list. Any new readers will see
-	 * an empty list.
-	 */
-
-	sync();
-
-	/*
-	 * Readers are finished with the source list, so perform splice.
-	 * The order is important if the new list is global and accessible
-	 * to concurrent RCU readers. Note that RCU readers are not
-	 * permitted to traverse the prev pointers without excluding
-	 * this function.
-	 */
-
-	last->next = at;
-	smp_wmb();
-	head->next = first;
-	first->prev = head;
-	at->prev = last;
-}
-
-/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
@@ -629,57 +465,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	     &pos->member != (head); 	\
 	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))
 
-/**
- * list_for_each_rcu - iterate over an rcu-protected list
| 634 | * @pos: the &struct list_head to use as a loop cursor. | ||
| 635 | * @head: the head for your list. | ||
| 636 | * | ||
| 637 | * This list-traversal primitive may safely run concurrently with | ||
| 638 | * the _rcu list-mutation primitives such as list_add_rcu() | ||
| 639 | * as long as the traversal is guarded by rcu_read_lock(). | ||
| 640 | */ | ||
| 641 | #define list_for_each_rcu(pos, head) \ | ||
| 642 | for (pos = rcu_dereference((head)->next); \ | ||
| 643 | prefetch(pos->next), pos != (head); \ | ||
| 644 | pos = rcu_dereference(pos->next)) | ||
| 645 | |||
| 646 | #define __list_for_each_rcu(pos, head) \ | ||
| 647 | for (pos = rcu_dereference((head)->next); \ | ||
| 648 | pos != (head); \ | ||
| 649 | pos = rcu_dereference(pos->next)) | ||
| 650 | |||
| 651 | /** | ||
| 652 | * list_for_each_entry_rcu - iterate over rcu list of given type | ||
| 653 | * @pos: the type * to use as a loop cursor. | ||
| 654 | * @head: the head for your list. | ||
| 655 | * @member: the name of the list_struct within the struct. | ||
| 656 | * | ||
| 657 | * This list-traversal primitive may safely run concurrently with | ||
| 658 | * the _rcu list-mutation primitives such as list_add_rcu() | ||
| 659 | * as long as the traversal is guarded by rcu_read_lock(). | ||
| 660 | */ | ||
| 661 | #define list_for_each_entry_rcu(pos, head, member) \ | ||
| 662 | for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \ | ||
| 663 | prefetch(pos->member.next), &pos->member != (head); \ | ||
| 664 | pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member)) | ||
| 665 | |||
| 666 | |||
| 667 | /** | ||
| 668 | * list_for_each_continue_rcu | ||
| 669 | * @pos: the &struct list_head to use as a loop cursor. | ||
| 670 | * @head: the head for your list. | ||
| 671 | * | ||
| 672 | * Iterate over an rcu-protected list, continuing after current point. | ||
| 673 | * | ||
| 674 | * This list-traversal primitive may safely run concurrently with | ||
| 675 | * the _rcu list-mutation primitives such as list_add_rcu() | ||
| 676 | * as long as the traversal is guarded by rcu_read_lock(). | ||
| 677 | */ | ||
| 678 | #define list_for_each_continue_rcu(pos, head) \ | ||
| 679 | for ((pos) = rcu_dereference((pos)->next); \ | ||
| 680 | prefetch((pos)->next), (pos) != (head); \ | ||
| 681 | (pos) = rcu_dereference((pos)->next)) | ||
| 682 | |||
| 683 | /* | 468 | /* |
| 684 | * Double linked lists with a single pointer list head. | 469 | * Double linked lists with a single pointer list head. |
| 685 | * Mostly useful for hash tables where the two pointer list head is | 470 | * Mostly useful for hash tables where the two pointer list head is |
| @@ -730,31 +515,6 @@ static inline void hlist_del(struct hlist_node *n) | |||
| 730 | n->pprev = LIST_POISON2; | 515 | n->pprev = LIST_POISON2; |
| 731 | } | 516 | } |
| 732 | 517 | ||
| 733 | /** | ||
| 734 | * hlist_del_rcu - deletes entry from hash list without re-initialization | ||
| 735 | * @n: the element to delete from the hash list. | ||
| 736 | * | ||
| 737 | * Note: list_unhashed() on entry does not return true after this, | ||
| 738 | * the entry is in an undefined state. It is useful for RCU based | ||
| 739 | * lockfree traversal. | ||
| 740 | * | ||
| 741 | * In particular, it means that we can not poison the forward | ||
| 742 | * pointers that may still be used for walking the hash list. | ||
| 743 | * | ||
| 744 | * The caller must take whatever precautions are necessary | ||
| 745 | * (such as holding appropriate locks) to avoid racing | ||
| 746 | * with another list-mutation primitive, such as hlist_add_head_rcu() | ||
| 747 | * or hlist_del_rcu(), running on this same list. | ||
| 748 | * However, it is perfectly legal to run concurrently with | ||
| 749 | * the _rcu list-traversal primitives, such as | ||
| 750 | * hlist_for_each_entry(). | ||
| 751 | */ | ||
| 752 | static inline void hlist_del_rcu(struct hlist_node *n) | ||
| 753 | { | ||
| 754 | __hlist_del(n); | ||
| 755 | n->pprev = LIST_POISON2; | ||
| 756 | } | ||
| 757 | |||
| 758 | static inline void hlist_del_init(struct hlist_node *n) | 518 | static inline void hlist_del_init(struct hlist_node *n) |
| 759 | { | 519 | { |
| 760 | if (!hlist_unhashed(n)) { | 520 | if (!hlist_unhashed(n)) { |
| @@ -763,27 +523,6 @@ static inline void hlist_del_init(struct hlist_node *n) | |||
| 763 | } | 523 | } |
| 764 | } | 524 | } |
| 765 | 525 | ||
| 766 | /** | ||
| 767 | * hlist_replace_rcu - replace old entry by new one | ||
| 768 | * @old : the element to be replaced | ||
| 769 | * @new : the new element to insert | ||
| 770 | * | ||
| 771 | * The @old entry will be replaced with the @new entry atomically. | ||
| 772 | */ | ||
| 773 | static inline void hlist_replace_rcu(struct hlist_node *old, | ||
| 774 | struct hlist_node *new) | ||
| 775 | { | ||
| 776 | struct hlist_node *next = old->next; | ||
| 777 | |||
| 778 | new->next = next; | ||
| 779 | new->pprev = old->pprev; | ||
| 780 | smp_wmb(); | ||
| 781 | if (next) | ||
| 782 | new->next->pprev = &new->next; | ||
| 783 | *new->pprev = new; | ||
| 784 | old->pprev = LIST_POISON2; | ||
| 785 | } | ||
| 786 | |||
| 787 | static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) | 526 | static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) |
| 788 | { | 527 | { |
| 789 | struct hlist_node *first = h->first; | 528 | struct hlist_node *first = h->first; |
| @@ -794,38 +533,6 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) | |||
| 794 | n->pprev = &h->first; | 533 | n->pprev = &h->first; |
| 795 | } | 534 | } |
| 796 | 535 | ||
| 797 | |||
| 798 | /** | ||
| 799 | * hlist_add_head_rcu | ||
| 800 | * @n: the element to add to the hash list. | ||
| 801 | * @h: the list to add to. | ||
| 802 | * | ||
| 803 | * Description: | ||
| 804 | * Adds the specified element to the specified hlist, | ||
| 805 | * while permitting racing traversals. | ||
| 806 | * | ||
| 807 | * The caller must take whatever precautions are necessary | ||
| 808 | * (such as holding appropriate locks) to avoid racing | ||
| 809 | * with another list-mutation primitive, such as hlist_add_head_rcu() | ||
| 810 | * or hlist_del_rcu(), running on this same list. | ||
| 811 | * However, it is perfectly legal to run concurrently with | ||
| 812 | * the _rcu list-traversal primitives, such as | ||
| 813 | * hlist_for_each_entry_rcu(), used to prevent memory-consistency | ||
| 814 | * problems on Alpha CPUs. Regardless of the type of CPU, the | ||
| 815 | * list-traversal primitive must be guarded by rcu_read_lock(). | ||
| 816 | */ | ||
| 817 | static inline void hlist_add_head_rcu(struct hlist_node *n, | ||
| 818 | struct hlist_head *h) | ||
| 819 | { | ||
| 820 | struct hlist_node *first = h->first; | ||
| 821 | n->next = first; | ||
| 822 | n->pprev = &h->first; | ||
| 823 | smp_wmb(); | ||
| 824 | if (first) | ||
| 825 | first->pprev = &n->next; | ||
| 826 | h->first = n; | ||
| 827 | } | ||
| 828 | |||
| 829 | /* next must be != NULL */ | 536 | /* next must be != NULL */ |
| 830 | static inline void hlist_add_before(struct hlist_node *n, | 537 | static inline void hlist_add_before(struct hlist_node *n, |
| 831 | struct hlist_node *next) | 538 | struct hlist_node *next) |
| @@ -847,63 +554,6 @@ static inline void hlist_add_after(struct hlist_node *n, | |||
| 847 | next->next->pprev = &next->next; | 554 | next->next->pprev = &next->next; |
| 848 | } | 555 | } |
| 849 | 556 | ||
| 850 | /** | ||
| 851 | * hlist_add_before_rcu | ||
| 852 | * @n: the new element to add to the hash list. | ||
| 853 | * @next: the existing element to add the new element before. | ||
| 854 | * | ||
| 855 | * Description: | ||
| 856 | * Adds the specified element to the specified hlist | ||
| 857 | * before the specified node while permitting racing traversals. | ||
| 858 | * | ||
| 859 | * The caller must take whatever precautions are necessary | ||
| 860 | * (such as holding appropriate locks) to avoid racing | ||
| 861 | * with another list-mutation primitive, such as hlist_add_head_rcu() | ||
| 862 | * or hlist_del_rcu(), running on this same list. | ||
| 863 | * However, it is perfectly legal to run concurrently with | ||
| 864 | * the _rcu list-traversal primitives, such as | ||
| 865 | * hlist_for_each_entry_rcu(), used to prevent memory-consistency | ||
| 866 | * problems on Alpha CPUs. | ||
| 867 | */ | ||
| 868 | static inline void hlist_add_before_rcu(struct hlist_node *n, | ||
| 869 | struct hlist_node *next) | ||
| 870 | { | ||
| 871 | n->pprev = next->pprev; | ||
| 872 | n->next = next; | ||
| 873 | smp_wmb(); | ||
| 874 | next->pprev = &n->next; | ||
| 875 | *(n->pprev) = n; | ||
| 876 | } | ||
| 877 | |||
| 878 | /** | ||
| 879 | * hlist_add_after_rcu | ||
| 880 | * @prev: the existing element to add the new element after. | ||
| 881 | * @n: the new element to add to the hash list. | ||
| 882 | * | ||
| 883 | * Description: | ||
| 884 | * Adds the specified element to the specified hlist | ||
| 885 | * after the specified node while permitting racing traversals. | ||
| 886 | * | ||
| 887 | * The caller must take whatever precautions are necessary | ||
| 888 | * (such as holding appropriate locks) to avoid racing | ||
| 889 | * with another list-mutation primitive, such as hlist_add_head_rcu() | ||
| 890 | * or hlist_del_rcu(), running on this same list. | ||
| 891 | * However, it is perfectly legal to run concurrently with | ||
| 892 | * the _rcu list-traversal primitives, such as | ||
| 893 | * hlist_for_each_entry_rcu(), used to prevent memory-consistency | ||
| 894 | * problems on Alpha CPUs. | ||
| 895 | */ | ||
| 896 | static inline void hlist_add_after_rcu(struct hlist_node *prev, | ||
| 897 | struct hlist_node *n) | ||
| 898 | { | ||
| 899 | n->next = prev->next; | ||
| 900 | n->pprev = &prev->next; | ||
| 901 | smp_wmb(); | ||
| 902 | prev->next = n; | ||
| 903 | if (n->next) | ||
| 904 | n->next->pprev = &n->next; | ||
| 905 | } | ||
| 906 | |||
| 907 | #define hlist_entry(ptr, type, member) container_of(ptr,type,member) | 557 | #define hlist_entry(ptr, type, member) container_of(ptr,type,member) |
| 908 | 558 | ||
| 909 | #define hlist_for_each(pos, head) \ | 559 | #define hlist_for_each(pos, head) \ |
| @@ -964,21 +614,4 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
| 964 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | 614 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
| 965 | pos = n) | 615 | pos = n) |
| 966 | 616 | ||
| 967 | /** | ||
| 968 | * hlist_for_each_entry_rcu - iterate over rcu list of given type | ||
| 969 | * @tpos: the type * to use as a loop cursor. | ||
| 970 | * @pos: the &struct hlist_node to use as a loop cursor. | ||
| 971 | * @head: the head for your list. | ||
| 972 | * @member: the name of the hlist_node within the struct. | ||
| 973 | * | ||
| 974 | * This list-traversal primitive may safely run concurrently with | ||
| 975 | * the _rcu list-mutation primitives such as hlist_add_head_rcu() | ||
| 976 | * as long as the traversal is guarded by rcu_read_lock(). | ||
| 977 | */ | ||
| 978 | #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ | ||
| 979 | for (pos = rcu_dereference((head)->first); \ | ||
| 980 | pos && ({ prefetch(pos->next); 1;}) && \ | ||
| 981 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | ||
| 982 | pos = rcu_dereference(pos->next)) | ||
| 983 | |||
| 984 | #endif | 617 | #endif |
diff --git a/include/linux/lm_interface.h b/include/linux/lm_interface.h index f274997bc283..2ed8fa1b762b 100644 --- a/include/linux/lm_interface.h +++ b/include/linux/lm_interface.h | |||
| @@ -122,11 +122,9 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data); | |||
| 122 | */ | 122 | */ |
| 123 | 123 | ||
| 124 | #define LM_OUT_ST_MASK 0x00000003 | 124 | #define LM_OUT_ST_MASK 0x00000003 |
| 125 | #define LM_OUT_CACHEABLE 0x00000004 | ||
| 126 | #define LM_OUT_CANCELED 0x00000008 | 125 | #define LM_OUT_CANCELED 0x00000008 |
| 127 | #define LM_OUT_ASYNC 0x00000080 | 126 | #define LM_OUT_ASYNC 0x00000080 |
| 128 | #define LM_OUT_ERROR 0x00000100 | 127 | #define LM_OUT_ERROR 0x00000100 |
| 129 | #define LM_OUT_CONV_DEADLK 0x00000200 | ||
| 130 | 128 | ||
| 131 | /* | 129 | /* |
| 132 | * lm_callback_t types | 130 | * lm_callback_t types |
| @@ -138,9 +136,6 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data); | |||
| 138 | * LM_CB_NEED_RECOVERY | 136 | * LM_CB_NEED_RECOVERY |
| 139 | * The given journal needs to be recovered. | 137 | * The given journal needs to be recovered. |
| 140 | * | 138 | * |
| 141 | * LM_CB_DROPLOCKS | ||
| 142 | * Reduce the number of cached locks. | ||
| 143 | * | ||
| 144 | * LM_CB_ASYNC | 139 | * LM_CB_ASYNC |
| 145 | * The given lock has been granted. | 140 | * The given lock has been granted. |
| 146 | */ | 141 | */ |
| @@ -149,7 +144,6 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data); | |||
| 149 | #define LM_CB_NEED_D 258 | 144 | #define LM_CB_NEED_D 258 |
| 150 | #define LM_CB_NEED_S 259 | 145 | #define LM_CB_NEED_S 259 |
| 151 | #define LM_CB_NEED_RECOVERY 260 | 146 | #define LM_CB_NEED_RECOVERY 260 |
| 152 | #define LM_CB_DROPLOCKS 261 | ||
| 153 | #define LM_CB_ASYNC 262 | 147 | #define LM_CB_ASYNC 262 |
| 154 | 148 | ||
| 155 | /* | 149 | /* |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index a744383d16e9..81b3dd5206e0 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -398,7 +398,8 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm | |||
| 398 | int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); | 398 | int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); |
| 399 | int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); | 399 | int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); |
| 400 | 400 | ||
| 401 | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); | 401 | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
| 402 | int block_mcast_loopback); | ||
| 402 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); | 403 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); |
| 403 | 404 | ||
| 404 | int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, | 405 | int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, |
diff --git a/include/linux/mpage.h b/include/linux/mpage.h index 068a0c9946af..5c42821da2d1 100644 --- a/include/linux/mpage.h +++ b/include/linux/mpage.h | |||
| @@ -11,11 +11,21 @@ | |||
| 11 | */ | 11 | */ |
| 12 | #ifdef CONFIG_BLOCK | 12 | #ifdef CONFIG_BLOCK |
| 13 | 13 | ||
| 14 | struct mpage_data { | ||
| 15 | struct bio *bio; | ||
| 16 | sector_t last_block_in_bio; | ||
| 17 | get_block_t *get_block; | ||
| 18 | unsigned use_writepage; | ||
| 19 | }; | ||
| 20 | |||
| 14 | struct writeback_control; | 21 | struct writeback_control; |
| 15 | 22 | ||
| 23 | struct bio *mpage_bio_submit(int rw, struct bio *bio); | ||
| 16 | int mpage_readpages(struct address_space *mapping, struct list_head *pages, | 24 | int mpage_readpages(struct address_space *mapping, struct list_head *pages, |
| 17 | unsigned nr_pages, get_block_t get_block); | 25 | unsigned nr_pages, get_block_t get_block); |
| 18 | int mpage_readpage(struct page *page, get_block_t get_block); | 26 | int mpage_readpage(struct page *page, get_block_t get_block); |
| 27 | int __mpage_writepage(struct page *page, struct writeback_control *wbc, | ||
| 28 | void *data); | ||
| 19 | int mpage_writepages(struct address_space *mapping, | 29 | int mpage_writepages(struct address_space *mapping, |
| 20 | struct writeback_control *wbc, get_block_t get_block); | 30 | struct writeback_control *wbc, get_block_t get_block); |
| 21 | int mpage_writepage(struct page *page, get_block_t *get_block, | 31 | int mpage_writepage(struct page *page, get_block_t *get_block, |
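With struct mpage_data, __mpage_writepage() and mpage_bio_submit() now exported, a filesystem can drive the generic per-page writer from its own ->writepages() method. A sketch, not part of the patch, under the assumption that write_cache_pages() is used as the iterator (myfs_writepages and myfs_get_block are hypothetical names):

	static int myfs_writepages(struct address_space *mapping,
				   struct writeback_control *wbc)
	{
		struct mpage_data mpd = {
			.bio = NULL,
			.last_block_in_bio = 0,
			.get_block = myfs_get_block,	/* hypothetical get_block_t */
			.use_writepage = 1,
		};
		int ret;

		/* Walk the dirty pages, building bios via __mpage_writepage(). */
		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
		/* Submit whatever bio is still being assembled. */
		if (mpd.bio)
			mpage_bio_submit(WRITE, mpd.bio);
		return ret;
	}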
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 9007ccdfc112..208388835357 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h | |||
| @@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount); | |||
| 35 | void percpu_counter_destroy(struct percpu_counter *fbc); | 35 | void percpu_counter_destroy(struct percpu_counter *fbc); |
| 36 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount); | 36 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount); |
| 37 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); | 37 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); |
| 38 | s64 __percpu_counter_sum(struct percpu_counter *fbc); | 38 | s64 __percpu_counter_sum(struct percpu_counter *fbc, int set); |
| 39 | 39 | ||
| 40 | static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) | 40 | static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) |
| 41 | { | 41 | { |
| @@ -44,13 +44,19 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) | |||
| 44 | 44 | ||
| 45 | static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) | 45 | static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) |
| 46 | { | 46 | { |
| 47 | s64 ret = __percpu_counter_sum(fbc); | 47 | s64 ret = __percpu_counter_sum(fbc, 0); |
| 48 | return ret < 0 ? 0 : ret; | 48 | return ret < 0 ? 0 : ret; |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc) | ||
| 52 | { | ||
| 53 | return __percpu_counter_sum(fbc, 1); | ||
| 54 | } | ||
| 55 | |||
| 56 | |||
| 51 | static inline s64 percpu_counter_sum(struct percpu_counter *fbc) | 57 | static inline s64 percpu_counter_sum(struct percpu_counter *fbc) |
| 52 | { | 58 | { |
| 53 | return __percpu_counter_sum(fbc); | 59 | return __percpu_counter_sum(fbc, 0); |
| 54 | } | 60 | } |
| 55 | 61 | ||
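A sketch of why the new sum-and-set variant exists (foo_free_blocks and the watermark are hypothetical, assumed initialized elsewhere): the cheap approximate read serves the fast path, and the exact sum, which also folds the per-CPU deltas back into the central count, is paid for only near the limit.

	#define FOO_LOW_WATERMARK	128	/* illustrative threshold */

	extern struct percpu_counter foo_free_blocks;	/* initialized elsewhere */

	static s64 foo_free_blocks_left(void)
	{
		/* Fast path: the approximate value is good enough well above
		 * the watermark. */
		if (percpu_counter_read(&foo_free_blocks) > FOO_LOW_WATERMARK)
			return percpu_counter_read(&foo_free_blocks);

		/* Near the watermark: compute the exact total; this also folds
		 * the per-CPU deltas into fbc->count, so later
		 * percpu_counter_read() calls start from the fresh value. */
		return percpu_counter_sum_and_set(&foo_free_blocks);
	}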
| 56 | static inline s64 percpu_counter_read(struct percpu_counter *fbc) | 62 | static inline s64 percpu_counter_read(struct percpu_counter *fbc) |
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index b3aa05baab8a..8c774905dcfe 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h | |||
| @@ -151,7 +151,10 @@ extern struct lockdep_map rcu_lock_map; | |||
| 151 | 151 | ||
| 152 | #define __synchronize_sched() synchronize_rcu() | 152 | #define __synchronize_sched() synchronize_rcu() |
| 153 | 153 | ||
| 154 | #define call_rcu_sched(head, func) call_rcu(head, func) | ||
| 155 | |||
| 154 | extern void __rcu_init(void); | 156 | extern void __rcu_init(void); |
| 157 | #define rcu_init_sched() do { } while (0) | ||
| 155 | extern void rcu_check_callbacks(int cpu, int user); | 158 | extern void rcu_check_callbacks(int cpu, int user); |
| 156 | extern void rcu_restart_cpu(int cpu); | 159 | extern void rcu_restart_cpu(int cpu); |
| 157 | 160 | ||
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index bde4586f4382..b0f39be08b6c 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
| @@ -1,6 +1,373 @@ | |||
| 1 | #ifndef _LINUX_RCULIST_H | 1 | #ifndef _LINUX_RCULIST_H |
| 2 | #define _LINUX_RCULIST_H | 2 | #define _LINUX_RCULIST_H |
| 3 | 3 | ||
| 4 | #ifdef __KERNEL__ | ||
| 5 | |||
| 6 | /* | ||
| 7 | * RCU-protected list version | ||
| 8 | */ | ||
| 4 | #include <linux/list.h> | 9 | #include <linux/list.h> |
| 10 | #include <linux/rcupdate.h> | ||
| 11 | |||
| 12 | /* | ||
| 13 | * Insert a new entry between two known consecutive entries. | ||
| 14 | * | ||
| 15 | * This is only for internal list manipulation where we know | ||
| 16 | * the prev/next entries already! | ||
| 17 | */ | ||
| 18 | static inline void __list_add_rcu(struct list_head *new, | ||
| 19 | struct list_head *prev, struct list_head *next) | ||
| 20 | { | ||
| 21 | new->next = next; | ||
| 22 | new->prev = prev; | ||
| 23 | rcu_assign_pointer(prev->next, new); | ||
| 24 | next->prev = new; | ||
| 25 | } | ||
| 26 | |||
| 27 | /** | ||
| 28 | * list_add_rcu - add a new entry to rcu-protected list | ||
| 29 | * @new: new entry to be added | ||
| 30 | * @head: list head to add it after | ||
| 31 | * | ||
| 32 | * Insert a new entry after the specified head. | ||
| 33 | * This is good for implementing stacks. | ||
| 34 | * | ||
| 35 | * The caller must take whatever precautions are necessary | ||
| 36 | * (such as holding appropriate locks) to avoid racing | ||
| 37 | * with another list-mutation primitive, such as list_add_rcu() | ||
| 38 | * or list_del_rcu(), running on this same list. | ||
| 39 | * However, it is perfectly legal to run concurrently with | ||
| 40 | * the _rcu list-traversal primitives, such as | ||
| 41 | * list_for_each_entry_rcu(). | ||
| 42 | */ | ||
| 43 | static inline void list_add_rcu(struct list_head *new, struct list_head *head) | ||
| 44 | { | ||
| 45 | __list_add_rcu(new, head, head->next); | ||
| 46 | } | ||
| 47 | |||
| 48 | /** | ||
| 49 | * list_add_tail_rcu - add a new entry to rcu-protected list | ||
| 50 | * @new: new entry to be added | ||
| 51 | * @head: list head to add it before | ||
| 52 | * | ||
| 53 | * Insert a new entry before the specified head. | ||
| 54 | * This is useful for implementing queues. | ||
| 55 | * | ||
| 56 | * The caller must take whatever precautions are necessary | ||
| 57 | * (such as holding appropriate locks) to avoid racing | ||
| 58 | * with another list-mutation primitive, such as list_add_tail_rcu() | ||
| 59 | * or list_del_rcu(), running on this same list. | ||
| 60 | * However, it is perfectly legal to run concurrently with | ||
| 61 | * the _rcu list-traversal primitives, such as | ||
| 62 | * list_for_each_entry_rcu(). | ||
| 63 | */ | ||
| 64 | static inline void list_add_tail_rcu(struct list_head *new, | ||
| 65 | struct list_head *head) | ||
| 66 | { | ||
| 67 | __list_add_rcu(new, head->prev, head); | ||
| 68 | } | ||
| 69 | |||
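For orientation, a minimal writer-side sketch (not part of the patch; struct foo, foo_list and foo_lock are hypothetical): updaters serialize against each other with an ordinary spinlock, while readers may traverse the list concurrently under rcu_read_lock(). The rcu_head field is used by the deletion sketch further down.

	#include <linux/rculist.h>
	#include <linux/spinlock.h>

	struct foo {
		int data;
		struct list_head list;
		struct rcu_head rcu;	/* for deferred freeing, see below */
	};

	static LIST_HEAD(foo_list);
	static DEFINE_SPINLOCK(foo_lock);

	/* Updater: publish a new entry at the tail; the lock only excludes
	 * other updaters, never the RCU readers. */
	static void foo_add(struct foo *new)
	{
		spin_lock(&foo_lock);
		list_add_tail_rcu(&new->list, &foo_list);
		spin_unlock(&foo_lock);
	}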
| 70 | /** | ||
| 71 | * list_del_rcu - deletes entry from list without re-initialization | ||
| 72 | * @entry: the element to delete from the list. | ||
| 73 | * | ||
| 74 | * Note: list_empty() on entry does not return true after this, | ||
| 75 | * the entry is in an undefined state. It is useful for RCU based | ||
| 76 | * lockfree traversal. | ||
| 77 | * | ||
| 78 | * In particular, it means that we can not poison the forward | ||
| 79 | * pointers that may still be used for walking the list. | ||
| 80 | * | ||
| 81 | * The caller must take whatever precautions are necessary | ||
| 82 | * (such as holding appropriate locks) to avoid racing | ||
| 83 | * with another list-mutation primitive, such as list_del_rcu() | ||
| 84 | * or list_add_rcu(), running on this same list. | ||
| 85 | * However, it is perfectly legal to run concurrently with | ||
| 86 | * the _rcu list-traversal primitives, such as | ||
| 87 | * list_for_each_entry_rcu(). | ||
| 88 | * | ||
| 89 | * Note that the caller is not permitted to immediately free | ||
| 90 | * the newly deleted entry. Instead, either synchronize_rcu() | ||
| 91 | * or call_rcu() must be used to defer freeing until an RCU | ||
| 92 | * grace period has elapsed. | ||
| 93 | */ | ||
| 94 | static inline void list_del_rcu(struct list_head *entry) | ||
| 95 | { | ||
| 96 | __list_del(entry->prev, entry->next); | ||
| 97 | entry->prev = LIST_POISON2; | ||
| 98 | } | ||
| 99 | |||
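Continuing the hypothetical foo sketch, a deletion path that obeys the note above: the entry is unlinked under the updater lock and its memory is reclaimed only after an RCU grace period.

	/* needs <linux/slab.h> for kfree() */
	static void foo_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	static void foo_del(struct foo *victim)
	{
		spin_lock(&foo_lock);
		list_del_rcu(&victim->list);
		spin_unlock(&foo_lock);
		/* Defer the free past a grace period; alternatively:
		 * synchronize_rcu(); kfree(victim); */
		call_rcu(&victim->rcu, foo_free_rcu);
	}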
| 100 | /** | ||
| 101 | * list_replace_rcu - replace old entry by new one | ||
| 102 | * @old : the element to be replaced | ||
| 103 | * @new : the new element to insert | ||
| 104 | * | ||
| 105 | * The @old entry will be replaced with the @new entry atomically. | ||
| 106 | * Note: @old should not be empty. | ||
| 107 | */ | ||
| 108 | static inline void list_replace_rcu(struct list_head *old, | ||
| 109 | struct list_head *new) | ||
| 110 | { | ||
| 111 | new->next = old->next; | ||
| 112 | new->prev = old->prev; | ||
| 113 | rcu_assign_pointer(new->prev->next, new); | ||
| 114 | new->next->prev = new; | ||
| 115 | old->prev = LIST_POISON2; | ||
| 116 | } | ||
| 117 | |||
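A replacement sketch for the same hypothetical list: concurrent readers see either the old or the new entry, never a half-updated one, and the old entry is freed only after a grace period.

	static void foo_replace(struct foo *old, struct foo *new)
	{
		spin_lock(&foo_lock);
		list_replace_rcu(&old->list, &new->list);
		spin_unlock(&foo_lock);
		call_rcu(&old->rcu, foo_free_rcu);
	}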
| 118 | /** | ||
| 119 | * list_splice_init_rcu - splice an RCU-protected list into an existing list. | ||
| 120 | * @list: the RCU-protected list to splice | ||
| 121 | * @head: the place in the list to splice the first list into | ||
| 122 | * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... | ||
| 123 | * | ||
| 124 | * @head can be RCU-read traversed concurrently with this function. | ||
| 125 | * | ||
| 126 | * Note that this function blocks. | ||
| 127 | * | ||
| 128 | * Important note: the caller must take whatever action is necessary to | ||
| 129 | * prevent any other updates to @head. In principle, it is possible | ||
| 130 | * to modify the list as soon as sync() begins execution. | ||
| 131 | * If this sort of thing becomes necessary, an alternative version | ||
| 132 | * based on call_rcu() could be created. But only if -really- | ||
| 133 | * needed -- there is no shortage of RCU API members. | ||
| 134 | */ | ||
| 135 | static inline void list_splice_init_rcu(struct list_head *list, | ||
| 136 | struct list_head *head, | ||
| 137 | void (*sync)(void)) | ||
| 138 | { | ||
| 139 | struct list_head *first = list->next; | ||
| 140 | struct list_head *last = list->prev; | ||
| 141 | struct list_head *at = head->next; | ||
| 142 | |||
| 143 | if (list_empty(head)) | ||
| 144 | return; | ||
| 145 | |||
| 146 | /* "first" and "last" tracking list, so initialize it. */ | ||
| 147 | |||
| 148 | INIT_LIST_HEAD(list); | ||
| 149 | |||
| 150 | /* | ||
| 151 | * At this point, the list body still points to the source list. | ||
| 152 | * Wait for any readers to finish using the list before splicing | ||
| 153 | * the list body into the new list. Any new readers will see | ||
| 154 | * an empty list. | ||
| 155 | */ | ||
| 156 | |||
| 157 | sync(); | ||
| 158 | |||
| 159 | /* | ||
| 160 | * Readers are finished with the source list, so perform splice. | ||
| 161 | * The order is important if the new list is global and accessible | ||
| 162 | * to concurrent RCU readers. Note that RCU readers are not | ||
| 163 | * permitted to traverse the prev pointers without excluding | ||
| 164 | * this function. | ||
| 165 | */ | ||
| 166 | |||
| 167 | last->next = at; | ||
| 168 | rcu_assign_pointer(head->next, first); | ||
| 169 | first->prev = head; | ||
| 170 | at->prev = last; | ||
| 171 | } | ||
| 172 | |||
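A splice sketch, again using the hypothetical foo_list: a privately built, non-empty staging list is folded into the RCU-readable global list, with synchronize_rcu() passed as the sync function. The caller is assumed to keep all other updaters away from both lists while this runs.

	/* Move every entry queued on @staging onto the global foo_list.
	 * Blocks in synchronize_rcu(), so call from process context with
	 * no locks held. */
	static void foo_flush_staging(struct list_head *staging)
	{
		list_splice_init_rcu(staging, &foo_list, synchronize_rcu);
	}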
| 173 | /** | ||
| 174 | * list_for_each_rcu - iterate over an rcu-protected list | ||
| 175 | * @pos: the &struct list_head to use as a loop cursor. | ||
| 176 | * @head: the head for your list. | ||
| 177 | * | ||
| 178 | * This list-traversal primitive may safely run concurrently with | ||
| 179 | * the _rcu list-mutation primitives such as list_add_rcu() | ||
| 180 | * as long as the traversal is guarded by rcu_read_lock(). | ||
| 181 | */ | ||
| 182 | #define list_for_each_rcu(pos, head) \ | ||
| 183 | for (pos = rcu_dereference((head)->next); \ | ||
| 184 | prefetch(pos->next), pos != (head); \ | ||
| 185 | pos = rcu_dereference(pos->next)) | ||
| 186 | |||
| 187 | #define __list_for_each_rcu(pos, head) \ | ||
| 188 | for (pos = rcu_dereference((head)->next); \ | ||
| 189 | pos != (head); \ | ||
| 190 | pos = rcu_dereference(pos->next)) | ||
| 191 | |||
| 192 | /** | ||
| 193 | * list_for_each_entry_rcu - iterate over rcu list of given type | ||
| 194 | * @pos: the type * to use as a loop cursor. | ||
| 195 | * @head: the head for your list. | ||
| 196 | * @member: the name of the list_struct within the struct. | ||
| 197 | * | ||
| 198 | * This list-traversal primitive may safely run concurrently with | ||
| 199 | * the _rcu list-mutation primitives such as list_add_rcu() | ||
| 200 | * as long as the traversal is guarded by rcu_read_lock(). | ||
| 201 | */ | ||
| 202 | #define list_for_each_entry_rcu(pos, head, member) \ | ||
| 203 | for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \ | ||
| 204 | prefetch(pos->member.next), &pos->member != (head); \ | ||
| 205 | pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member)) | ||
| 206 | |||
| 207 | |||
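The matching reader side for the hypothetical foo_list: this traversal may run concurrently with the add, delete and replace sketches above, as long as it stays inside the rcu_read_lock() critical section.

	static int foo_contains(int key)
	{
		struct foo *p;
		int found = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(p, &foo_list, list) {
			if (p->data == key) {
				found = 1;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}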
| 208 | /** | ||
| 209 | * list_for_each_continue_rcu | ||
| 210 | * @pos: the &struct list_head to use as a loop cursor. | ||
| 211 | * @head: the head for your list. | ||
| 212 | * | ||
| 213 | * Iterate over an rcu-protected list, continuing after current point. | ||
| 214 | * | ||
| 215 | * This list-traversal primitive may safely run concurrently with | ||
| 216 | * the _rcu list-mutation primitives such as list_add_rcu() | ||
| 217 | * as long as the traversal is guarded by rcu_read_lock(). | ||
| 218 | */ | ||
| 219 | #define list_for_each_continue_rcu(pos, head) \ | ||
| 220 | for ((pos) = rcu_dereference((pos)->next); \ | ||
| 221 | prefetch((pos)->next), (pos) != (head); \ | ||
| 222 | (pos) = rcu_dereference((pos)->next)) | ||
| 223 | |||
| 224 | /** | ||
| 225 | * hlist_del_rcu - deletes entry from hash list without re-initialization | ||
| 226 | * @n: the element to delete from the hash list. | ||
| 227 | * | ||
| 228 | * Note: list_unhashed() on entry does not return true after this, | ||
| 229 | * the entry is in an undefined state. It is useful for RCU based | ||
| 230 | * lockfree traversal. | ||
| 231 | * | ||
| 232 | * In particular, it means that we can not poison the forward | ||
| 233 | * pointers that may still be used for walking the hash list. | ||
| 234 | * | ||
| 235 | * The caller must take whatever precautions are necessary | ||
| 236 | * (such as holding appropriate locks) to avoid racing | ||
| 237 | * with another list-mutation primitive, such as hlist_add_head_rcu() | ||
| 238 | * or hlist_del_rcu(), running on this same list. | ||
| 239 | * However, it is perfectly legal to run concurrently with | ||
| 240 | * the _rcu list-traversal primitives, such as | ||
| 241 | * hlist_for_each_entry(). | ||
| 242 | */ | ||
| 243 | static inline void hlist_del_rcu(struct hlist_node *n) | ||
| 244 | { | ||
| 245 | __hlist_del(n); | ||
| 246 | n->pprev = LIST_POISON2; | ||
| 247 | } | ||
| 248 | |||
| 249 | /** | ||
| 250 | * hlist_replace_rcu - replace old entry by new one | ||
| 251 | * @old : the element to be replaced | ||
| 252 | * @new : the new element to insert | ||
| 253 | * | ||
| 254 | * The @old entry will be replaced with the @new entry atomically. | ||
| 255 | */ | ||
| 256 | static inline void hlist_replace_rcu(struct hlist_node *old, | ||
| 257 | struct hlist_node *new) | ||
| 258 | { | ||
| 259 | struct hlist_node *next = old->next; | ||
| 260 | |||
| 261 | new->next = next; | ||
| 262 | new->pprev = old->pprev; | ||
| 263 | rcu_assign_pointer(*new->pprev, new); | ||
| 264 | if (next) | ||
| 265 | new->next->pprev = &new->next; | ||
| 266 | old->pprev = LIST_POISON2; | ||
| 267 | } | ||
| 268 | |||
| 269 | /** | ||
| 270 | * hlist_add_head_rcu | ||
| 271 | * @n: the element to add to the hash list. | ||
| 272 | * @h: the list to add to. | ||
| 273 | * | ||
| 274 | * Description: | ||
| 275 | * Adds the specified element to the specified hlist, | ||
| 276 | * while permitting racing traversals. | ||
| 277 | * | ||
| 278 | * The caller must take whatever precautions are necessary | ||
| 279 | * (such as holding appropriate locks) to avoid racing | ||
| 280 | * with another list-mutation primitive, such as hlist_add_head_rcu() | ||
| 281 | * or hlist_del_rcu(), running on this same list. | ||
| 282 | * However, it is perfectly legal to run concurrently with | ||
| 283 | * the _rcu list-traversal primitives, such as | ||
| 284 | * hlist_for_each_entry_rcu(), used to prevent memory-consistency | ||
| 285 | * problems on Alpha CPUs. Regardless of the type of CPU, the | ||
| 286 | * list-traversal primitive must be guarded by rcu_read_lock(). | ||
| 287 | */ | ||
| 288 | static inline void hlist_add_head_rcu(struct hlist_node *n, | ||
| 289 | struct hlist_head *h) | ||
| 290 | { | ||
| 291 | struct hlist_node *first = h->first; | ||
| 292 | |||
| 293 | n->next = first; | ||
| 294 | n->pprev = &h->first; | ||
| 295 | rcu_assign_pointer(h->first, n); | ||
| 296 | if (first) | ||
| 297 | first->pprev = &n->next; | ||
| 298 | } | ||
| 299 | |||
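A small hash-table sketch (foo_item, foo_hash and foo_hash_lock are hypothetical; assumes <linux/hash.h> for hash_long()): insertion takes the bucket lock only to serialize updaters, while lookups run locklessly, as shown after the iterator definition below.

	#include <linux/hash.h>

	#define FOO_HASH_BITS	6

	struct foo_item {
		unsigned long key;
		struct hlist_node node;
		struct rcu_head rcu;
	};

	static struct hlist_head foo_hash[1 << FOO_HASH_BITS];
	static DEFINE_SPINLOCK(foo_hash_lock);

	/* Updater: insert at the head of the bucket while lookups proceed. */
	static void foo_hash_insert(struct foo_item *item)
	{
		struct hlist_head *bucket =
			&foo_hash[hash_long(item->key, FOO_HASH_BITS)];

		spin_lock(&foo_hash_lock);
		hlist_add_head_rcu(&item->node, bucket);
		spin_unlock(&foo_hash_lock);
	}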
| 300 | /** | ||
| 301 | * hlist_add_before_rcu | ||
| 302 | * @n: the new element to add to the hash list. | ||
| 303 | * @next: the existing element to add the new element before. | ||
| 304 | * | ||
| 305 | * Description: | ||
| 306 | * Adds the specified element to the specified hlist | ||
| 307 | * before the specified node while permitting racing traversals. | ||
| 308 | * | ||
| 309 | * The caller must take whatever precautions are necessary | ||
| 310 | * (such as holding appropriate locks) to avoid racing | ||
| 311 | * with another list-mutation primitive, such as hlist_add_head_rcu() | ||
| 312 | * or hlist_del_rcu(), running on this same list. | ||
| 313 | * However, it is perfectly legal to run concurrently with | ||
| 314 | * the _rcu list-traversal primitives, such as | ||
| 315 | * hlist_for_each_entry_rcu(), used to prevent memory-consistency | ||
| 316 | * problems on Alpha CPUs. | ||
| 317 | */ | ||
| 318 | static inline void hlist_add_before_rcu(struct hlist_node *n, | ||
| 319 | struct hlist_node *next) | ||
| 320 | { | ||
| 321 | n->pprev = next->pprev; | ||
| 322 | n->next = next; | ||
| 323 | rcu_assign_pointer(*(n->pprev), n); | ||
| 324 | next->pprev = &n->next; | ||
| 325 | } | ||
| 326 | |||
| 327 | /** | ||
| 328 | * hlist_add_after_rcu | ||
| 329 | * @prev: the existing element to add the new element after. | ||
| 330 | * @n: the new element to add to the hash list. | ||
| 331 | * | ||
| 332 | * Description: | ||
| 333 | * Adds the specified element to the specified hlist | ||
| 334 | * after the specified node while permitting racing traversals. | ||
| 335 | * | ||
| 336 | * The caller must take whatever precautions are necessary | ||
| 337 | * (such as holding appropriate locks) to avoid racing | ||
| 338 | * with another list-mutation primitive, such as hlist_add_head_rcu() | ||
| 339 | * or hlist_del_rcu(), running on this same list. | ||
| 340 | * However, it is perfectly legal to run concurrently with | ||
| 341 | * the _rcu list-traversal primitives, such as | ||
| 342 | * hlist_for_each_entry_rcu(), used to prevent memory-consistency | ||
| 343 | * problems on Alpha CPUs. | ||
| 344 | */ | ||
| 345 | static inline void hlist_add_after_rcu(struct hlist_node *prev, | ||
| 346 | struct hlist_node *n) | ||
| 347 | { | ||
| 348 | n->next = prev->next; | ||
| 349 | n->pprev = &prev->next; | ||
| 350 | rcu_assign_pointer(prev->next, n); | ||
| 351 | if (n->next) | ||
| 352 | n->next->pprev = &n->next; | ||
| 353 | } | ||
| 354 | |||
| 355 | /** | ||
| 356 | * hlist_for_each_entry_rcu - iterate over rcu list of given type | ||
| 357 | * @tpos: the type * to use as a loop cursor. | ||
| 358 | * @pos: the &struct hlist_node to use as a loop cursor. | ||
| 359 | * @head: the head for your list. | ||
| 360 | * @member: the name of the hlist_node within the struct. | ||
| 361 | * | ||
| 362 | * This list-traversal primitive may safely run concurrently with | ||
| 363 | * the _rcu list-mutation primitives such as hlist_add_head_rcu() | ||
| 364 | * as long as the traversal is guarded by rcu_read_lock(). | ||
| 365 | */ | ||
| 366 | #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ | ||
| 367 | for (pos = rcu_dereference((head)->first); \ | ||
| 368 | pos && ({ prefetch(pos->next); 1; }) && \ | ||
| 369 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | ||
| 370 | pos = rcu_dereference(pos->next)) | ||
| 5 | 371 | ||
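And the lockless lookup for that hypothetical hash table, using the four-argument form of hlist_for_each_entry_rcu() defined above (separate node and type cursors):

	static int foo_hash_contains(unsigned long key)
	{
		struct foo_item *item;
		struct hlist_node *pos;
		int found = 0;

		rcu_read_lock();
		hlist_for_each_entry_rcu(item, pos,
					 &foo_hash[hash_long(key, FOO_HASH_BITS)],
					 node) {
			if (item->key == key) {
				found = 1;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}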
| 6 | #endif /* _LINUX_RCULIST_H */ | 372 | #endif /* __KERNEL__ */ |
| 373 | #endif | ||
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index d42dbec06083..e8b4039cfb2f 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -40,6 +40,7 @@ | |||
| 40 | #include <linux/cpumask.h> | 40 | #include <linux/cpumask.h> |
| 41 | #include <linux/seqlock.h> | 41 | #include <linux/seqlock.h> |
| 42 | #include <linux/lockdep.h> | 42 | #include <linux/lockdep.h> |
| 43 | #include <linux/completion.h> | ||
| 43 | 44 | ||
| 44 | /** | 45 | /** |
| 45 | * struct rcu_head - callback structure for use with RCU | 46 | * struct rcu_head - callback structure for use with RCU |
| @@ -168,6 +169,27 @@ struct rcu_head { | |||
| 168 | (p) = (v); \ | 169 | (p) = (v); \ |
| 169 | }) | 170 | }) |
| 170 | 171 | ||
| 172 | /* Infrastructure to implement the synchronize_() primitives. */ | ||
| 173 | |||
| 174 | struct rcu_synchronize { | ||
| 175 | struct rcu_head head; | ||
| 176 | struct completion completion; | ||
| 177 | }; | ||
| 178 | |||
| 179 | extern void wakeme_after_rcu(struct rcu_head *head); | ||
| 180 | |||
| 181 | #define synchronize_rcu_xxx(name, func) \ | ||
| 182 | void name(void) \ | ||
| 183 | { \ | ||
| 184 | struct rcu_synchronize rcu; \ | ||
| 185 | \ | ||
| 186 | init_completion(&rcu.completion); \ | ||
| 187 | /* Will wake me after RCU finished. */ \ | ||
| 188 | func(&rcu.head, wakeme_after_rcu); \ | ||
| 189 | /* Wait for it. */ \ | ||
| 190 | wait_for_completion(&rcu.completion); \ | ||
| 191 | } | ||
| 192 | |||
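The macro expands to a complete function definition. A hypothetical instantiation (the function name is illustrative; the kernel pairs it with the various call_rcu*() flavors) would be:

	/* Defines void synchronize_my_rcu(void): it queues a callback with
	 * call_rcu() and sleeps in wait_for_completion() until
	 * wakeme_after_rcu() runs, i.e. until a grace period has elapsed. */
	synchronize_rcu_xxx(synchronize_my_rcu, call_rcu)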
| 171 | /** | 193 | /** |
| 172 | * synchronize_sched - block until all CPUs have exited any non-preemptive | 194 | * synchronize_sched - block until all CPUs have exited any non-preemptive |
| 173 | * kernel code sequences. | 195 | * kernel code sequences. |
| @@ -224,8 +246,8 @@ extern void call_rcu_bh(struct rcu_head *head, | |||
| 224 | /* Exported common interfaces */ | 246 | /* Exported common interfaces */ |
| 225 | extern void synchronize_rcu(void); | 247 | extern void synchronize_rcu(void); |
| 226 | extern void rcu_barrier(void); | 248 | extern void rcu_barrier(void); |
| 227 | extern long rcu_batches_completed(void); | 249 | extern void rcu_barrier_bh(void); |
| 228 | extern long rcu_batches_completed_bh(void); | 250 | extern void rcu_barrier_sched(void); |
| 229 | 251 | ||
| 230 | /* Internal to kernel */ | 252 | /* Internal to kernel */ |
| 231 | extern void rcu_init(void); | 253 | extern void rcu_init(void); |
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h index 8a05c7e20bc4..f04b64eca636 100644 --- a/include/linux/rcupreempt.h +++ b/include/linux/rcupreempt.h | |||
| @@ -40,10 +40,39 @@ | |||
| 40 | #include <linux/cpumask.h> | 40 | #include <linux/cpumask.h> |
| 41 | #include <linux/seqlock.h> | 41 | #include <linux/seqlock.h> |
| 42 | 42 | ||
| 43 | #define rcu_qsctr_inc(cpu) | 43 | struct rcu_dyntick_sched { |
| 44 | int dynticks; | ||
| 45 | int dynticks_snap; | ||
| 46 | int sched_qs; | ||
| 47 | int sched_qs_snap; | ||
| 48 | int sched_dynticks_snap; | ||
| 49 | }; | ||
| 50 | |||
| 51 | DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); | ||
| 52 | |||
| 53 | static inline void rcu_qsctr_inc(int cpu) | ||
| 54 | { | ||
| 55 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
| 56 | |||
| 57 | rdssp->sched_qs++; | ||
| 58 | } | ||
| 44 | #define rcu_bh_qsctr_inc(cpu) | 59 | #define rcu_bh_qsctr_inc(cpu) |
| 45 | #define call_rcu_bh(head, rcu) call_rcu(head, rcu) | 60 | #define call_rcu_bh(head, rcu) call_rcu(head, rcu) |
| 46 | 61 | ||
| 62 | /** | ||
| 63 | * call_rcu_sched - Queue RCU callback for invocation after sched grace period. | ||
| 64 | * @head: structure to be used for queueing the RCU updates. | ||
| 65 | * @func: actual update function to be invoked after the grace period | ||
| 66 | * | ||
| 67 | * The update function will be invoked some time after a full | ||
| 68 | * synchronize_sched()-style grace period elapses, in other words after | ||
| 69 | * all currently executing preempt-disabled sections of code (including | ||
| 70 | * hardirq handlers, NMI handlers, and local_irq_save() blocks) have | ||
| 71 | * completed. | ||
| 72 | */ | ||
| 73 | extern void call_rcu_sched(struct rcu_head *head, | ||
| 74 | void (*func)(struct rcu_head *head)); | ||
| 75 | |||
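A usage sketch for the new primitive (struct bar is hypothetical): the free is deferred until every currently running preempt-disabled region that might still reference the object has completed.

	struct bar {
		int data;
		struct rcu_head rcu;
	};

	static void bar_reclaim(struct rcu_head *head)
	{
		kfree(container_of(head, struct bar, rcu));
	}

	static void bar_retire(struct bar *b)
	{
		call_rcu_sched(&b->rcu, bar_reclaim);
	}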
| 47 | extern void __rcu_read_lock(void) __acquires(RCU); | 76 | extern void __rcu_read_lock(void) __acquires(RCU); |
| 48 | extern void __rcu_read_unlock(void) __releases(RCU); | 77 | extern void __rcu_read_unlock(void) __releases(RCU); |
| 49 | extern int rcu_pending(int cpu); | 78 | extern int rcu_pending(int cpu); |
| @@ -55,6 +84,7 @@ extern int rcu_needs_cpu(int cpu); | |||
| 55 | extern void __synchronize_sched(void); | 84 | extern void __synchronize_sched(void); |
| 56 | 85 | ||
| 57 | extern void __rcu_init(void); | 86 | extern void __rcu_init(void); |
| 87 | extern void rcu_init_sched(void); | ||
| 58 | extern void rcu_check_callbacks(int cpu, int user); | 88 | extern void rcu_check_callbacks(int cpu, int user); |
| 59 | extern void rcu_restart_cpu(int cpu); | 89 | extern void rcu_restart_cpu(int cpu); |
| 60 | extern long rcu_batches_completed(void); | 90 | extern long rcu_batches_completed(void); |
| @@ -81,20 +111,20 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); | |||
| 81 | struct softirq_action; | 111 | struct softirq_action; |
| 82 | 112 | ||
| 83 | #ifdef CONFIG_NO_HZ | 113 | #ifdef CONFIG_NO_HZ |
| 84 | DECLARE_PER_CPU(long, dynticks_progress_counter); | 114 | DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); |
| 85 | 115 | ||
| 86 | static inline void rcu_enter_nohz(void) | 116 | static inline void rcu_enter_nohz(void) |
| 87 | { | 117 | { |
| 88 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ | 118 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ |
| 89 | __get_cpu_var(dynticks_progress_counter)++; | 119 | __get_cpu_var(rcu_dyntick_sched).dynticks++; |
| 90 | WARN_ON(__get_cpu_var(dynticks_progress_counter) & 0x1); | 120 | WARN_ON(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1); |
| 91 | } | 121 | } |
| 92 | 122 | ||
| 93 | static inline void rcu_exit_nohz(void) | 123 | static inline void rcu_exit_nohz(void) |
| 94 | { | 124 | { |
| 95 | __get_cpu_var(dynticks_progress_counter)++; | ||
| 96 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | 125 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ |
| 97 | WARN_ON(!(__get_cpu_var(dynticks_progress_counter) & 0x1)); | 126 | __get_cpu_var(rcu_dyntick_sched).dynticks++; |
| 127 | WARN_ON(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1)); | ||
| 98 | } | 128 | } |
| 99 | 129 | ||
| 100 | #else /* CONFIG_NO_HZ */ | 130 | #else /* CONFIG_NO_HZ */ |
diff --git a/include/linux/smp.h b/include/linux/smp.h index 55232ccf9cfd..48262f86c969 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
| @@ -7,9 +7,18 @@ | |||
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
| 10 | #include <linux/list.h> | ||
| 11 | #include <linux/cpumask.h> | ||
| 10 | 12 | ||
| 11 | extern void cpu_idle(void); | 13 | extern void cpu_idle(void); |
| 12 | 14 | ||
| 15 | struct call_single_data { | ||
| 16 | struct list_head list; | ||
| 17 | void (*func) (void *info); | ||
| 18 | void *info; | ||
| 19 | unsigned int flags; | ||
| 20 | }; | ||
| 21 | |||
| 13 | #ifdef CONFIG_SMP | 22 | #ifdef CONFIG_SMP |
| 14 | 23 | ||
| 15 | #include <linux/preempt.h> | 24 | #include <linux/preempt.h> |
| @@ -52,15 +61,34 @@ extern void smp_cpus_done(unsigned int max_cpus); | |||
| 52 | /* | 61 | /* |
| 53 | * Call a function on all other processors | 62 | * Call a function on all other processors |
| 54 | */ | 63 | */ |
| 55 | int smp_call_function(void(*func)(void *info), void *info, int retry, int wait); | 64 | int smp_call_function(void(*func)(void *info), void *info, int wait); |
| 56 | 65 | int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, | |
| 66 | int wait); | ||
| 57 | int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, | 67 | int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, |
| 58 | int retry, int wait); | 68 | int wait); |
| 69 | void __smp_call_function_single(int cpuid, struct call_single_data *data); | ||
| 70 | |||
| 71 | /* | ||
| 72 | * Generic and arch helpers | ||
| 73 | */ | ||
| 74 | #ifdef CONFIG_USE_GENERIC_SMP_HELPERS | ||
| 75 | void generic_smp_call_function_single_interrupt(void); | ||
| 76 | void generic_smp_call_function_interrupt(void); | ||
| 77 | void init_call_single_data(void); | ||
| 78 | void ipi_call_lock(void); | ||
| 79 | void ipi_call_unlock(void); | ||
| 80 | void ipi_call_lock_irq(void); | ||
| 81 | void ipi_call_unlock_irq(void); | ||
| 82 | #else | ||
| 83 | static inline void init_call_single_data(void) | ||
| 84 | { | ||
| 85 | } | ||
| 86 | #endif | ||
| 59 | 87 | ||
| 60 | /* | 88 | /* |
| 61 | * Call a function on all processors | 89 | * Call a function on all processors |
| 62 | */ | 90 | */ |
| 63 | int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait); | 91 | int on_each_cpu(void (*func) (void *info), void *info, int wait); |
| 64 | 92 | ||
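The retry argument is gone from these cross-call interfaces. A hypothetical caller updated for the new signatures (the old calls are shown in the comments):

	static void drain_local_cache(void *unused)
	{
		/* Runs on each CPU with interrupts disabled. */
	}

	static void drain_all_caches(void)
	{
		/* was: on_each_cpu(drain_local_cache, NULL, 0, 1); */
		on_each_cpu(drain_local_cache, NULL, 1);

		/* was: smp_call_function_single(0, drain_local_cache, NULL, 0, 1); */
		smp_call_function_single(0, drain_local_cache, NULL, 1);
	}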
| 65 | #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ | 93 | #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ |
| 66 | #define MSG_ALL 0x8001 | 94 | #define MSG_ALL 0x8001 |
| @@ -90,9 +118,9 @@ static inline int up_smp_call_function(void (*func)(void *), void *info) | |||
| 90 | { | 118 | { |
| 91 | return 0; | 119 | return 0; |
| 92 | } | 120 | } |
| 93 | #define smp_call_function(func, info, retry, wait) \ | 121 | #define smp_call_function(func, info, wait) \ |
| 94 | (up_smp_call_function(func, info)) | 122 | (up_smp_call_function(func, info)) |
| 95 | #define on_each_cpu(func,info,retry,wait) \ | 123 | #define on_each_cpu(func,info,wait) \ |
| 96 | ({ \ | 124 | ({ \ |
| 97 | local_irq_disable(); \ | 125 | local_irq_disable(); \ |
| 98 | func(info); \ | 126 | func(info); \ |
| @@ -102,7 +130,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info) | |||
| 102 | static inline void smp_send_reschedule(int cpu) { } | 130 | static inline void smp_send_reschedule(int cpu) { } |
| 103 | #define num_booting_cpus() 1 | 131 | #define num_booting_cpus() 1 |
| 104 | #define smp_prepare_boot_cpu() do {} while (0) | 132 | #define smp_prepare_boot_cpu() do {} while (0) |
| 105 | #define smp_call_function_single(cpuid, func, info, retry, wait) \ | 133 | #define smp_call_function_single(cpuid, func, info, wait) \ |
| 106 | ({ \ | 134 | ({ \ |
| 107 | WARN_ON(cpuid != 0); \ | 135 | WARN_ON(cpuid != 0); \ |
| 108 | local_irq_disable(); \ | 136 | local_irq_disable(); \ |
| @@ -112,7 +140,9 @@ static inline void smp_send_reschedule(int cpu) { } | |||
| 112 | }) | 140 | }) |
| 113 | #define smp_call_function_mask(mask, func, info, wait) \ | 141 | #define smp_call_function_mask(mask, func, info, wait) \ |
| 114 | (up_smp_call_function(func, info)) | 142 | (up_smp_call_function(func, info)) |
| 115 | 143 | static inline void init_call_single_data(void) | |
| 144 | { | ||
| 145 | } | ||
| 116 | #endif /* !SMP */ | 146 | #endif /* !SMP */ |
| 117 | 147 | ||
| 118 | /* | 148 | /* |
diff --git a/include/linux/topology.h b/include/linux/topology.h index 24f3d2282e11..2158fc0d5a56 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
| @@ -179,4 +179,17 @@ void arch_update_cpu_topology(void); | |||
| 179 | #endif | 179 | #endif |
| 180 | #endif /* CONFIG_NUMA */ | 180 | #endif /* CONFIG_NUMA */ |
| 181 | 181 | ||
| 182 | #ifndef topology_physical_package_id | ||
| 183 | #define topology_physical_package_id(cpu) ((void)(cpu), -1) | ||
| 184 | #endif | ||
| 185 | #ifndef topology_core_id | ||
| 186 | #define topology_core_id(cpu) ((void)(cpu), 0) | ||
| 187 | #endif | ||
| 188 | #ifndef topology_thread_siblings | ||
| 189 | #define topology_thread_siblings(cpu) cpumask_of_cpu(cpu) | ||
| 190 | #endif | ||
| 191 | #ifndef topology_core_siblings | ||
| 192 | #define topology_core_siblings(cpu) cpumask_of_cpu(cpu) | ||
| 193 | #endif | ||
| 194 | |||
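A hypothetical debug helper showing why the fallbacks matter: it compiles on any architecture, reporting -1 / 0 where the arch does not provide real package or core identifiers.

	static void report_cpu_topology(int cpu)
	{
		printk(KERN_DEBUG "cpu%d: package %d core %d\n",
		       cpu,
		       topology_physical_package_id(cpu),
		       topology_core_id(cpu));
	}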
| 182 | #endif /* _LINUX_TOPOLOGY_H */ | 195 | #endif /* _LINUX_TOPOLOGY_H */ |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index bd91987c065f..12b15c561a1f 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
| @@ -63,6 +63,7 @@ struct writeback_control { | |||
| 63 | unsigned for_writepages:1; /* This is a writepages() call */ | 63 | unsigned for_writepages:1; /* This is a writepages() call */ |
| 64 | unsigned range_cyclic:1; /* range_start is cyclic */ | 64 | unsigned range_cyclic:1; /* range_start is cyclic */ |
| 65 | unsigned more_io:1; /* more io to be dispatched */ | 65 | unsigned more_io:1; /* more io to be dispatched */ |
| 66 | unsigned range_cont:1; | ||
| 66 | }; | 67 | }; |
| 67 | 68 | ||
| 68 | /* | 69 | /* |
