Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/ata.h         |   2
-rw-r--r--  include/linux/bootmem.h     |  36
-rw-r--r--  include/linux/cpufreq.h     |   1
-rw-r--r--  include/linux/dmaengine.h   |   7
-rw-r--r--  include/linux/hdreg.h       |   1
-rw-r--r--  include/linux/ide.h         |   1
-rw-r--r--  include/linux/libata.h      |   6
-rw-r--r--  include/linux/netdevice.h   |   1
-rw-r--r--  include/linux/percpu.h      | 108
-rw-r--r--  include/linux/rcuclassic.h  |   6
-rw-r--r--  include/linux/rcupdate.h    |   4
-rw-r--r--  include/linux/rcupreempt.h  |  15
-rw-r--r--  include/linux/rcutree.h     |   6
-rw-r--r--  include/linux/sched.h       |   4
-rw-r--r--  include/linux/serio.h       |   2
-rw-r--r--  include/linux/vmalloc.h     |   4
16 files changed, 142 insertions, 62 deletions
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 08a86d5cdf1..9a061accd8b 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -89,6 +89,8 @@ enum {
 	ATA_ID_DLF = 128,
 	ATA_ID_CSFO = 129,
 	ATA_ID_CFA_POWER = 160,
+	ATA_ID_CFA_KEY_MGMT = 162,
+	ATA_ID_CFA_MODES = 163,
 	ATA_ID_ROT_SPEED = 217,
 	ATA_ID_PIO4 = (1 << 1),
 
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 95837bfb525..455d83219fa 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
 #define BOOTMEM_DEFAULT 0
 #define BOOTMEM_EXCLUSIVE (1<<0)
 
+extern int reserve_bootmem(unsigned long addr,
+	unsigned long size,
+	int flags);
 extern int reserve_bootmem_node(pg_data_t *pgdat,
 	unsigned long physaddr,
 	unsigned long size,
 	int flags);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
-#endif
 
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
 	unsigned long align,
 	unsigned long goal);
-extern void *__alloc_bootmem(unsigned long size,
+extern void *__alloc_bootmem_nopanic(unsigned long size,
 	unsigned long align,
 	unsigned long goal);
-extern void *__alloc_bootmem_low(unsigned long size,
-	unsigned long align,
-	unsigned long goal);
 extern void *__alloc_bootmem_node(pg_data_t *pgdat,
 	unsigned long size,
 	unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 	unsigned long size,
 	unsigned long align,
 	unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+	unsigned long align,
+	unsigned long goal);
 extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 	unsigned long size,
 	unsigned long align,
 	unsigned long goal);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+
 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
 	__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_nopanic(x) \
 	__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+	__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+
+#define alloc_bootmem_low(x) \
+	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages(x) \
+	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_low_pages_node(pgdat, x) \
 	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
 	int flags);
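For orientation, not part of the diff above: the declarations in this header form the boot-time allocator interface used before the page allocator is up. A minimal sketch of typical early-init usage, with a hypothetical caller, sizes, and reserved physical range, might look like this:

/*
 * Illustrative sketch only -- not part of this diff.  The caller,
 * sizes and the reserved physical range are made up for the example.
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>

static void * __init early_buffer_init(void)
{
	void *buf, *opt;

	/* SMP_CACHE_BYTES-aligned allocation; panics if it cannot be satisfied */
	buf = alloc_bootmem(4096);

	/* the _nopanic variant returns NULL instead of panicking */
	opt = alloc_bootmem_nopanic(65536);
	if (!opt)
		printk(KERN_WARNING "optional early buffer unavailable\n");

	/* keep a firmware-owned physical range out of the allocator */
	if (reserve_bootmem(0x9d000, 0x1000, BOOTMEM_DEFAULT))
		printk(KERN_WARNING "range already reserved\n");

	return buf;
}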
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 384b38d3e8e..161042746af 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -234,7 +234,6 @@ struct cpufreq_driver {
 	int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
 	int (*resume) (struct cpufreq_policy *policy);
 	struct freq_attr **attr;
-	bool hide_interface;
 };
 
 /* flags */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index f0413845f20..1956c8d46d3 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -97,7 +97,6 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
 
 /**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
- * @refcount: local_t used for open-coded "bigref" counting
  * @memcpy_count: transaction counter
  * @bytes_transferred: byte counter
  */
@@ -114,9 +113,6 @@ struct dma_chan_percpu {
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
- * @refcount: kref, used in "bigref" slow-mode
- * @slow_ref: indicates that the DMA channel is free
- * @rcu: the DMA channel's RCU head
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client-count: how many clients are using this channel
@@ -213,8 +209,6 @@ struct dma_async_tx_descriptor {
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
- * @refcount: reference count
- * @done: IO completion struct
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
  * @device_alloc_chan_resources: allocate resources and return the
@@ -227,6 +221,7 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
  * @device_terminate_all: terminate all pending operations
+ * @device_is_tx_complete: poll for transaction completion
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index c37e9241fae..ed21bd3dbd2 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -511,7 +511,6 @@ struct hd_driveid {
 	unsigned short words69_70[2]; /* reserved words 69-70
 	* future command overlap and queuing
 	*/
-	/* HDIO_GET_IDENTITY currently returns only words 0 through 70 */
 	unsigned short words71_74[4]; /* reserved words 71-74
 	* for IDENTIFY PACKET DEVICE command
 	*/
diff --git a/include/linux/ide.h b/include/linux/ide.h
index fe235b65207..e0cedfe9fad 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -866,6 +866,7 @@ struct ide_host {
 	unsigned int n_ports;
 	struct device *dev[2];
 	unsigned int (*init_chipset)(struct pci_dev *);
+	irq_handler_t irq_handler;
 	unsigned long host_flags;
 	void *host_priv;
 	ide_hwif_t *cur_port; /* for hosts requiring serialization */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5d87bc09a1f..dc18b87ed72 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -275,7 +275,7 @@ enum {
 	* advised to wait only for the following duration before
 	* doing SRST.
 	*/
-	ATA_TMOUT_PMP_SRST_WAIT = 1000,
+	ATA_TMOUT_PMP_SRST_WAIT = 5000,
 
 	/* ATA bus states */
 	BUS_UNKNOWN = 0,
@@ -530,6 +530,7 @@ struct ata_queued_cmd {
 	unsigned long flags; /* ATA_QCFLAG_xxx */
 	unsigned int tag;
 	unsigned int n_elem;
+	unsigned int orig_n_elem;
 
 	int dma_dir;
 
@@ -750,7 +751,8 @@ struct ata_port {
 	acpi_handle acpi_handle;
 	struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
 #endif
-	u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
+	/* owned by EH */
+	u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
 };
 
 /* The following initializer overrides a method to NULL whether one of
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ec54785d34f..659366734f3 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1079,6 +1079,7 @@ extern void synchronize_net(void);
 extern int register_netdevice_notifier(struct notifier_block *nb);
 extern int unregister_netdevice_notifier(struct notifier_block *nb);
 extern int init_dummy_netdev(struct net_device *dev);
+extern void netdev_resync_ops(struct net_device *dev);
 
 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 3577ffd90d4..54a968b4b92 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -5,6 +5,7 @@
 #include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
 #include <linux/cpumask.h>
+#include <linux/pfn.h>
 
 #include <asm/percpu.h>
 
@@ -52,17 +53,18 @@
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
 
-/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
-#ifndef PERCPU_ENOUGH_ROOM
+/* enough to cover all DEFINE_PER_CPUs in modules */
 #ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE 8192
+#define PERCPU_MODULE_RESERVE (8 << 10)
 #else
 #define PERCPU_MODULE_RESERVE 0
 #endif
 
+#ifndef PERCPU_ENOUGH_ROOM
 #define PERCPU_ENOUGH_ROOM \
-	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
-#endif /* PERCPU_ENOUGH_ROOM */
+	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+	PERCPU_MODULE_RESERVE)
+#endif
 
 /*
  * Must be an lvalue. Since @var must be a simple identifier,
@@ -76,52 +78,90 @@
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk for dynamic percpu allocation if arch is
+ * manually allocating and mapping it for faster access (as a part of
+ * large page mapping for example).
+ *
+ * The following values give between one and two pages of free space
+ * after typical minimal boot (2-way SMP, single disk and NIC) with
+ * both defconfig and a distro config on x86_64 and 32. More
+ * intelligent way to determine this would be nice.
+ */
+#if BITS_PER_LONG > 32
+#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#else
+#define PERCPU_DYNAMIC_RESERVE (12 << 10)
+#endif
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+	size_t static_size, size_t reserved_size,
+	ssize_t unit_size, ssize_t dyn_size,
+	void *base_addr,
+	pcpu_populate_pte_fn_t populate_pte_fn);
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated. Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+extern void *__alloc_reserved_percpu(size_t size, size_t align);
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 struct percpu_data {
 	void *ptrs[1];
 };
 
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-/*
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */
-#define percpu_ptr(ptr, cpu) \
-({ \
-	struct percpu_data *__p = __percpu_disguise(ptr); \
-	(__typeof__(ptr))__p->ptrs[(cpu)]; \
+
+#define per_cpu_ptr(ptr, cpu) \
+({ \
+	struct percpu_data *__p = __percpu_disguise(ptr); \
+	(__typeof__(ptr))__p->ptrs[(cpu)]; \
 })
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
 
 #else /* CONFIG_SMP */
 
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
 {
-	return kzalloc(size, gfp);
+	/*
+	 * Can't easily make larger alignment work with kmalloc. WARN
+	 * on it. Larger alignment should only be used for module
+	 * percpu sections on SMP for which this path isn't used.
+	 */
+	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+	return kzalloc(size, GFP_KERNEL);
 }
 
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
 {
-	kfree(__pdata);
+	kfree(p);
 }
 
 #endif /* CONFIG_SMP */
 
-#define percpu_alloc_mask(size, gfp, mask) \
-	__percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
-	cpu_possible_map)
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr) percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
+#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
+	__alignof__(type))
 
 #endif /* __LINUX_PERCPU_H */
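For orientation, not part of the diff above: after this change, dynamic per-cpu memory is requested with alloc_percpu(type), which now passes __alignof__(type) down to __alloc_percpu(), accessed through per_cpu_ptr(), and released with free_percpu(). A minimal sketch of typical use, with hypothetical names (my_stats and friends), might look like this:

/*
 * Illustrative sketch only -- not part of this diff.  The structure
 * and function names are invented for the example.
 */
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/errno.h>

struct my_stats {
	unsigned long packets;
	unsigned long bytes;
};

static struct my_stats *stats;

static int my_stats_init(void)
{
	/* size and alignment both come from the type */
	stats = alloc_percpu(struct my_stats);
	if (!stats)
		return -ENOMEM;
	return 0;
}

static void my_stats_account(unsigned int len)
{
	/* non-atomic update of this CPU's copy: pin the CPU first */
	struct my_stats *s = per_cpu_ptr(stats, get_cpu());

	s->packets++;
	s->bytes += len;
	put_cpu();
}

static unsigned long my_stats_total_bytes(void)
{
	unsigned long sum = 0;
	int cpu;

	/* readers walk every CPU's copy and sum the counters */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(stats, cpu)->bytes;
	return sum;
}

static void my_stats_exit(void)
{
	free_percpu(stats);
}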
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index f3f697df1d7..80044a4f3ab 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
 #define rcu_enter_nohz() do { } while (0)
 #define rcu_exit_nohz() do { } while (0)
 
+/* A context switch is a grace period for rcuclassic. */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 921340a7b71..528343e6da5 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,6 +52,9 @@ struct rcu_head {
 	void (*func)(struct rcu_head *head);
 };
 
+/* Internal to kernel, but needed by rcupreempt.h. */
+extern int rcu_scheduler_active;
+
 #if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
 #elif defined(CONFIG_TREE_RCU)
@@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);
 
 /* Internal to kernel */
 extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
 extern int rcu_needs_cpu(int cpu);
 
 #endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 3e05c09b54a..74304b4538d 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
 #define rcu_exit_nohz() do { } while (0)
 #endif /* CONFIG_NO_HZ */
 
+/*
+ * A context switch is a grace period for rcupreempt synchronize_rcu()
+ * only during early boot, before the scheduler has been initialized.
+ * So, how the heck do we get a context switch?  Well, if the caller
+ * invokes synchronize_rcu(), they are willing to accept a context
+ * switch, so we simply pretend that one happened.
+ *
+ * After boot, there might be a blocked or preempted task in an RCU
+ * read-side critical section, so we cannot then take the fastpath.
+ */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1 && !rcu_scheduler_active;
+}
+
 #endif /* __LINUX_RCUPREEMPT_H */
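For orientation, not part of the diff above: rcu_blocking_is_gp() lets updater-side code treat its own context switch as the grace period when only one CPU is online (and, for preemptible RCU, only before the scheduler is active). A sketch of how a blocking wait can use it as a fastpath, using a hypothetical my_synchronize_rcu() rather than the kernel's actual synchronize_rcu() implementation, might look like this:

/*
 * Illustrative sketch only -- not part of this diff.  The names
 * my_rcu_sync / my_synchronize_rcu are invented for the example.
 */
#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/kernel.h>

struct my_rcu_sync {
	struct rcu_head head;
	struct completion done;
};

/* callback invoked after a full grace period has elapsed */
static void my_rcu_sync_done(struct rcu_head *head)
{
	struct my_rcu_sync *s = container_of(head, struct my_rcu_sync, head);

	complete(&s->done);
}

static void my_synchronize_rcu(void)
{
	struct my_rcu_sync s;

	/* fastpath: nothing can be inside a read-side critical section */
	if (rcu_blocking_is_gp())
		return;

	/* slowpath: queue a callback and sleep until it runs */
	init_completion(&s.done);
	call_rcu(&s.head, my_rcu_sync_done);
	wait_for_completion(&s.done);
}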
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index d4368b7975c..a722fb67bb2 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
 }
 #endif /* CONFIG_NO_HZ */
 
+/* A context switch is a grace period for rcutree. */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f0a50b20e8a..a7c7698583b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2303,9 +2303,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
 	long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif
 
+extern int task_can_switch_user(struct user_struct *up,
+	struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 1bcb357a01a..e0417e4d3f1 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -212,7 +212,7 @@ static inline void serio_unpin_driver(struct serio *serio)
 #define SERIO_FUJITSU 0x35
 #define SERIO_ZHENHUA 0x36
 #define SERIO_INEXIO 0x37
-#define SERIO_TOUCHIT213 0x37
+#define SERIO_TOUCHIT213 0x38
 #define SERIO_W8001 0x39
 
 #endif
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 9c0890c7a06..a43ebec3a7b 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 	struct page ***pages);
+extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
+	pgprot_t prot, struct page **pages);
+extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 
 /* Allocate/destroy a 'vmalloc' VM area. */
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
 */
 extern rwlock_t vmlist_lock;
 extern struct vm_struct *vmlist;
+extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 
 #endif /* _LINUX_VMALLOC_H */
